@deepgram/sdk 1.3.1 → 1.4.2

This diff compares the contents of the two publicly available package versions as released to their public registry. It is provided for informational purposes only and reflects the changes exactly as they appear in that registry.
Files changed (194)
  1. package/CHANGELOG.md +7 -1
  2. package/dist/browser/index.js +1 -0
  3. package/dist/index.d.ts +1315 -24
  4. package/dist/index.js +1 -41
  5. package/package.json +16 -5
  6. package/tsconfig-es6.json +16 -0
  7. package/webpack.config.js +35 -0
  8. package/dist/billing.d.ts +0 -18
  9. package/dist/billing.js +0 -73
  10. package/dist/billing.js.map +0 -1
  11. package/dist/constants/defaultOptions.d.ts +0 -6
  12. package/dist/constants/defaultOptions.js +0 -10
  13. package/dist/constants/defaultOptions.js.map +0 -1
  14. package/dist/constants/index.d.ts +0 -1
  15. package/dist/constants/index.js +0 -18
  16. package/dist/constants/index.js.map +0 -1
  17. package/dist/enums/alternatives.d.ts +0 -4
  18. package/dist/enums/alternatives.js +0 -3
  19. package/dist/enums/alternatives.js.map +0 -1
  20. package/dist/enums/connectionState.d.ts +0 -6
  21. package/dist/enums/connectionState.js +0 -11
  22. package/dist/enums/connectionState.js.map +0 -1
  23. package/dist/enums/diarization.d.ts +0 -4
  24. package/dist/enums/diarization.js +0 -3
  25. package/dist/enums/diarization.js.map +0 -1
  26. package/dist/enums/index.d.ts +0 -7
  27. package/dist/enums/index.js +0 -24
  28. package/dist/enums/index.js.map +0 -1
  29. package/dist/enums/liveTranscriptionEvents.d.ts +0 -6
  30. package/dist/enums/liveTranscriptionEvents.js +0 -3
  31. package/dist/enums/liveTranscriptionEvents.js.map +0 -1
  32. package/dist/enums/models.d.ts +0 -5
  33. package/dist/enums/models.js +0 -3
  34. package/dist/enums/models.js.map +0 -1
  35. package/dist/enums/punctuation.d.ts +0 -4
  36. package/dist/enums/punctuation.js +0 -3
  37. package/dist/enums/punctuation.js.map +0 -1
  38. package/dist/enums/searchKind.d.ts +0 -4
  39. package/dist/enums/searchKind.js +0 -3
  40. package/dist/enums/searchKind.js.map +0 -1
  41. package/dist/helpers/index.d.ts +0 -1
  42. package/dist/helpers/index.js +0 -18
  43. package/dist/helpers/index.js.map +0 -1
  44. package/dist/helpers/secondsToTimestamp.d.ts +0 -1
  45. package/dist/helpers/secondsToTimestamp.js +0 -8
  46. package/dist/helpers/secondsToTimestamp.js.map +0 -1
  47. package/dist/httpRequest.d.ts +0 -3
  48. package/dist/httpRequest.js +0 -91
  49. package/dist/httpRequest.js.map +0 -1
  50. package/dist/index.js.map +0 -1
  51. package/dist/invitation.d.ts +0 -29
  52. package/dist/invitation.js +0 -99
  53. package/dist/invitation.js.map +0 -1
  54. package/dist/keys.d.ts +0 -32
  55. package/dist/keys.js +0 -132
  56. package/dist/keys.js.map +0 -1
  57. package/dist/members.d.ts +0 -18
  58. package/dist/members.js +0 -73
  59. package/dist/members.js.map +0 -1
  60. package/dist/projects.d.ts +0 -21
  61. package/dist/projects.js +0 -82
  62. package/dist/projects.js.map +0 -1
  63. package/dist/scopes.d.ts +0 -20
  64. package/dist/scopes.js +0 -77
  65. package/dist/scopes.js.map +0 -1
  66. package/dist/transcription/index.d.ts +0 -18
  67. package/dist/transcription/index.js +0 -72
  68. package/dist/transcription/index.js.map +0 -1
  69. package/dist/transcription/liveTranscription.d.ts +0 -23
  70. package/dist/transcription/liveTranscription.js +0 -83
  71. package/dist/transcription/liveTranscription.js.map +0 -1
  72. package/dist/transcription/preRecordedTranscription.d.ts +0 -8
  73. package/dist/transcription/preRecordedTranscription.js +0 -114
  74. package/dist/transcription/preRecordedTranscription.js.map +0 -1
  75. package/dist/types/balance.d.ts +0 -6
  76. package/dist/types/balance.js +0 -3
  77. package/dist/types/balance.js.map +0 -1
  78. package/dist/types/balanceList.d.ts +0 -4
  79. package/dist/types/balanceList.js +0 -3
  80. package/dist/types/balanceList.js.map +0 -1
  81. package/dist/types/channel.d.ts +0 -25
  82. package/dist/types/channel.js +0 -3
  83. package/dist/types/channel.js.map +0 -1
  84. package/dist/types/createKeyOptions.d.ts +0 -13
  85. package/dist/types/createKeyOptions.js +0 -3
  86. package/dist/types/createKeyOptions.js.map +0 -1
  87. package/dist/types/hit.d.ts +0 -21
  88. package/dist/types/hit.js +0 -3
  89. package/dist/types/hit.js.map +0 -1
  90. package/dist/types/index.d.ts +0 -35
  91. package/dist/types/index.js +0 -52
  92. package/dist/types/index.js.map +0 -1
  93. package/dist/types/invitationList.d.ts +0 -4
  94. package/dist/types/invitationList.js +0 -3
  95. package/dist/types/invitationList.js.map +0 -1
  96. package/dist/types/invitationOptions.d.ts +0 -4
  97. package/dist/types/invitationOptions.js +0 -3
  98. package/dist/types/invitationOptions.js.map +0 -1
  99. package/dist/types/key.d.ts +0 -25
  100. package/dist/types/key.js +0 -3
  101. package/dist/types/key.js.map +0 -1
  102. package/dist/types/keyResponse.d.ts +0 -50
  103. package/dist/types/keyResponse.js +0 -3
  104. package/dist/types/keyResponse.js.map +0 -1
  105. package/dist/types/keyword.d.ts +0 -4
  106. package/dist/types/keyword.js +0 -3
  107. package/dist/types/keyword.js.map +0 -1
  108. package/dist/types/liveTranscriptionOptions.d.ts +0 -148
  109. package/dist/types/liveTranscriptionOptions.js +0 -3
  110. package/dist/types/liveTranscriptionOptions.js.map +0 -1
  111. package/dist/types/liveTranscriptionResponse.d.ts +0 -9
  112. package/dist/types/liveTranscriptionResponse.js +0 -3
  113. package/dist/types/liveTranscriptionResponse.js.map +0 -1
  114. package/dist/types/member.d.ts +0 -7
  115. package/dist/types/member.js +0 -3
  116. package/dist/types/member.js.map +0 -1
  117. package/dist/types/memberList.d.ts +0 -4
  118. package/dist/types/memberList.js +0 -3
  119. package/dist/types/memberList.js.map +0 -1
  120. package/dist/types/message.d.ts +0 -3
  121. package/dist/types/message.js +0 -3
  122. package/dist/types/message.js.map +0 -1
  123. package/dist/types/metadata.d.ts +0 -8
  124. package/dist/types/metadata.js +0 -3
  125. package/dist/types/metadata.js.map +0 -1
  126. package/dist/types/prerecordedTranscriptionOptions.d.ts +0 -126
  127. package/dist/types/prerecordedTranscriptionOptions.js +0 -3
  128. package/dist/types/prerecordedTranscriptionOptions.js.map +0 -1
  129. package/dist/types/prerecordedTranscriptionResponse.d.ts +0 -25
  130. package/dist/types/prerecordedTranscriptionResponse.js +0 -51
  131. package/dist/types/prerecordedTranscriptionResponse.js.map +0 -1
  132. package/dist/types/project.d.ts +0 -17
  133. package/dist/types/project.js +0 -3
  134. package/dist/types/project.js.map +0 -1
  135. package/dist/types/projectPatchResponse.d.ts +0 -6
  136. package/dist/types/projectPatchResponse.js +0 -3
  137. package/dist/types/projectPatchResponse.js.map +0 -1
  138. package/dist/types/projectResponse.d.ts +0 -4
  139. package/dist/types/projectResponse.js +0 -3
  140. package/dist/types/projectResponse.js.map +0 -1
  141. package/dist/types/scopeList.d.ts +0 -3
  142. package/dist/types/scopeList.js +0 -3
  143. package/dist/types/scopeList.js.map +0 -1
  144. package/dist/types/search.d.ts +0 -14
  145. package/dist/types/search.js +0 -3
  146. package/dist/types/search.js.map +0 -1
  147. package/dist/types/transcriptionSource.d.ts +0 -14
  148. package/dist/types/transcriptionSource.js +0 -3
  149. package/dist/types/transcriptionSource.js.map +0 -1
  150. package/dist/types/usageCallback.d.ts +0 -4
  151. package/dist/types/usageCallback.js +0 -3
  152. package/dist/types/usageCallback.js.map +0 -1
  153. package/dist/types/usageField.d.ts +0 -7
  154. package/dist/types/usageField.js +0 -3
  155. package/dist/types/usageField.js.map +0 -1
  156. package/dist/types/usageFieldOptions.d.ts +0 -4
  157. package/dist/types/usageFieldOptions.js +0 -3
  158. package/dist/types/usageFieldOptions.js.map +0 -1
  159. package/dist/types/usageOptions.d.ts +0 -23
  160. package/dist/types/usageOptions.js +0 -3
  161. package/dist/types/usageOptions.js.map +0 -1
  162. package/dist/types/usageRequest.d.ts +0 -11
  163. package/dist/types/usageRequest.js +0 -3
  164. package/dist/types/usageRequest.js.map +0 -1
  165. package/dist/types/usageRequestDetail.d.ts +0 -30
  166. package/dist/types/usageRequestDetail.js +0 -3
  167. package/dist/types/usageRequestDetail.js.map +0 -1
  168. package/dist/types/usageRequestList.d.ts +0 -6
  169. package/dist/types/usageRequestList.js +0 -3
  170. package/dist/types/usageRequestList.js.map +0 -1
  171. package/dist/types/usageRequestListOptions.d.ts +0 -7
  172. package/dist/types/usageRequestListOptions.js +0 -3
  173. package/dist/types/usageRequestListOptions.js.map +0 -1
  174. package/dist/types/usageRequestMessage.d.ts +0 -3
  175. package/dist/types/usageRequestMessage.js +0 -3
  176. package/dist/types/usageRequestMessage.js.map +0 -1
  177. package/dist/types/usageResponse.d.ts +0 -10
  178. package/dist/types/usageResponse.js +0 -3
  179. package/dist/types/usageResponse.js.map +0 -1
  180. package/dist/types/usageResponseDetail.d.ts +0 -6
  181. package/dist/types/usageResponseDetail.js +0 -3
  182. package/dist/types/usageResponseDetail.js.map +0 -1
  183. package/dist/types/utterance.d.ts +0 -39
  184. package/dist/types/utterance.js +0 -3
  185. package/dist/types/utterance.js.map +0 -1
  186. package/dist/types/wordBase.d.ts +0 -8
  187. package/dist/types/wordBase.js +0 -3
  188. package/dist/types/wordBase.js.map +0 -1
  189. package/dist/usage.d.ts +0 -34
  190. package/dist/usage.js +0 -122
  191. package/dist/usage.js.map +0 -1
  192. package/dist/userAgent.d.ts +0 -1
  193. package/dist/userAgent.js +0 -20
  194. package/dist/userAgent.js.map +0 -1
package/dist/index.d.ts CHANGED
@@ -1,25 +1,1316 @@
1
- import { Keys } from "./keys";
2
- import { Projects } from "./projects";
3
- import { Transcriber } from "./transcription";
4
- import { Usage } from "./usage";
5
- import { Members } from "./members";
6
- import { Invitation } from "./invitation";
7
- import { Billing } from "./billing";
8
- import { Scopes } from "./scopes";
9
- export declare class Deepgram {
10
- private _apiUrl;
11
- private _apiKey;
12
- keys: Keys;
13
- projects: Projects;
14
- transcription: Transcriber;
15
- usage: Usage;
16
- members: Members;
17
- invitation: Invitation;
18
- billing: Billing;
19
- scopes: Scopes;
20
- constructor(apiKey: string, apiUrl?: string);
21
- /**
22
- * Ensures that the provided options were provided
23
- */
24
- private _validateOptions;
1
+ declare module 'types/balance' {
2
+ export type Balance = {
3
+ balance_id: string;
4
+ amount: number;
5
+ units: string;
6
+ purchase: string;
7
+ };
8
+
9
+ }
10
+ declare module 'types/balanceList' {
11
+ import { Balance } from 'types/balance';
12
+ export type BalanceList = {
13
+ balances?: Array<Balance>;
14
+ };
15
+
16
+ }
17
+ declare module 'types/hit' {
18
+ /**
19
+ * Represents an identified search term in the transcript
20
+ */
21
+ export type Hit = {
22
+ /**
23
+ * Value between 0 and 1 that indicates the model's relative confidence in this hit.
24
+ */
25
+ confidence: number;
26
+ /**
27
+ * Offset in seconds from the start of the audio to where the hit occurs.
28
+ */
29
+ start: number;
30
+ /**
31
+ * Offset in seconds from the start of the audio to where the hit ends.
32
+ */
33
+ end: number;
34
+ /**
35
+ * Transcript that corresponds to the time between start and end.
36
+ */
37
+ snippet: string;
38
+ };
39
+
40
+ }
41
+ declare module 'types/search' {
42
+ import { Hit } from 'types/hit';
43
+ /**
44
+ * Search result for a transcription
45
+ */
46
+ export type Search = {
47
+ /**
48
+ * Term for which Deepgram is searching.
49
+ */
50
+ query: string;
51
+ /**
52
+ * Instances of query found in transcript
53
+ */
54
+ hits: Array<Hit>;
55
+ };
56
+
57
+ }
58
+ declare module 'types/wordBase' {
59
+ export type WordBase = {
60
+ word: string;
61
+ start: number;
62
+ end: number;
63
+ confidence: number;
64
+ punctuated_word?: string;
65
+ speaker?: number;
66
+ };
67
+
68
+ }
69
+ declare module 'types/channel' {
70
+ import { Search } from 'types/search';
71
+ import { WordBase } from 'types/wordBase';
72
+ /**
73
+ * Channel of speech identified by Deepgram
74
+ */
75
+ export type Channel = {
76
+ /**
77
+ * Searched terms & results
78
+ */
79
+ search?: Array<Search>;
80
+ alternatives: Array<{
81
+ /**
82
+ * Text of speech identified by API
83
+ */
84
+ transcript: string;
85
+ /**
86
+ * Confidence in transcript generated
87
+ */
88
+ confidence: number;
89
+ /**
90
+ * Array of words included in the transcript
91
+ */
92
+ words: Array<WordBase>;
93
+ }>;
94
+ };
95
+
96
+ }
97
+ declare module 'types/createKeyOptions' {
98
+ /**
99
+ * Optional options used when creating an API key
100
+ */
101
+ export type CreateKeyOptions = {
102
+ /**
103
+ * Date on which the key you would like to create should expire.
104
+ */
105
+ expirationDate?: Date;
106
+ /**
107
+ * Length of time (in seconds) during which the key you would like to create will remain valid.
108
+ */
109
+ timeToLive?: number;
110
+ };
111
+
112
+ }
113
+ declare module 'types/invitationOptions' {
114
+ export type InvitationOptions = {
115
+ email?: string;
116
+ scope?: string;
117
+ };
118
+
119
+ }
120
+ declare module 'types/invitationList' {
121
+ import { InvitationOptions } from 'types/invitationOptions';
122
+ export type InvitationList = {
123
+ invites?: Array<InvitationOptions>;
124
+ };
125
+
126
+ }
127
+ declare module 'types/key' {
128
+ /**
129
+ * API key used for authenticating with the Deepgram API
130
+ */
131
+ export type Key = {
132
+ /**
133
+ * Unique identifier of the key to use in API requests
134
+ */
135
+ api_key_id: string;
136
+ /**
137
+ * API key to send in API requests (Only displayed when first created)
138
+ */
139
+ key?: string;
140
+ /**
141
+ * Comment for user reference
142
+ */
143
+ comment: string;
144
+ /**
145
+ * Timestamp of the date/time the key was created
146
+ */
147
+ created: string;
148
+ /**
149
+ * Array of scopes assigned to the key
150
+ */
151
+ scopes: Array<string>;
152
+ };
153
+
154
+ }
155
+ declare module 'types/member' {
156
+ export type Member = {
157
+ member_id: string;
158
+ first_name?: string;
159
+ last_name?: string;
160
+ scopes?: Array<string>;
161
+ email: string;
162
+ };
163
+
164
+ }
165
+ declare module 'types/keyResponseObj' {
166
+ import { Key } from 'types/key';
167
+ import { Member } from 'types/member';
168
+ export type KeyResponseObj = {
169
+ /**
170
+ * Optional member associated with the API key
171
+ */
172
+ member?: Member;
173
+ /**
174
+ * API key
175
+ */
176
+ api_key: Key;
177
+ /**
178
+ * Unique identifier of the key to use in API requests
179
+ * @deprecated This property has moved to api_key.api_key_id and will
180
+ * be removed in future versions.
181
+ */
182
+ api_key_id: string;
183
+ /**
184
+ * API key to send in API requests (Only displayed when first created)
185
+ * @deprecated This property has moved to api_key.key and will
186
+ * be removed in future versions.
187
+ */
188
+ key?: string;
189
+ /**
190
+ * Comment for user reference
191
+ * @deprecated This property has moved to api_key.comment and will
192
+ * be removed in future versions.
193
+ */
194
+ comment: string;
195
+ /**
196
+ * Timestamp of the date/time the key was created
197
+ * @deprecated This property has moved to api_key.created and will
198
+ * be removed in future versions.
199
+ */
200
+ created: string;
201
+ /**
202
+ * Array of scopes assigned to the key
203
+ * @deprecated This property has moved to api_key.scopes and will
204
+ * be removed in future versions.
205
+ */
206
+ scopes: Array<string>;
207
+ };
208
+
209
+ }
210
+ declare module 'types/keyResponse' {
211
+ import { KeyResponseObj } from 'types/keyResponseObj';
212
+ /**
213
+ * Response from the Deepgram API to list keys
214
+ */
215
+ export type KeyResponse = {
216
+ /**
217
+ * Array of API keys associated with the project
218
+ */
219
+ api_keys: Array<KeyResponseObj>;
220
+ };
221
+
222
+ }
223
+ declare module 'enums/alternatives' {
224
+ export const enum Alternatives {
225
+ One = "one-alternative",
226
+ Multiple = "multiple-alternatives"
227
+ }
228
+
229
+ }
230
+ declare module 'enums/connectionState' {
231
+ export enum ConnectionState {
232
+ CONNECTING = 0,
233
+ OPEN = 1,
234
+ CLOSING = 2,
235
+ CLOSED = 3
236
+ }
237
+
238
+ }
239
+ declare module 'enums/diarization' {
240
+ export const enum Diarization {
241
+ Diarized = "diarized",
242
+ NonDiarized = "non-diarized"
243
+ }
244
+
245
+ }
246
+ declare module 'enums/liveTranscriptionEvents' {
247
+ export const enum LiveTranscriptionEvents {
248
+ Open = "open",
249
+ Close = "close",
250
+ TranscriptReceived = "transcriptReceived",
251
+ Error = "error"
252
+ }
253
+
254
+ }
255
+ declare module 'enums/models' {
256
+ export const enum Models {
257
+ General = "general",
258
+ Meeting = "meeting",
259
+ PhoneCall = "phonecall"
260
+ }
261
+
262
+ }
263
+ declare module 'enums/punctuation' {
264
+ export const enum Punctuation {
265
+ NonPunctuated = "non-punctuated",
266
+ Punctuated = "punctuated"
267
+ }
268
+
269
+ }
270
+ declare module 'enums/searchKind' {
271
+ export const enum SearchKind {
272
+ NoSearch = "no-search",
273
+ WithSearch = "with-search"
274
+ }
275
+
276
+ }
277
+ declare module 'enums/index' {
278
+ export * from 'enums/alternatives';
279
+ export * from 'enums/connectionState';
280
+ export * from 'enums/diarization';
281
+ export * from 'enums/liveTranscriptionEvents';
282
+ export * from 'enums/models';
283
+ export * from 'enums/punctuation';
284
+ export * from 'enums/searchKind';
285
+
286
+ }
287
+ declare module 'types/liveTranscriptionOptions' {
288
+ import { Models } from 'enums';
289
+ /**
290
+ * Options for transcription
291
+ */
292
+ export type LiveTranscriptionOptions = {
293
+ /**
294
+ * AI model used to process submitted audio.
295
+ * @default general
296
+ * @remarks Possible values are general, phonecall, meeting or a custom string
297
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/model
298
+ */
299
+ model?: Models | string;
300
+ /**
301
+ * Version of the model to use.
302
+ * @default latest
303
+ * @remarks latest OR <version_id>
304
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/version
305
+ */
306
+ version?: string;
307
+ /**
308
+ * BCP-47 language tag that hints at the primary spoken language.
309
+ * @default en-US
310
+ * @remarks Possible values are en-GB, en-IN, en-NZ, en-US, es, fr, ko, pt,
311
+ * pt-BR, ru, tr or null
312
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/language
313
+ */
314
+ language?: string;
315
+ /**
316
+ * Indicates whether to add punctuation and capitalization to the transcript.
317
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/punctuate
318
+ */
319
+ punctuate?: boolean;
320
+ /**
321
+ * Indicates whether to remove profanity from the transcript.
322
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/profanity_filter
323
+ */
324
+ profanity_filter?: boolean;
325
+ /**
326
+ * Indicates whether to redact sensitive information, replacing redacted content with asterisks (*).
327
+ * @remarks Options include:
328
+ * `pci`: Redacts sensitive credit card information, including credit card number, expiration date, and CVV
329
+ * `numbers` (or `true)`: Aggressively redacts strings of numerals
330
+ * `ssn` (*beta*): Redacts social security numbers
331
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/redact
332
+ */
333
+ redact?: Array<string>;
334
+ /**
335
+ * Indicates whether to recognize speaker changes. When set to true, each word
336
+ * in the transcript will be assigned a speaker number starting at 0.
337
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/diarize
338
+ */
339
+ diarize?: boolean;
340
+ /**
341
+ * Indicates whether to transcribe each audio channel independently. When set
342
+ * to true, you will receive one transcript for each channel, which means you
343
+ * can apply a different model to each channel using the model parameter (e.g.,
344
+ * set model to general:phonecall, which applies the general model to channel
345
+ * 0 and the phonecall model to channel 1).
346
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/multichannel
347
+ */
348
+ multichannel?: boolean;
349
+ /**
350
+ * Maximum number of transcript alternatives to return. Just like a human listener,
351
+ * Deepgram can provide multiple possible interpretations of what it hears.
352
+ * @default 1
353
+ */
354
+ alternatives?: number;
355
+ /**
356
+ * Indicates whether to convert numbers from written format (e.g., one) to
357
+ * numerical format (e.g., 1). Deepgram can format numbers up to 999,999.
358
+ * @remarks Converted numbers do not include punctuation. For example,
359
+ * 999,999 would be transcribed as 999999.
360
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/numerals
361
+ */
362
+ numerals?: boolean;
363
+ /**
364
+ * Terms or phrases to search for in the submitted audio. Deepgram searches
365
+ * for acoustic patterns in audio rather than text patterns in transcripts
366
+ * because we have noticed that acoustic pattern matching is more performant.
367
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/search
368
+ */
369
+ search?: Array<string>;
370
+ /**
371
+ * Callback URL to provide if you would like your submitted audio to be
372
+ * processed asynchronously. When passed, Deepgram will immediately respond
373
+ * with a request_id. When it has finished analyzing the audio, it will send
374
+ * a POST request to the provided URL with an appropriate HTTP status code.
375
+ * @remarks You may embed basic authentication credentials in the callback URL.
376
+ * Only ports 80, 443, 8080, and 8443 can be used for callbacks.
377
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/callback
378
+ */
379
+ callback?: string;
380
+ /**
381
+ * Keywords to which the model should pay particular attention to boosting
382
+ * or suppressing to help it understand context. Just like a human listener,
383
+ * Deepgram can better understand mumbled, distorted, or otherwise
384
+ * hard-to-decipher speech when it knows the context of the conversation.
385
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/keywords
386
+ */
387
+ keywords?: Array<string>;
388
+ /**
389
+ * Indicates whether the streaming endpoint should send you updates to its
390
+ * transcription as more audio becomes available. By default, the streaming
391
+ * endpoint returns regular updates, which means transcription results will
392
+ * likely change for a period of time. You can avoid receiving these updates
393
+ * by setting this flag to false.
394
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/interim_results
395
+ */
396
+ interim_results?: boolean;
397
+ /**
398
+ * Indicates whether Deepgram will detect whether a speaker has finished
399
+ * speaking (or paused for a significant period of time, indicating the
400
+ * completion of an idea). When Deepgram detects an endpoint, it assumes
401
+ * that no additional data will improve its prediction, so it immediately
402
+ * finalizes the result for the processed time range and returns the
403
+ * transcript with a speech_final parameter set to true.
404
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/endpointing
405
+ */
406
+ endpointing?: boolean;
407
+ /**
408
+ * Length of time in milliseconds of silence that voice activation detection
409
+ * (VAD) will use to detect that a speaker has finished speaking. Used when
410
+ * endpointing is enabled. Defaults to 10 ms. Deepgram customers may configure
411
+ * a value between 10 ms and 500 ms; on-premise customers may remove this
412
+ * restriction.
413
+ * @default 10
414
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/vad_turnoff
415
+ */
416
+ vad_turnoff?: number;
417
+ /**
418
+ * Expected encoding of the submitted streaming audio.
419
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/encoding
420
+ */
421
+ encoding?: string;
422
+ /**
423
+ * Number of independent audio channels contained in submitted streaming
424
+ * audio. Only read when a value is provided for encoding.
425
+ * @default 1
426
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/channels
427
+ */
428
+ channels?: number;
429
+ /**
430
+ * Sample rate of submitted streaming audio. Required (and only read)
431
+ * when a value is provided for encoding.
432
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/sample_rate
433
+ */
434
+ sample_rate?: number;
435
+ };
436
+
437
+ }
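For orientation, a minimal sketch (illustrative, not drawn from the package) of an options object matching the LiveTranscriptionOptions shape declared above; the concrete values are example choices, and encoding/sample_rate/channels only matter when streaming raw, headerless audio:

```ts
// Illustrative only: matches the LiveTranscriptionOptions shape declared above.
const liveOptions = {
  model: "general",        // or "phonecall", "meeting", or a custom model name
  language: "en-US",
  punctuate: true,
  interim_results: true,   // receive evolving transcripts while audio is still streaming
  encoding: "linear16",    // needed for raw audio; the next two fields are only read when encoding is set
  sample_rate: 16000,
  channels: 1,
};
```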
438
+ declare module 'types/liveTranscriptionResponse' {
439
+ import { Channel } from 'types/channel';
440
+ export type LiveTranscriptionResponse = {
441
+ channel_index: Array<number>;
442
+ duration: number;
443
+ start: number;
444
+ is_final: boolean;
445
+ speech_final: boolean;
446
+ channel: Channel;
447
+ };
448
+
449
+ }
450
+ declare module 'types/memberList' {
451
+ import { Member } from 'types/member';
452
+ export type MemberList = {
453
+ members?: Array<Member>;
454
+ };
455
+
456
+ }
457
+ declare module 'types/message' {
458
+ export type Message = {
459
+ message?: string;
460
+ };
461
+
462
+ }
463
+ declare module 'types/metadata' {
464
+ export type Metadata = {
465
+ request_id: string;
466
+ transaction_key: string;
467
+ sha256: string;
468
+ created: string;
469
+ duration: number;
470
+ channels: number;
471
+ };
472
+
473
+ }
474
+ declare module 'types/prerecordedTranscriptionOptions' {
475
+ import { Models } from 'enums';
476
+ /**
477
+ * Options for transcription
478
+ */
479
+ export type PrerecordedTranscriptionOptions = {
480
+ /**
481
+ * AI model used to process submitted audio.
482
+ * @default general
483
+ * @remarks Possible values are general, phonecall, meeting or a custom string
484
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/model
485
+ */
486
+ model?: Models | string;
487
+ /**
488
+ * Version of the model to use.
489
+ * @default latest
490
+ * @remarks latest OR <version_id>
491
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/version
492
+ */
493
+ version?: string;
494
+ /**
495
+ * BCP-47 language tag that hints at the primary spoken language.
496
+ * @default en-US
497
+ * @remarks Possible values are en-GB, en-IN, en-NZ, en-US, es, fr, ko, pt,
498
+ * pt-BR, ru, tr or null
499
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/language
500
+ */
501
+ language?: string;
502
+ /**
503
+ * Indicates whether to add punctuation and capitalization to the transcript.
504
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/punctuate
505
+ */
506
+ punctuate?: boolean;
507
+ /**
508
+ * Indicates whether to remove profanity from the transcript.
509
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/profanity_filter
510
+ */
511
+ profanity_filter?: boolean;
512
+ /**
513
+ * Indicates whether to redact sensitive information, replacing redacted content with asterisks (*).
514
+ * @remarks Options include:
515
+ * `pci`: Redacts sensitive credit card information, including credit card number, expiration date, and CVV
516
+ * `numbers` (or `true)`: Aggressively redacts strings of numerals
517
+ * `ssn` (*beta*): Redacts social security numbers
518
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/redact
519
+ */
520
+ redact?: Array<string>;
521
+ /**
522
+ * Indicates whether to recognize speaker changes. When set to true, each word
523
+ * in the transcript will be assigned a speaker number starting at 0.
524
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/diarize
525
+ */
526
+ diarize?: boolean;
527
+ /**
528
+ * Indicates whether to transcribe each audio channel independently. When set
529
+ * to true, you will receive one transcript for each channel, which means you
530
+ * can apply a different model to each channel using the model parameter (e.g.,
531
+ * set model to general:phonecall, which applies the general model to channel
532
+ * 0 and the phonecall model to channel 1).
533
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/multichannel
534
+ */
535
+ multichannel?: boolean;
536
+ /**
537
+ * Maximum number of transcript alternatives to return. Just like a human listener,
538
+ * Deepgram can provide multiple possible interpretations of what it hears.
539
+ * @default 1
540
+ */
541
+ alternatives?: number;
542
+ /**
543
+ * Indicates whether to convert numbers from written format (e.g., one) to
544
+ * numerical format (e.g., 1). Deepgram can format numbers up to 999,999.
545
+ * @remarks Converted numbers do not include punctuation. For example,
546
+ * 999,999 would be transcribed as 999999.
547
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/numerals
548
+ */
549
+ numerals?: boolean;
550
+ /**
551
+ * Terms or phrases to search for in the submitted audio. Deepgram searches
552
+ * for acoustic patterns in audio rather than text patterns in transcripts
553
+ * because we have noticed that acoustic pattern matching is more performant.
554
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/search
555
+ */
556
+ search?: Array<string>;
557
+ /**
558
+ * Callback URL to provide if you would like your submitted audio to be
559
+ * processed asynchronously. When passed, Deepgram will immediately respond
560
+ * with a request_id. When it has finished analyzing the audio, it will send
561
+ * a POST request to the provided URL with an appropriate HTTP status code.
562
+ * @remarks You may embed basic authentication credentials in the callback URL.
563
+ * Only ports 80, 443, 8080, and 8443 can be used for callbacks.
564
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/callback
565
+ */
566
+ callback?: string;
567
+ /**
568
+ * Keywords to which the model should pay particular attention to boosting
569
+ * or suppressing to help it understand context. Just like a human listener,
570
+ * Deepgram can better understand mumbled, distorted, or otherwise
571
+ * hard-to-decipher speech when it knows the context of the conversation.
572
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/keywords
573
+ */
574
+ keywords?: Array<string>;
575
+ /**
576
+ * Indicates whether Deepgram will segment speech into meaningful semantic
577
+ * units, which allows the model to interact more naturally and effectively
578
+ * with speakers' spontaneous speech patterns. For example, when humans
579
+ * speak to each other conversationally, they often pause mid-sentence to
580
+ * reformulate their thoughts, or stop and restart a badly-worded sentence.
581
+ * When utterances is set to true, these utterances are identified and
582
+ * returned in the transcript results.
583
+ *
584
+ * By default, when utterances is enabled, it starts a new utterance after
585
+ * 0.8 s of silence. You can customize the length of time used to determine
586
+ * where to split utterances by submitting the utt_split parameter.
587
+ * @remarks **BETA FEATURE**
588
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/utterances
589
+ */
590
+ utterances?: boolean;
591
+ /**
592
+ * Length of time in seconds of silence between words that Deepgram will
593
+ * use when determining where to split utterances. Used when utterances
594
+ * is enabled.
595
+ * @default 0.8 seconds
596
+ * @remarks **BETA FEATURE**
597
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/utt_split
598
+ */
599
+ utt_split?: number;
600
+ };
601
+
602
+ }
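Likewise, a sketch of options for the prerecorded endpoint exercising the utterances feature documented above; the values are illustrative:

```ts
// Illustrative only: matches the PrerecordedTranscriptionOptions shape declared above.
const prerecordedOptions = {
  model: "general",
  punctuate: true,
  diarize: true,
  utterances: true,              // required later if you want toWebVTT()/toSRT() on the response
  utt_split: 1.2,                // split utterances after 1.2 s of silence instead of the 0.8 s default
  keywords: ["deepgram", "sdk"], // example context terms
};
```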
603
+ declare module 'types/utterance' {
604
+ import { WordBase } from 'types/wordBase';
605
+ export type Utterance = {
606
+ /**
607
+ * Start time (in seconds) from the beginning of the audio stream.
608
+ */
609
+ start: number;
610
+ /**
611
+ * End time (in seconds) from the beginning of the audio stream.
612
+ */
613
+ end: number;
614
+ /**
615
+ * Floating point value between 0 and 1 that indicates overall transcript
616
+ * reliability. Larger values indicate higher confidence.
617
+ */
618
+ confidence: number;
619
+ /**
620
+ * Audio channel to which the utterance belongs. When using multichannel audio,
621
+ * utterances are chronologically ordered by channel.
622
+ */
623
+ channel: number;
624
+ /**
625
+ * Transcript for the audio segment being processed.
626
+ */
627
+ transcript: string;
628
+ /**
629
+ * Object containing each word in the transcript, along with its start time
630
+ * and end time (in seconds) from the beginning of the audio stream, and a confidence value.
631
+ */
632
+ words: Array<WordBase>;
633
+ /**
634
+ * Integer indicating the predicted speaker of the majority of words
635
+ * in the utterance who is saying the words being processed.
636
+ */
637
+ speaker?: number;
638
+ /**
639
+ * Unique identifier of the utterance
640
+ */
641
+ id: string;
642
+ };
643
+
644
+ }
645
+ declare module 'helpers/secondsToTimestamp' {
646
+ export function secondsToTimestamp(seconds: number): string;
647
+
648
+ }
649
+ declare module 'helpers/validateOptions' {
650
+ export function validateOptions(apiKey: string, apiUrl: string): void;
651
+
652
+ }
653
+ declare module 'helpers/index' {
654
+ export * from 'helpers/secondsToTimestamp';
655
+ export * from 'helpers/validateOptions';
656
+
657
+ }
658
+ declare module 'types/prerecordedTranscriptionResponse' {
659
+ import { Metadata } from 'types/metadata';
660
+ import { Channel } from 'types/channel';
661
+ import { Utterance } from 'types/utterance';
662
+ export class PrerecordedTranscriptionResponse {
663
+ request_id?: string;
664
+ metadata?: Metadata;
665
+ results?: {
666
+ channels: Array<Channel>;
667
+ utterances?: Array<Utterance>;
668
+ };
669
+ /**
670
+ * Converts the transcription to the WebVTT format
671
+ * @remarks In order to translate the transcription to WebVTT, the utterances
672
+ * feature must be used.
673
+ * @returns A string with the transcription in the WebVTT format
674
+ */
675
+ toWebVTT(): string;
676
+ /**
677
+ * Converts the transcription to the SRT format
678
+ * @remarks In order to translate the transcription to SRT, the utterances
679
+ * feature must be used.
680
+ * @returns A string with the transcription in the SRT format
681
+ */
682
+ toSRT(): string;
683
+ }
684
+
685
+ }
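A hedged sketch of how the toWebVTT/toSRT helpers declared above might be used; it assumes the request was made with utterances enabled, and the audio URL and output paths are illustrative:

```ts
import { promises as fs } from "fs";
import { Deepgram } from "@deepgram/sdk";

async function writeCaptions(apiKey: string): Promise<void> {
  const deepgram = new Deepgram(apiKey);
  const response = await deepgram.transcription.preRecorded(
    { url: "https://example.com/audio.wav" },  // illustrative source
    { punctuate: true, utterances: true }      // utterances must be enabled for captions
  );
  await fs.writeFile("captions.vtt", response.toWebVTT());
  await fs.writeFile("captions.srt", response.toSRT());
}
```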
686
+ declare module 'types/project' {
687
+ /**
688
+ * Deepgram project
689
+ */
690
+ export type Project = {
691
+ /**
692
+ * Unique identifier of the project
693
+ */
694
+ project_id: string;
695
+ /**
696
+ * User provided name of the project
697
+ */
698
+ name?: string;
699
+ /**
700
+ * Name of the company associated with the project. Optional.
701
+ */
702
+ company?: string;
703
+ };
704
+
705
+ }
706
+ declare module 'types/projectPatchResponse' {
707
+ export type ProjectPatchResponse = {
708
+ /**
709
+ * Success message.
710
+ */
711
+ message: string;
712
+ };
713
+
714
+ }
715
+ declare module 'types/projectResponse' {
716
+ import { Project } from 'types/project';
717
+ export type ProjectResponse = {
718
+ projects: Array<Project>;
719
+ };
720
+
721
+ }
722
+ declare module 'types/scopeList' {
723
+ export type ScopeList = {
724
+ scopes: Array<string>;
725
+ };
726
+
727
+ }
728
+ declare module 'types/transcriptionSource' {
729
+ /// <reference types="node" />
730
+ import { ReadStream } from 'fs';
731
+ export type TranscriptionSource = UrlSource | BufferSource | ReadStreamSource;
732
+ export type ReadStreamSource = {
733
+ stream: ReadStream;
734
+ mimetype: string;
735
+ };
736
+ export type UrlSource = {
737
+ url: string;
738
+ };
739
+ export type BufferSource = {
740
+ buffer: Buffer;
741
+ mimetype: string;
742
+ };
743
+
744
+ }
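For reference, the three TranscriptionSource shapes declared above, built with illustrative paths and mimetypes:

```ts
import { createReadStream, readFileSync } from "fs";

// UrlSource: Deepgram fetches the audio itself.
const urlSource = { url: "https://example.com/call.mp3" };

// BufferSource: audio already in memory; the mimetype value is an example.
const bufferSource = { buffer: readFileSync("call.wav"), mimetype: "audio/wav" };

// ReadStreamSource: audio streamed from disk.
const streamSource = { stream: createReadStream("call.wav"), mimetype: "audio/wav" };
```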
745
+ declare module 'types/usageCallback' {
746
+ export type UsageCallback = {
747
+ code: number;
748
+ completed: string;
749
+ };
750
+
751
+ }
752
+ declare module 'types/usageField' {
753
+ export type UsageField = {
754
+ tags: Array<string>;
755
+ models: Array<string>;
756
+ processing_methods: Array<string>;
757
+ languages: Array<string>;
758
+ features: Array<string>;
759
+ };
760
+
761
+ }
762
+ declare module 'types/usageFieldOptions' {
763
+ export type UsageFieldOptions = {
764
+ start?: string;
765
+ end?: string;
766
+ };
767
+
768
+ }
769
+ declare module 'types/usageOptions' {
770
+ export type UsageOptions = {
771
+ start?: string;
772
+ end?: string;
773
+ accessor?: string;
774
+ tag?: Array<string>;
775
+ method?: "sync" | "async" | "streaming";
776
+ model?: string;
777
+ multichannel?: boolean;
778
+ interim_results?: boolean;
779
+ punctuate?: boolean;
780
+ ner?: boolean;
781
+ utterances?: boolean;
782
+ replace?: boolean;
783
+ profanity_filter?: boolean;
784
+ keywords?: boolean;
785
+ sentiment?: boolean;
786
+ diarize?: boolean;
787
+ detect_language?: boolean;
788
+ search?: boolean;
789
+ redact?: boolean;
790
+ alternatives?: boolean;
791
+ numerals?: boolean;
792
+ };
793
+
794
+ }
795
+ declare module 'types/usageRequestDetail' {
796
+ export type UsageRequestDetail = {
797
+ details: {
798
+ usd: number;
799
+ duration: number;
800
+ total_audio: number;
801
+ channels: number;
802
+ streams: number;
803
+ model: string;
804
+ method: "sync" | "async" | "streaming";
805
+ tags: Array<string>;
806
+ features: Array<string>;
807
+ config: {
808
+ multichannel?: boolean;
809
+ interim_results?: boolean;
810
+ punctuate?: boolean;
811
+ ner?: boolean;
812
+ utterances?: boolean;
813
+ replace?: boolean;
814
+ profanity_filter?: boolean;
815
+ keywords?: boolean;
816
+ sentiment?: boolean;
817
+ diarize?: boolean;
818
+ detect_language?: boolean;
819
+ search?: boolean;
820
+ redact?: boolean;
821
+ alternatives?: boolean;
822
+ numerals?: boolean;
823
+ };
824
+ };
825
+ };
826
+
827
+ }
828
+ declare module 'types/usageRequestMessage' {
829
+ export type UsageRequestMessage = {
830
+ message?: string;
831
+ };
832
+
833
+ }
834
+ declare module 'types/usageRequest' {
835
+ import { UsageCallback } from 'types/usageCallback';
836
+ import { UsageRequestDetail } from 'types/usageRequestDetail';
837
+ import { UsageRequestMessage } from 'types/usageRequestMessage';
838
+ export type UsageRequest = {
839
+ request_id: string;
840
+ created: string;
841
+ path: string;
842
+ accessor: string;
843
+ response?: UsageRequestDetail | UsageRequestMessage;
844
+ callback?: UsageCallback;
845
+ };
846
+
847
+ }
848
+ declare module 'types/usageRequestList' {
849
+ import { UsageRequest } from 'types/usageRequest';
850
+ export type UsageRequestList = {
851
+ page: number;
852
+ limit: number;
853
+ requests?: Array<UsageRequest>;
854
+ };
855
+
856
+ }
857
+ declare module 'types/usageRequestListOptions' {
858
+ export type UsageRequestListOptions = {
859
+ start?: string;
860
+ end?: string;
861
+ page?: number;
862
+ limit?: number;
863
+ status?: "succeeded" | "failed";
864
+ };
865
+
866
+ }
867
+ declare module 'types/usageResponseDetail' {
868
+ export type UsageResponseDetail = {
869
+ start: string;
870
+ end: string;
871
+ hours: number;
872
+ requests: number;
873
+ };
874
+
875
+ }
876
+ declare module 'types/usageResponse' {
877
+ import { UsageResponseDetail } from 'types/usageResponseDetail';
878
+ export type UsageResponse = {
879
+ start: string;
880
+ end: string;
881
+ resolution: {
882
+ units: string;
883
+ amount: number;
884
+ };
885
+ results: Array<UsageResponseDetail>;
886
+ };
887
+
888
+ }
889
+ declare module 'types/projectPatchRequest' {
890
+ export type ProjectPatchRequest = {
891
+ name?: string;
892
+ company?: string;
893
+ };
894
+
895
+ }
896
+ declare module 'types/requestFunction' {
897
+ /// <reference types="node" />
898
+ import { ReadStream } from 'fs';
899
+ export type RequestFunction = NodeRequest | BrowserRequest;
900
+ export type NodeRequest = (method: string, api_key: string, apiUrl: string, path: string, payload?: string | Buffer | ReadStream, options?: Object) => Promise<any>;
901
+ export type BrowserRequest = (method: string, api_key: string, apiUrl: string, path: string, payload?: string) => Promise<any>;
902
+
903
+ }
904
+ declare module 'types/index' {
905
+ export * from 'types/balance';
906
+ export * from 'types/balanceList';
907
+ export * from 'types/channel';
908
+ export * from 'types/createKeyOptions';
909
+ export * from 'types/hit';
910
+ export * from 'types/invitationList';
911
+ export * from 'types/invitationOptions';
912
+ export * from 'types/key';
913
+ export * from 'types/keyResponse';
914
+ export * from 'types/liveTranscriptionOptions';
915
+ export * from 'types/liveTranscriptionResponse';
916
+ export * from 'types/member';
917
+ export * from 'types/memberList';
918
+ export * from 'types/message';
919
+ export * from 'types/metadata';
920
+ export * from 'types/prerecordedTranscriptionOptions';
921
+ export * from 'types/prerecordedTranscriptionResponse';
922
+ export * from 'types/project';
923
+ export * from 'types/projectPatchResponse';
924
+ export * from 'types/projectResponse';
925
+ export * from 'types/scopeList';
926
+ export * from 'types/search';
927
+ export * from 'types/transcriptionSource';
928
+ export * from 'types/usageCallback';
929
+ export * from 'types/usageField';
930
+ export * from 'types/usageFieldOptions';
931
+ export * from 'types/usageOptions';
932
+ export * from 'types/usageRequest';
933
+ export * from 'types/usageRequestDetail';
934
+ export * from 'types/usageRequestList';
935
+ export * from 'types/usageRequestListOptions';
936
+ export * from 'types/usageResponse';
937
+ export * from 'types/usageResponseDetail';
938
+ export * from 'types/utterance';
939
+ export * from 'types/wordBase';
940
+ export * from 'types/keyResponseObj';
941
+ export * from 'types/projectPatchRequest';
942
+ export * from 'types/requestFunction';
943
+
944
+ }
945
+ declare module 'billing' {
946
+ import { BalanceList, Balance, RequestFunction } from 'types';
947
+ export class Billing {
948
+ private _credentials;
949
+ private _apiUrl;
950
+ private _request;
951
+ constructor(_credentials: string, _apiUrl: string, _request: RequestFunction);
952
+ private apiPath;
953
+ /**
954
+ * Retrieves list of balance info of the specified project.
955
+ * @param projectId Unique identifier of the project
956
+ */
957
+ listBalances(projectId: string): Promise<BalanceList>;
958
+ /**
959
+ * Retrieves balance info of a specified balance_id in the specified project.
960
+ * @param projectId Unique identifier of the project
961
+ * @param balanceId Unique identifier of the balance
962
+ */
963
+ getBalance(projectId: string, balanceId: string): Promise<Balance>;
964
+ }
965
+
966
+ }
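A minimal sketch of the Billing methods declared above; the project id is a placeholder:

```ts
import { Deepgram } from "@deepgram/sdk";

async function checkBalances(apiKey: string, projectId: string): Promise<void> {
  const deepgram = new Deepgram(apiKey);
  const { balances } = await deepgram.billing.listBalances(projectId);
  for (const balance of balances ?? []) {
    console.log(`${balance.balance_id}: ${balance.amount} ${balance.units}`);
  }
}
```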
967
+ declare module 'userAgent' {
968
+ export function userAgent(): string;
969
+
970
+ }
971
+ declare module 'httpRequest' {
972
+ /// <reference types="node" />
973
+ import { ReadStream } from 'fs';
974
+ export function _request<T>(method: string, api_key: string, apiUrl: string, path: string, payload?: string | Buffer | ReadStream, options?: Object): Promise<T>;
975
+
976
+ }
977
+ declare module 'constants/defaultOptions' {
978
+ /**
979
+ * Default SDK options
980
+ */
981
+ export const DefaultOptions: {
982
+ apiUrl: string;
983
+ };
984
+
985
+ }
986
+ declare module 'constants/index' {
987
+ export * from 'constants/defaultOptions';
988
+
989
+ }
990
+ declare module 'keys' {
991
+ import { CreateKeyOptions, KeyResponse, Key, RequestFunction } from 'types';
992
+ export class Keys {
993
+ private _credentials;
994
+ private _apiUrl;
995
+ private _request;
996
+ constructor(_credentials: string, _apiUrl: string, _request: RequestFunction);
997
+ private apiPath;
998
+ /**
999
+ * Retrieves all keys associated with the provided projectId
1000
+ * @param projectId Unique identifier of the project containing API keys
1001
+ */
1002
+ list(projectId: string): Promise<KeyResponse>;
1003
+ /**
1004
+ * Retrieves a specific key associated with the provided projectId
1005
+ * @param projectId Unique identifier of the project containing API keys
1006
+ * @param keyId Unique identifier for the key to retrieve
1007
+ */
1008
+ get(projectId: string, keyId: string): Promise<Key>;
1009
+ /**
1010
+ * Creates an API key with the provided scopes
1011
+ * @param projectId Unique identifier of the project to create an API key under
1012
+ * @param comment Comment to describe the key
1013
+ * @param scopes Permission scopes associated with the API key
1014
+ * @param options Optional options used when creating API keys
1015
+ */
1016
+ create(projectId: string, comment: string, scopes: Array<string>, options?: CreateKeyOptions): Promise<Key>;
1017
+ /**
1018
+ * Deletes an API key
1019
+ * @param projectId Unique identifier of the project to create an API key under
1020
+ * @param keyId Unique identifier for the key to delete
1021
+ */
1022
+ delete(projectId: string, keyId: string): Promise<void>;
1023
+ }
1024
+
1025
+ }
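A sketch of the Keys methods declared above; the comment, scope, and time-to-live values are illustrative:

```ts
import { Deepgram } from "@deepgram/sdk";

async function createAndListKeys(apiKey: string, projectId: string): Promise<void> {
  const deepgram = new Deepgram(apiKey);
  // Create a key that expires after 24 hours.
  const created = await deepgram.keys.create(projectId, "CI key", ["member"], {
    timeToLive: 60 * 60 * 24,
  });
  console.log(created.api_key_id, created.key); // the secret is only returned at creation time
  const { api_keys } = await deepgram.keys.list(projectId);
  console.log(`project now has ${api_keys.length} keys`);
}
```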
1026
+ declare module 'projects' {
1027
+ import { Project, ProjectPatchResponse, ProjectResponse, ProjectPatchRequest, RequestFunction } from 'types';
1028
+ export class Projects {
1029
+ private _credentials;
1030
+ private _apiUrl;
1031
+ private _request;
1032
+ constructor(_credentials: string, _apiUrl: string, _request: RequestFunction);
1033
+ private apiPath;
1034
+ /**
1035
+ * Returns all projects accessible by the API key
1036
+ */
1037
+ list(): Promise<ProjectResponse>;
1038
+ /**
1039
+ * Retrieves a specific project based on the provided projectId
1040
+ * @param projectId Unique identifier of the project to retrieve
1041
+ */
1042
+ get(projectId: string): Promise<Project>;
1043
+ /**
1044
+ * Update a specific project
1045
+ * @param project project to update
1046
+ */
1047
+ update(project: Project, payload: ProjectPatchRequest): Promise<ProjectPatchResponse>;
1048
+ }
1049
+
1050
+ }
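A sketch of the Projects methods declared above; the new project name is illustrative:

```ts
import { Deepgram } from "@deepgram/sdk";

async function renameFirstProject(apiKey: string): Promise<void> {
  const deepgram = new Deepgram(apiKey);
  const { projects } = await deepgram.projects.list();
  if (projects.length === 0) return;
  // Only the fields present in the patch are changed; name and company are both optional.
  const result = await deepgram.projects.update(projects[0], { name: "Renamed project" });
  console.log(result.message);
}
```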
1051
+ declare module 'transcription/liveTranscription' {
1052
+ /// <reference types="node" />
1053
+ import EventEmitter from 'events';
1054
+ import { ConnectionState } from 'enums';
1055
+ import { LiveTranscriptionOptions } from 'types';
1056
+ export class LiveTranscription extends EventEmitter {
1057
+ private _socket;
1058
+ constructor(credentials: string, apiUrl: string, options?: LiveTranscriptionOptions);
1059
+ private _bindSocketEvents;
1060
+ /**
1061
+ * Returns the ready state of the websocket connection
1062
+ */
1063
+ getReadyState(): ConnectionState;
1064
+ /**
1065
+ * Sends data to the Deepgram API via websocket connection
1066
+ * @param data Audio data to send to Deepgram
1067
+ */
1068
+ send(data: string | ArrayBufferLike | Blob | ArrayBufferView): void;
1069
+ /**
1070
+ * Denote that you are finished sending audio and close
1071
+ * the websocket connection when transcription is finished
1072
+ */
1073
+ finish(): void;
1074
+ }
1075
+
1076
+ }
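A hedged end-to-end sketch of the LiveTranscription class declared above. The method names and event-name strings come from these declarations and the LiveTranscriptionEvents values earlier in the file; the audio path and the assumption that the transcript event delivers the raw JSON message are illustrative:

```ts
import { createReadStream } from "fs";
import { Deepgram } from "@deepgram/sdk";

function streamFile(apiKey: string): void {
  const deepgram = new Deepgram(apiKey);
  const live = deepgram.transcription.live({ punctuate: true, interim_results: true });

  live.addListener("open", () => {
    // Once the socket is open, pipe audio chunks in and signal the end of the stream.
    const audio = createReadStream("call.wav");
    audio.on("data", (chunk) => live.send(chunk));
    audio.on("end", () => live.finish());
  });

  live.addListener("transcriptReceived", (message) => {
    // Assumption: the payload is the raw JSON message from the API.
    const data = JSON.parse(message);
    console.log(data.channel?.alternatives?.[0]?.transcript ?? "");
  });

  live.addListener("error", (err) => console.error(err));
  live.addListener("close", () => console.log("connection closed"));
}
```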
1077
+ declare module 'transcription/preRecordedTranscription' {
1078
+ import { PrerecordedTranscriptionOptions, PrerecordedTranscriptionResponse, TranscriptionSource } from 'types';
1079
+ /**
1080
+ * Transcribes audio from a file or buffer
1081
+ * @param credentials Base64 encoded API key & secret
1082
+ * @param source Url or Buffer of file to transcribe
1083
+ * @param options Options to modify transcriptions
1084
+ */
1085
+ export const preRecordedTranscription: (apiKey: string, apiUrl: string, source: TranscriptionSource, options?: PrerecordedTranscriptionOptions | undefined) => Promise<PrerecordedTranscriptionResponse>;
1086
+
1087
+ }
1088
+ declare module 'transcription/index' {
1089
+ import { LiveTranscriptionOptions, PrerecordedTranscriptionOptions, PrerecordedTranscriptionResponse, TranscriptionSource } from 'types';
1090
+ import { LiveTranscription } from 'transcription/liveTranscription';
1091
+ export class Transcriber {
1092
+ private _credentials;
1093
+ private _apiUrl;
1094
+ constructor(_credentials: string, _apiUrl: string);
1095
+ /**
1096
+ * Transcribes prerecorded audio from a file or buffer
1097
+ * @param source Url or Buffer of file to transcribe
1098
+ * @param options Options to modify transcriptions
1099
+ */
1100
+ preRecorded(source: TranscriptionSource, options?: PrerecordedTranscriptionOptions): Promise<PrerecordedTranscriptionResponse>;
1101
+ /**
1102
+ * Opens a websocket to Deepgram's API for live transcriptions
1103
+ * @param options Options to modify transcriptions
1104
+ */
1105
+ live(options?: LiveTranscriptionOptions): LiveTranscription;
1106
+ }
1107
+
1108
+ }
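And a sketch of the prerecorded path on the same Transcriber, reading the first alternative of each channel; the URL is illustrative:

```ts
import { Deepgram } from "@deepgram/sdk";

async function transcribeUrl(apiKey: string): Promise<void> {
  const deepgram = new Deepgram(apiKey);
  const response = await deepgram.transcription.preRecorded(
    { url: "https://example.com/interview.mp3" },
    { model: "general", punctuate: true }
  );
  for (const channel of response.results?.channels ?? []) {
    console.log(channel.alternatives[0]?.transcript);
  }
}
```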
1109
+ declare module 'usage' {
1110
+ import { RequestFunction, UsageField, UsageFieldOptions, UsageOptions, UsageRequest, UsageRequestList, UsageRequestListOptions, UsageResponse } from 'types';
1111
+ export class Usage {
1112
+ private _credentials;
1113
+ private _apiUrl;
1114
+ private _request;
1115
+ constructor(_credentials: string, _apiUrl: string, _request: RequestFunction);
1116
+ private apiPath;
1117
+ /**
1118
+ * Retrieves all requests associated with the provided projectId based
1119
+ * on the provided options
1120
+ * @param projectId Unique identifier of the project
1121
+ * @param options Additional filter options
1122
+ */
1123
+ listRequests(projectId: string, options?: UsageRequestListOptions): Promise<UsageRequestList>;
1124
+ /**
1125
+ * Retrieves a specific request associated with the provided projectId
1126
+ * @param projectId Unique identifier of the project
1127
+ * @param requestId Unique identifier for the request to retrieve
1128
+ */
1129
+ getRequest(projectId: string, requestId: string): Promise<UsageRequest>;
1130
+ /**
1131
+ * Retrieves usage associated with the provided projectId based
1132
+ * on the provided options
1133
+ * @param projectId Unique identifier of the project
1134
+ * @param options Options to filter usage
1135
+ */
1136
+ getUsage(projectId: string, options?: UsageOptions): Promise<UsageResponse>;
1137
+ /**
1138
+ * Retrieves features used by the provided projectId based
1139
+ * on the provided options
1140
+ * @param projectId Unique identifier of the project
1141
+ * @param options Options to filter usage
1142
+ */
1143
+ getFields(projectId: string, options?: UsageFieldOptions): Promise<UsageField>;
1144
+ }
1145
+
1146
+ }
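A sketch of the Usage methods declared above; the date range and filters are illustrative:

```ts
import { Deepgram } from "@deepgram/sdk";

async function summarizeUsage(apiKey: string, projectId: string): Promise<void> {
  const deepgram = new Deepgram(apiKey);
  const usage = await deepgram.usage.getUsage(projectId, {
    start: "2022-01-01",
    end: "2022-01-31",
    method: "sync",
  });
  for (const slice of usage.results) {
    console.log(`${slice.start}..${slice.end}: ${slice.hours} hours over ${slice.requests} requests`);
  }
  const recent = await deepgram.usage.listRequests(projectId, { limit: 10, status: "succeeded" });
  console.log(`showing ${recent.requests?.length ?? 0} requests from page ${recent.page}`);
}
```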
1147
+ declare module 'members' {
1148
+ import { MemberList, Message, RequestFunction } from 'types';
1149
+ export class Members {
1150
+ private _credentials;
1151
+ private _apiUrl;
1152
+ private _request;
1153
+ constructor(_credentials: string, _apiUrl: string, _request: RequestFunction);
1154
+ private apiPath;
1155
+ /**
1156
+ * Retrieves account objects for all of the accounts in the specified project.
1157
+ * @param projectId Unique identifier of the project
1158
+ */
1159
+ listMembers(projectId: string): Promise<MemberList>;
1160
+ /**
1161
+ * Retrieves account objects for all of the accounts in the specified project.
1162
+ * @param projectId Unique identifier of the project
1163
+ * @param memberId Unique identifier of the member
1164
+ */
1165
+ removeMember(projectId: string, memberId: string): Promise<Message>;
1166
+ }
1167
+
1168
+ }
1169
+ declare module 'invitation' {
1170
+ import { Message, InvitationOptions, InvitationList, RequestFunction } from 'types';
1171
+ export class Invitation {
1172
+ private _credentials;
1173
+ private _apiUrl;
1174
+ private _request;
1175
+ constructor(_credentials: string, _apiUrl: string, _request: RequestFunction);
1176
+ private apiPath;
1177
+ /**
1178
+ * Lists all the current invites of a specified project.
1179
+ * @param projectId Unique identifier of the project
1180
+ */
1181
+ list(projectId: string): Promise<InvitationList>;
1182
+ /**
1183
+ * Sends an invitation to join the specified project.
1184
+ * @param projectId Unique identifier of the project
1185
+ */
1186
+ send(projectId: string, options: InvitationOptions): Promise<Message>;
1187
+ /**
1188
+ * Removes the authenticated account from the specified project.
1189
+ * @param projectId Unique identifier of the project
1190
+ */
1191
+ leave(projectId: string): Promise<Message>;
1192
+ /**
1193
+ * Removes the specified email from the invitations on the specified project.
1194
+ * @param projectId Unique identifier of the project
1195
+ * @param email email address of the invitee
1196
+ * NOTE: This will return successful even if the email does not have an invite on the project.
1197
+ */
1198
+ delete(projectId: string, email: string): Promise<Message>;
1199
+ }
1200
+
1201
+ }
1202
+ declare module 'scopes' {
1203
+ import { ScopeList, Message, RequestFunction } from 'types';
1204
+ export class Scopes {
1205
+ private _credentials;
1206
+ private _apiUrl;
1207
+ private _request;
1208
+ constructor(_credentials: string, _apiUrl: string, _request: RequestFunction);
1209
+ private apiPath;
1210
+ /**
1211
+ * Retrieves scopes of the specified member in the specified project.
1212
+ * @param projectId Unique identifier of the project
1213
+ * @param memberId Unique identifier of the member
1214
+ */
1215
+ get(projectId: string, memberId: string): Promise<ScopeList>;
1216
+ /**
1217
+ * Updates the scope for the specified member in the specified project.
1218
+ * @param projectId Unique identifier of the project
1219
+ * @param memberId Unique identifier of the member being updated
1220
+ * @param scope string of the scope to update to
1221
+ */
1222
+ update(projectID: string, memberId: string, scope: string): Promise<Message>;
1223
+ }
1224
+
1225
+ }
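A combined sketch of the Members, Invitation, and Scopes surfaces declared above; the email address and scope value are illustrative:

```ts
import { Deepgram } from "@deepgram/sdk";

async function administerProject(apiKey: string, projectId: string): Promise<void> {
  const deepgram = new Deepgram(apiKey);

  // Invite a collaborator (the scope value is an example).
  await deepgram.invitation.send(projectId, { email: "teammate@example.com", scope: "member" });

  // List current members and inspect each member's scopes.
  const { members } = await deepgram.members.listMembers(projectId);
  for (const member of members ?? []) {
    const { scopes } = await deepgram.scopes.get(projectId, member.member_id);
    console.log(member.email, scopes);
  }
}
```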
1226
+ declare module 'index' {
1227
+ import { Keys } from 'keys';
1228
+ import { Projects } from 'projects';
1229
+ import { Transcriber } from 'transcription';
1230
+ import { Usage } from 'usage';
1231
+ import { Members } from 'members';
1232
+ import { Invitation } from 'invitation';
1233
+ import { Billing } from 'billing';
1234
+ import { Scopes } from 'scopes';
1235
+ export class Deepgram {
1236
+ private _apiUrl;
1237
+ private _apiKey;
1238
+ keys: Keys;
1239
+ projects: Projects;
1240
+ transcription: Transcriber;
1241
+ usage: Usage;
1242
+ members: Members;
1243
+ invitation: Invitation;
1244
+ billing: Billing;
1245
+ scopes: Scopes;
1246
+ constructor(apiKey: string, apiUrl?: string);
1247
+ }
1248
+
1249
+ }
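The Node entry point declared above needs only an API key; the optional second argument can point the client at a different host. A sketch with placeholder values:

```ts
import { Deepgram } from "@deepgram/sdk";

// Hosted API: the default apiUrl is used when the second argument is omitted.
const deepgram = new Deepgram("YOUR_DEEPGRAM_API_KEY");

// Pointing at a non-default host (for example a self-hosted endpoint); the hostname is a placeholder.
const onPrem = new Deepgram("YOUR_DEEPGRAM_API_KEY", "deepgram.example.internal");
```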
1250
+ declare module 'browser/httpFetch' {
1251
+ export function _request<T>(method: string, api_key: string, apiUrl: string, path: string, payload?: string): Promise<T>;
1252
+
1253
+ }
1254
+ declare module 'browser/transcription/preRecordedTranscription' {
1255
+ import { PrerecordedTranscriptionOptions, PrerecordedTranscriptionResponse, UrlSource } from 'types';
1256
+ /**
1257
+ * Transcribes audio from a url
1258
+ * @param credentials Base64 encoded API key & secret
1259
+ * @param apiUrl url string of Deepgram's API
1260
+ * @param source Url or Buffer of file to transcribe
1261
+ * @param options Options to modify transcriptions
1262
+ */
1263
+ export const preRecordedTranscription: (apiKey: string, apiUrl: string, source: UrlSource, options?: PrerecordedTranscriptionOptions | undefined) => Promise<PrerecordedTranscriptionResponse>;
1264
+
1265
+ }
1266
+ declare module 'browser/transcription/index' {
1267
+ import { LiveTranscriptionOptions, PrerecordedTranscriptionOptions, PrerecordedTranscriptionResponse, UrlSource } from 'types';
1268
+ export class Transcriber {
1269
+ private _credentials;
1270
+ private _apiUrl;
1271
+ constructor(_credentials: string, _apiUrl: string);
1272
+ /**
1273
+ * Transcribes prerecorded audio from a file or buffer
1274
+ * @param source Url or Buffer of file to transcribe
1275
+ * @param options Options to modify transcriptions
1276
+ */
1277
+ preRecorded(source: UrlSource, options?: PrerecordedTranscriptionOptions): Promise<PrerecordedTranscriptionResponse>;
1278
+ /**
1279
+ * Opens a websocket to Deepgram's API for live transcriptions
1280
+ * @param options Options to modify transcriptions
1281
+ */
1282
+ live(options?: LiveTranscriptionOptions): WebSocket;
1283
+ }
1284
+
1285
+ }
1286
+ declare module 'browser/index' {
1287
+ import { Transcriber } from 'browser/transcription';
1288
+ import { Projects } from 'projects';
1289
+ import { Keys } from 'keys';
1290
+ import { Usage } from 'usage';
1291
+ import { Members } from 'members';
1292
+ import { Invitation } from 'invitation';
1293
+ import { Billing } from 'billing';
1294
+ import { Scopes } from 'scopes';
1295
+ export class Deepgram {
1296
+ private _apiUrl;
1297
+ private _apiKey;
1298
+ transcription: Transcriber;
1299
+ projects: Projects;
1300
+ keys: Keys;
1301
+ usage: Usage;
1302
+ members: Members;
1303
+ invitation: Invitation;
1304
+ billing: Billing;
1305
+ scopes: Scopes;
1306
+ constructor(apiKey: string, apiUrl?: string);
1307
+ }
1308
+
1309
+ }
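The browser build declared above exposes the same Deepgram class, but preRecorded accepts only a UrlSource and live() hands back a plain WebSocket rather than the Node EventEmitter wrapper. A sketch, assuming a bundler resolves the package to this browser entry and using an illustrative URL:

```ts
import { Deepgram } from "@deepgram/sdk";

async function transcribeInBrowser(apiKey: string): Promise<void> {
  const deepgram = new Deepgram(apiKey);
  // Only URL sources are supported by the browser transcriber.
  const response = await deepgram.transcription.preRecorded(
    { url: "https://example.com/podcast.mp3" },
    { punctuate: true }
  );
  console.log(response.results?.channels[0]?.alternatives[0]?.transcript);
}
```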
1310
+ declare module 'types/keyword' {
1311
+ export type Keyword = {
1312
+ keyword: string;
1313
+ boost?: number;
1314
+ };
1315
+
25
1316
  }