@deepgram/sdk 1.1.0 → 1.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -9,6 +9,28 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
9
9
 
10
10
  ---
11
11
 
12
+ ## [1.2.2]
13
+
14
+ ### Updated
15
+
16
+ - Updated the `wordBase` type to include an optional `speaker` property.
17
+ - Updated the documentation for the speaker property of the `utterance` type.
18
+
19
+ ## [1.2.1]
20
+
21
+ ### Fixed
22
+
23
+ - Fixed a bug that caused real-time transcriptions to not close correctly. This
24
+ would result in the user not receiving the final transcription.
25
+
26
+ ## [1.2.0]
27
+
28
+ ### Updated
29
+
30
+ - Updated the `keys.create` function to allow new `expirationDate` or `timeToLive`
31
+ values. These are optional and at most one can be provided. Providing both will
32
+ throw an error.
33
+
12
34
  ## [1.1.0]
13
35
 
14
36
  ### Added
@@ -129,7 +151,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
129
151
 
130
152
  ---
131
153
 
132
- [unreleased]: https://github.com/deepgram/node-sdk/compare/1.1.0...HEAD
154
+ [unreleased]: https://github.com/deepgram/node-sdk/compare/1.2.2...HEAD
155
+ [1.2.2]: https://github.com/deepgram/node-sdk/compare/1.2.1...1.2.2
156
+ [1.2.1]: https://github.com/deepgram/node-sdk/compare/1.2.0...1.2.1
157
+ [1.2.0]: https://github.com/deepgram/node-sdk/compare/1.1.0...1.2.0
133
158
  [1.1.0]: https://github.com/deepgram/node-sdk/compare/1.0.3...1.1.0
134
159
  [1.0.3]: https://github.com/deepgram/node-sdk/compare/1.0.2...1.0.3
135
160
  [1.0.2]: https://github.com/deepgram/node-sdk/compare/1.0.0...1.0.2
package/README.md CHANGED
@@ -10,6 +10,11 @@ speech recognition APIs.
10
10
  To access the API you will need a Deepgram account. Sign up for free at
11
11
  [signup][signup].
12
12
 
13
+ ## Documentation
14
+
15
+ Full documentation of the Node.js SDK can be found on the
16
+ [Deepgram Developer Portal](https://developers.deepgram.com/sdks-tools/sdks/node-sdk/).
17
+
13
18
  You can learn more about the full Deepgram API at [https://developers.deepgram.com](https://developers.deepgram.com).
14
19
 
15
20
  ## Installation
@@ -34,822 +39,60 @@ const { Deepgram } = require("@deepgram/sdk");
34
39
  const deepgram = new Deepgram(DEEPGRAM_API_KEY);
35
40
  ```
36
41
 
37
- ## Usage
38
-
39
- ## Transcription
40
-
41
- The `transcription` property can handle both pre-recorded and live transcriptions.
42
+ ## Examples
42
43
 
43
- ### Prerecorded Transcription
44
+ ### Transcribe an Existing File
44
45
 
45
- The `transcription.preRecorded` method handles sending an existing file or
46
- buffer to the Deepgram API to generate a transcription. [Additional options](#prerecorded-transcription-options)
47
- can be provided to customize the result.
46
+ #### Remote Files
48
47
 
49
48
  ```js
50
- // Sending a file
51
49
  const fileSource = { url: URL_OF_FILE };
52
50
 
53
- // Sending a buffer
54
- const bufferSource = { buffer: BUFFER_OF_FILE, mimetype: MIMETYPE_OF_FILE };
55
-
56
- // Sending a ReadStream
57
- const streamSource = {
58
- stream: fs.createReadStream("/path/to/file"),
59
- mimetype: MIMETYPE_OF_FILE,
60
- };
61
-
62
- // Both fileSource or bufferSource could be provided as the source parameter
63
- const response = await deepgram.transcription.preRecorded(
64
- fileSource | bufferSource | streamSource,
65
- {
66
- punctuate: true,
67
- // other options are available
68
- }
69
- );
70
- ```
71
-
72
- #### Prerecorded Transcription Options
73
-
74
- Additional transcription options can be provided for prerecorded transcriptions.
75
-
76
- ```js
77
- {
78
- /**
79
- * AI model used to process submitted audio.
80
- * @default general
81
- * @remarks Possible values are general, phonecall, meeting or a custom string
82
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/model
83
- */
84
- model?: Models | string;
85
-
86
- /**
87
- * Version of the model to use.
88
- * @default latest
89
- * @remarks latest OR <version_id>
90
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/version
91
- */
92
- version: string;
93
- /**
94
- * BCP-47 language tag that hints at the primary spoken language.
95
- * @default en-US
96
- * @remarks Possible values are en-GB, en-IN, en-NZ, en-US, es, fr, ko, pt,
97
- * pt-BR, ru, tr or null
98
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/language
99
- */
100
- language?: string;
101
- /**
102
- * Indicates whether to add punctuation and capitalization to the transcript.
103
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/punctuate
104
- */
105
- punctuate?: boolean;
106
- /**
107
- * Indicates whether to remove profanity from the transcript.
108
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/profanity_filter
109
- */
110
- profanity_filter?: boolean;
111
- /**
112
- * Indicates whether to redact sensitive information, replacing redacted content with asterisks (*).
113
- * @remarks Options include:
114
- * `pci`: Redacts sensitive credit card information, including credit card number, expiration date, and CVV
115
- * `numbers` (or `true)`: Aggressively redacts strings of numerals
116
- * `ssn` (*beta*): Redacts social security numbers
117
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/redact
118
- */
119
- redact?: Array<string>;
120
- /**
121
- * Indicates whether to recognize speaker changes. When set to true, each word
122
- * in the transcript will be assigned a speaker number starting at 0.
123
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/diarize
124
- */
125
- diarize?: boolean;
126
- /**
127
- * Indicates whether to transcribe each audio channel independently. When set
128
- * to true, you will receive one transcript for each channel, which means you
129
- * can apply a different model to each channel using the model parameter (e.g.,
130
- * set model to general:phonecall, which applies the general model to channel
131
- * 0 and the phonecall model to channel 1).
132
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/multichannel
133
- */
134
- multichannel?: boolean;
135
- /**
136
- * Maximum number of transcript alternatives to return. Just like a human listener,
137
- * Deepgram can provide multiple possible interpretations of what it hears.
138
- * @default 1
139
- */
140
- alternatives?: number;
141
- /**
142
- * Indicates whether to convert numbers from written format (e.g., one) to
143
- * numerical format (e.g., 1). Deepgram can format numbers up to 999,999.
144
- * @remarks Converted numbers do not include punctuation. For example,
145
- * 999,999 would be transcribed as 999999.
146
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/numerals
147
- */
148
- numerals?: boolean;
149
- /**
150
- * Terms or phrases to search for in the submitted audio. Deepgram searches
151
- * for acoustic patterns in audio rather than text patterns in transcripts
152
- * because we have noticed that acoustic pattern matching is more performant.
153
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/search
154
- */
155
- search?: Array<string>;
156
- /**
157
- * Callback URL to provide if you would like your submitted audio to be
158
- * processed asynchronously. When passed, Deepgram will immediately respond
159
- * with a request_id. When it has finished analyzing the audio, it will send
160
- * a POST request to the provided URL with an appropriate HTTP status code.
161
- * @remarks You may embed basic authentication credentials in the callback URL.
162
- * Only ports 80, 443, 8080, and 8443 can be used for callbacks.
163
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/callback
164
- */
165
- callback?: string;
166
- /**
167
- * Keywords to which the model should pay particular attention to boosting
168
- * or suppressing to help it understand context. Just like a human listener,
169
- * Deepgram can better understand mumbled, distorted, or otherwise
170
- * hard-to-decipher speech when it knows the context of the conversation.
171
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/keywords
172
- */
173
- keywords?: Array<string>;
174
- /**
175
- * Indicates whether Deepgram will segment speech into meaningful semantic
176
- * units, which allows the model to interact more naturally and effectively
177
- * with speakers' spontaneous speech patterns. For example, when humans
178
- * speak to each other conversationally, they often pause mid-sentence to
179
- * reformulate their thoughts, or stop and restart a badly-worded sentence.
180
- * When utterances is set to true, these utterances are identified and
181
- * returned in the transcript results.
182
- *
183
- * By default, when utterances is enabled, it starts a new utterance after
184
- * 0.8 s of silence. You can customize the length of time used to determine
185
- * where to split utterances by submitting the utt_split parameter.
186
- * @remarks **BETA FEATURE**
187
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/utterances
188
- */
189
- utterances?: boolean;
190
- /**
191
- * Length of time in seconds of silence between words that Deepgram will
192
- * use when determining where to split utterances. Used when utterances
193
- * is enabled.
194
- * @default 0.8 seconds
195
- * @remarks **BETA FEATURE**
196
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/utt_split
197
- */
198
- utt_split?: number;
199
- }
200
- ```
201
-
202
- #### Prerecorded Transcription Response
203
-
204
- ```ts
205
- {
206
- request_id?: string;
207
- metadata?: {
208
- request_id: string;
209
- transaction_key: string;
210
- sha256: string;
211
- created: string;
212
- duration: number;
213
- channels: number;
214
- };
215
- results?: {
216
- channels: Array<{
217
- search?: Array<{
218
- query: string;
219
- hits: Array<{
220
- confidence: number;
221
- start: number;
222
- end: number;
223
- snippet: string;
224
- }>;
225
- }>;
226
- alternatives: Array<{
227
- transcript: string;
228
- confidence: number;
229
- words: Array<{
230
- word: string;
231
- start: number;
232
- end: number;
233
- confidence: number;
234
- punctuated_word?: string;
235
- }>;
236
- }>;
237
- }>;
238
- utterances?: Array<{
239
- start: number;
240
- end: number;
241
- confidence: number;
242
- channel: number;
243
- transcript: string;
244
- words: Array<{
245
- word: string;
246
- start: number;
247
- end: number;
248
- confidence: number;
249
- punctuated_word?: string;
250
- }>;
251
- speaker?: number;
252
- id: string;
253
- }>;
254
- };
255
- };
256
- ```
257
-
258
- ### Live Transcription
259
-
260
- The `transcription.live` method provides access to a websocket connection
261
- to the Deepgram API for generating streaming transcriptions. [Additional options](#live-transcription-options)
262
- can be provided to customize the result.
263
-
264
- ```js
265
- const deepgramLive = deepgram.transcription.live({ punctuate: true });
266
-
267
- socket.on("microphone-stream", (stream) => {
268
- deepgramSocket.send(stream);
269
- });
270
-
271
- /**
272
- * Receive transcriptions based on sent streams
273
- */
274
- deepgramLive.addListener("transcriptReceived", (transcription) => {
275
- console.log(transcription.data);
51
+ const response = await deepgram.transcription.preRecorded(fileSource, {
52
+ punctuate: true,
276
53
  });
277
54
  ```
278
55
 
279
- #### Events
280
-
281
- The following events are fired by the live transcription object:
282
-
283
- | Event | Description | Data |
284
- | -------------------- | ----------------------------------------------------- | ------------------------------------------------- |
285
- | `open` | The websocket connection to Deepgram has been opened. | The DG live transcription object |
286
- | `close` | The websocket connection to Deepgram has been closed. | WebSocket.CloseEvent |
287
- | `error` | An error occurred with the websocket connection | Error object |
288
- | `transcriptReceived` | Deepgram has responded with a transcription | [Transcription Response](#transcription-response) |
289
-
290
- #### Live Transcription Options
291
-
292
- Additional transcription options can be provided for live transcriptions.
56
+ #### Local Files
293
57
 
294
58
  ```js
295
- {
296
- /**
297
- * AI model used to process submitted audio.
298
- * @default general
299
- * @remarks Possible values are general, phonecall, meeting or a custom string
300
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/model
301
- */
302
- model?: Models | string;
303
-
304
- /**
305
- * Version of the model to use.
306
- * @default latest
307
- * @remarks latest OR <version_id>
308
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/version
309
- */
310
- version: string;
311
- /**
312
- * BCP-47 language tag that hints at the primary spoken language.
313
- * @default en-US
314
- * @remarks Possible values are en-GB, en-IN, en-NZ, en-US, es, fr, ko, pt,
315
- * pt-BR, ru, tr or null
316
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/language
317
- */
318
- language?: string;
319
- /**
320
- * Indicates whether to add punctuation and capitalization to the transcript.
321
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/punctuate
322
- */
323
- punctuate?: boolean;
324
- /**
325
- * Indicates whether to remove profanity from the transcript.
326
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/profanity_filter
327
- */
328
- profanity_filter?: boolean;
329
- /**
330
- * Indicates whether to redact sensitive information, replacing redacted content with asterisks (*).
331
- * @remarks Options include:
332
- * `pci`: Redacts sensitive credit card information, including credit card number, expiration date, and CVV
333
- * `numbers` (or `true)`: Aggressively redacts strings of numerals
334
- * `ssn` (*beta*): Redacts social security numbers
335
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/redact
336
- */
337
- redact?: Array<string>;
338
- /**
339
- * Indicates whether to recognize speaker changes. When set to true, each word
340
- * in the transcript will be assigned a speaker number starting at 0.
341
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/diarize
342
- */
343
- diarize?: boolean;
344
- /**
345
- * Indicates whether to transcribe each audio channel independently. When set
346
- * to true, you will receive one transcript for each channel, which means you
347
- * can apply a different model to each channel using the model parameter (e.g.,
348
- * set model to general:phonecall, which applies the general model to channel
349
- * 0 and the phonecall model to channel 1).
350
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/multichannel
351
- */
352
- multichannel?: boolean;
353
- /**
354
- * Maximum number of transcript alternatives to return. Just like a human listener,
355
- * Deepgram can provide multiple possible interpretations of what it hears.
356
- * @default 1
357
- */
358
- alternatives?: number;
359
- /**
360
- * Indicates whether to convert numbers from written format (e.g., one) to
361
- * numerical format (e.g., 1). Deepgram can format numbers up to 999,999.
362
- * @remarks Converted numbers do not include punctuation. For example,
363
- * 999,999 would be transcribed as 999999.
364
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/numerals
365
- */
366
- numerals?: boolean;
367
- /**
368
- * Terms or phrases to search for in the submitted audio. Deepgram searches
369
- * for acoustic patterns in audio rather than text patterns in transcripts
370
- * because we have noticed that acoustic pattern matching is more performant.
371
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/search
372
- */
373
- search?: Array<string>;
374
- /**
375
- * Callback URL to provide if you would like your submitted audio to be
376
- * processed asynchronously. When passed, Deepgram will immediately respond
377
- * with a request_id. When it has finished analyzing the audio, it will send
378
- * a POST request to the provided URL with an appropriate HTTP status code.
379
- * @remarks You may embed basic authentication credentials in the callback URL.
380
- * Only ports 80, 443, 8080, and 8443 can be used for callbacks.
381
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/callback
382
- */
383
- callback?: string;
384
- /**
385
- * Keywords to which the model should pay particular attention to boosting
386
- * or suppressing to help it understand context. Just like a human listener,
387
- * Deepgram can better understand mumbled, distorted, or otherwise
388
- * hard-to-decipher speech when it knows the context of the conversation.
389
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/keywords
390
- */
391
- keywords?: Array<string>;
392
- /**
393
- * Indicates whether the streaming endpoint should send you updates to its
394
- * transcription as more audio becomes available. By default, the streaming
395
- * endpoint returns regular updates, which means transcription results will
396
- * likely change for a period of time. You can avoid receiving these updates
397
- * by setting this flag to false.
398
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/interim_results
399
- */
400
- interim_results?: boolean;
401
- /**
402
- * Indicates whether Deepgram will detect whether a speaker has finished
403
- * speaking (or paused for a significant period of time, indicating the
404
- * completion of an idea). When Deepgram detects an endpoint, it assumes
405
- * that no additional data will improve its prediction, so it immediately
406
- * finalizes the result for the processed time range and returns the
407
- * transcript with a speech_final parameter set to true.
408
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/endpointing
409
- */
410
- endpointing?: boolean;
411
- /**
412
- * Length of time in milliseconds of silence that voice activation detection
413
- * (VAD) will use to detect that a speaker has finished speaking. Used when
414
- * endpointing is enabled. Defaults to 10 ms. Deepgram customers may configure
415
- * a value between 10 ms and 500 ms; on-premise customers may remove this
416
- * restriction.
417
- * @default 10
418
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/vad_turnoff
419
- */
420
- vad_turnoff?: number;
421
- /**
422
- * Expected encoding of the submitted streaming audio.
423
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/encoding
424
- */
425
- encoding?: string;
426
- /**
427
- * Number of independent audio channels contained in submitted streaming
428
- * audio. Only read when a value is provided for encoding.
429
- * @default 1
430
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/channels
431
- */
432
- channels?: number;
433
- /**
434
- * Sample rate of submitted streaming audio. Required (and only read)
435
- * when a value is provided for encoding.
436
- * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/sample_rate
437
- */
438
- sample_rate?: number;
439
- }
440
- ```
441
-
442
- #### Live Transcription Response
443
-
444
- ```ts
445
- {
446
- channel_index: Array<number>;
447
- duration: number;
448
- start: number;
449
- is_final: boolean;
450
- speech_final: boolean;
451
- channel: {
452
- search?: Array<{
453
- query: string;
454
- hits: Array<{
455
- confidence: number;
456
- start: number;
457
- end: number;
458
- snippet: string;
459
- }>
460
- }>,
461
- alternatives: Array<{
462
- transcript: string;
463
- confidence: number;
464
- words: Array<{
465
- word: string;
466
- start: number;
467
- end: number;
468
- confidence: number;
469
- punctuated_word?: string;
470
- }>
471
- }>
472
- }
59
+ const streamSource = {
60
+ stream: fs.createReadStream("/path/to/file"),
61
+ mimetype: MIMETYPE_OF_FILE,
473
62
  };
474
- ```
475
-
476
- ## Project Management
477
-
478
- ### List Projects
479
-
480
- Retrieve all projects
481
-
482
- ```js
483
- const projects = await deepgram.projects.list();
484
- ```
485
-
486
- #### List Projects Response
487
-
488
- ```ts
489
- {
490
- projects: [
491
- {
492
- id: string,
493
- name: string,
494
- },
495
- ],
496
- }
497
- ```
498
-
499
- ### Get a Project
500
63
 
501
- Retrieves all project based on the provided project id.
502
-
503
- ```js
504
- const project = await deepgram.projects.get(PROJECT_ID);
505
- ```
506
-
507
- #### Get a Project Response
508
-
509
- ```ts
510
- {
511
- id: string,
512
- name: string,
513
- }
514
- ```
515
-
516
- ### Update a Project
517
-
518
- Updates a project based on a provided project object. This object must contain
519
- `project_id` and `name` properties.
520
-
521
- ```js
522
- const updateResponse = await deepgram.projects.update(project);
523
- ```
524
-
525
- #### Update a Project Response
526
-
527
- ```ts
528
- {
529
- message: string;
530
- }
531
- ```
532
-
533
- ## Key Management
534
-
535
- ### List Keys
536
-
537
- Retrieves all keys for a given project.
538
-
539
- ```js
540
- const response = await deepgram.keys.list(PROJECT_ID);
541
- ```
542
-
543
- #### List Keys Response
544
-
545
- ```ts
546
- {
547
- api_keys: [
548
- {
549
- api_key_id: string,
550
- comment: string,
551
- created: string,
552
- scopes: Array<string>
553
- },
554
- ];
555
- }
556
- ```
557
-
558
- ### Create Key
559
-
560
- Create a new API key for a project using the `keys.create` method
561
- with a name for the key.
562
-
563
- ```js
564
- const response = await deepgram.keys.create(PROJECT_ID, COMMENT_FOR_KEY);
565
- ```
566
-
567
- #### Create Key Response
568
-
569
- ```ts
570
- {
571
- api_key_id: string,
572
- key: string,
573
- comment: string,
574
- created: string,
575
- scopes: Array<string>
576
- }
577
- ```
578
-
579
- ### Delete key
580
-
581
- Delete an existing API key using the `keys.delete` method with the key to
582
- delete.
583
-
584
- ```js
585
- await deepgram.keys.delete(PROJECT_ID, KEY_ID);
586
- ```
587
-
588
- ## Usage
589
-
590
- ### Requests by Project
591
-
592
- Retrieves transcription requests for a project based on the provided options.
593
-
594
- ```js
595
- const response = await deepgram.usage.listRequests(PROJECT_ID, {
596
- limit: 10,
597
- // other options are available
64
+ const response = await deepgram.transcription.preRecorded(streamSource, {
65
+ punctuate: true,
598
66
  });
599
67
  ```
600
68
 
601
- #### Requests by Project Options
602
-
603
- ```js
604
- {
605
- // The time to retrieve requests made since
606
- // Example: "2020-01-01T00:00:00+00:00"
607
- start?: string,
608
- // The time to retrieve requests made until
609
- // Example: "2021-01-01T00:00:00+00:00"
610
- end?: string,
611
- // Page of requests to return
612
- // Defaults to 0
613
- page?: number,
614
- // Number of requests to return per page
615
- // Defaults to 10. Maximum of 100
616
- limit?: number,
617
- // Filter by succeeded or failed requests
618
- // By default, all requests are returned
619
- status?: 'succeeded' | 'failed'
620
- }
621
- ```
622
-
623
- #### Requests by Project Response
624
-
625
- ```ts
626
- {
627
- page: number,
628
- limit: number,
629
- requests?: [
630
- {
631
- request_id: string;
632
- created: string;
633
- path: string;
634
- accessor: string;
635
- response?: {
636
- details: {
637
- usd: number;
638
- duration: number;
639
- total_audio: number;
640
- channels: number;
641
- streams: number;
642
- model: string;
643
- method: string;
644
- tags: Array<string>;
645
- features: Array<string>;
646
- config: {
647
- multichannel?: boolean;
648
- interim_results?: boolean;
649
- punctuate?: boolean;
650
- ner?: boolean;
651
- utterances?: boolean;
652
- replace?: boolean;
653
- profanity_filter?: boolean;
654
- keywords?: boolean;
655
- sentiment?: boolean;
656
- diarize?: boolean;
657
- detect_language?: boolean;
658
- search?: boolean;
659
- redact?: boolean;
660
- alternatives?: boolean;
661
- numerals?: boolean;
662
- };
663
- }
664
- }, ||
665
- {
666
- message?: string;
667
- },
668
- callback?: {
669
- code: number;
670
- completed: string;
671
- },
672
- },
673
- ];
674
- }
675
- ```
676
-
677
- ### Get Specific Request
678
-
679
- Retrieves a specific transcription request for a project based on the provided
680
- `projectId` and `requestId`.
69
+ ### Transcribe Audio in Real-Time
681
70
 
682
71
  ```js
683
- const response = await deepgram.usage.getRequest(PROJECT_ID, REQUEST_ID);
684
- ```
685
-
686
- #### Specific Request Response
687
-
688
- ```ts
689
- {
690
- request_id: string;
691
- created: string;
692
- path: string;
693
- accessor: string;
694
- response?: {
695
- details: {
696
- usd: number;
697
- duration: number;
698
- total_audio: number;
699
- channels: number;
700
- streams: number;
701
- model: string;
702
- method: string;
703
- tags: Array<string>;
704
- features: Array<string>;
705
- config: {
706
- multichannel?: boolean;
707
- interim_results?: boolean;
708
- punctuate?: boolean;
709
- ner?: boolean;
710
- utterances?: boolean;
711
- replace?: boolean;
712
- profanity_filter?: boolean;
713
- keywords?: boolean;
714
- sentiment?: boolean;
715
- diarize?: boolean;
716
- detect_language?: boolean;
717
- search?: boolean;
718
- redact?: boolean;
719
- alternatives?: boolean;
720
- numerals?: boolean;
721
- };
722
- }
723
- }, ||
724
- {
725
- message?: string;
726
- },
727
- callback?: {
728
- code: number;
729
- completed: string;
730
- }
731
- }
732
- ```
72
+ navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
73
+ const mediaRecorder = new MediaRecorder(stream, {
74
+ mimeType: 'audio/webm',
75
+ });
76
+ const deepgramSocket = deepgram.transcription.live({ punctuate: true });
733
77
 
734
- ### Get Usage by Project
78
+ deepgramSocket.addListener('open', () => {
79
+ mediaRecorder.addEventListener('dataavailable', async (event) => {
80
+ if (event.data.size > 0 && deepgramSocket.readyState == 1) {
81
+ deepgramSocket.send(event.data)
82
+ }
83
+ })
84
+ mediaRecorder.start(1000)
85
+ });
735
86
 
736
- Retrieves aggregated usage data for a project based on the provided options.
737
-
738
- ```js
739
- const response = await deepgram.usage.getUsage(PROJECT_ID, {
740
- start: "2020-01-01T00:00:00+00:00",
741
- // other options are available
742
- });
743
- ```
744
-
745
- #### Usage by Project Options
746
-
747
- ```js
748
- {
749
- // The time to retrieve requests made since
750
- // Example: "2020-01-01T00:00:00+00:00"
751
- start?: string,
752
- // The time to retrieve requests made until
753
- // Example: "2021-01-01T00:00:00+00:00"
754
- end?: string,
755
- // Specific identifer for a request
756
- accessor?: string,
757
- // Array of tags used in requests
758
- tag?: Array<string>,
759
- // Filter requests by method
760
- method?: "sync" | "async" | "streaming",
761
- // Filter requests by model used
762
- model?: string,
763
- // Filter only requests using multichannel feature
764
- multichannel?: boolean,
765
- // Filter only requests using interim results feature
766
- interim_results?: boolean,
767
- // Filter only requests using the punctuation feature
768
- punctuate?: boolean,
769
- // Filter only requests using ner feature
770
- ner?: boolean,
771
- // Filter only requests using utterances feature
772
- utterances?: boolean,
773
- // Filter only requests using replace feature
774
- replace?: boolean,
775
- // Filter only requests using profanity_filter feature
776
- profanity_filter?: boolean,
777
- // Filter only requests using keywords feature
778
- keywords?: boolean,
779
- // Filter only requests using sentiment feature
780
- sentiment?: boolean,
781
- // Filter only requests using diarization feature
782
- diarize?: boolean,
783
- // Filter only requests using detect_language feature
784
- detect_language?: boolean,
785
- // Filter only requests using search feature
786
- search?: boolean,
787
- // Filter only requests using redact feature
788
- redact?: boolean,
789
- // Filter only requests using alternatives feature
790
- alternatives?: boolean,
791
- // Filter only requests using numerals feature
792
- numerals?: boolean
793
- }
794
- ```
795
-
796
- #### Get Usage Response
797
-
798
- ```ts
799
- {
800
- start: string,
801
- end: string,
802
- resolution: {
803
- units: string,
804
- amount: number
805
- };
806
- results: [
807
- {
808
- start: string,
809
- end: string,
810
- hours: number,
811
- requests: number
87
+ deepgramSocket.addListener("transcriptReceived", (received) => {
88
+ const transcript = received.channel.alternatives[0].transcript;
89
+ if (transcript && received.is_final) {
90
+ console.log(transcript);
812
91
  }
813
- ];
814
- }
815
- ```
816
-
817
- ### Get Fields
818
-
819
- Retrieves features used by the provided projectId based on the provided options.
820
-
821
- ```js
822
- const response = await deepgram.usage.getUsage(PROJECT_ID, {
823
- start: "2020-01-01T00:00:00+00:00",
824
- // other options are available
92
+ });
825
93
  });
826
94
  ```
827
95
 
828
- #### Get Fields Options
829
-
830
- ```js
831
- {
832
- // The time to retrieve requests made since
833
- // Example: "2020-01-01T00:00:00+00:00"
834
- start?: string,
835
- // The time to retrieve requests made until
836
- // Example: "2021-01-01T00:00:00+00:00"
837
- end?: string
838
- }
839
- ```
840
-
841
- #### Get Fields Response
842
-
843
- ```ts
844
- {
845
- tags: Array<string>,
846
- models: Array<string>,
847
- processing_methods: Array<string>,
848
- languages: Array<string>,
849
- features: Array<string>
850
- }
851
- ```
852
-
853
96
  ## Samples
854
97
 
855
98
  To run the sample code, first run the following in your terminal:
package/dist/keys.d.ts CHANGED
@@ -1,4 +1,4 @@
1
- import { KeyResponse, Key } from "./types";
1
+ import { CreateKeyOptions, KeyResponse, Key } from "./types";
2
2
  export declare class Keys {
3
3
  private _credentials;
4
4
  private _apiUrl;
@@ -20,8 +20,9 @@ export declare class Keys {
20
20
  * @param projectId Unique identifier of the project to create an API key under
21
21
  * @param comment Comment to describe the key
22
22
  * @param scopes Permission scopes associated with the API key
23
+ * @param options Optional options used when creating API keys
23
24
  */
24
- create(projectId: string, comment: string, scopes: Array<string>): Promise<Key>;
25
+ create(projectId: string, comment: string, scopes: Array<string>, options?: CreateKeyOptions): Promise<Key>;
25
26
  /**
26
27
  * Deletes an API key
27
28
  * @param projectId Unique identifier of the project to create an API key under
package/dist/keys.js CHANGED
@@ -72,11 +72,25 @@ var Keys = /** @class */ (function () {
72
72
  * @param projectId Unique identifier of the project to create an API key under
73
73
  * @param comment Comment to describe the key
74
74
  * @param scopes Permission scopes associated with the API key
75
+ * @param options Optional options used when creating API keys
75
76
  */
76
- Keys.prototype.create = function (projectId, comment, scopes) {
77
+ Keys.prototype.create = function (projectId, comment, scopes, options) {
77
78
  return __awaiter(this, void 0, void 0, function () {
78
79
  return __generator(this, function (_a) {
79
- return [2 /*return*/, (0, httpRequest_1._request)("POST", this._credentials, this._apiUrl, this.apiPath + "/" + projectId + "/keys", JSON.stringify({ comment: comment, scopes: scopes }))];
80
+ /** Throw an error if the user provided both expirationDate and timeToLive */
81
+ if (options &&
82
+ options.expirationDate !== undefined &&
83
+ options.timeToLive !== undefined) {
84
+ throw new Error("Please provide expirationDate or timeToLive or neither. Providing both is not allowed.");
85
+ }
86
+ return [2 /*return*/, (0, httpRequest_1._request)("POST", this._credentials, this._apiUrl, this.apiPath + "/" + projectId + "/keys", JSON.stringify({
87
+ comment: comment,
88
+ scopes: scopes,
89
+ expiration_date: options && options.expirationDate
90
+ ? options.expirationDate
91
+ : undefined,
92
+ time_to_live_in_seconds: options && options.timeToLive ? options.timeToLive : undefined,
93
+ }))];
80
94
  });
81
95
  });
82
96
  };
package/dist/keys.js.map CHANGED
@@ -1 +1 @@
1
- {"version":3,"file":"keys.js","sourceRoot":"","sources":["../src/keys.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA,6CAAyC;AAGzC;IACE,cAAoB,YAAoB,EAAU,OAAe;QAA7C,iBAAY,GAAZ,YAAY,CAAQ;QAAU,YAAO,GAAP,OAAO,CAAQ;QAEzD,YAAO,GAAG,cAAc,CAAC;IAFmC,CAAC;IAIrE;;;OAGG;IACG,mBAAI,GAAV,UAAW,SAAiB;;;gBAC1B,sBAAO,IAAA,sBAAQ,EACb,KAAK,EACL,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,OAAO,EACT,IAAI,CAAC,OAAO,SAAI,SAAS,UAAO,CACpC,EAAC;;;KACH;IAED;;;;OAIG;IACG,kBAAG,GAAT,UAAU,SAAiB,EAAE,KAAa;;;gBACxC,sBAAO,IAAA,sBAAQ,EACb,KAAK,EACL,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,OAAO,EACT,IAAI,CAAC,OAAO,SAAI,SAAS,cAAS,KAAO,CAC7C,EAAC;;;KACH;IAED;;;;;OAKG;IACG,qBAAM,GAAZ,UACE,SAAiB,EACjB,OAAe,EACf,MAAqB;;;gBAErB,sBAAO,IAAA,sBAAQ,EACb,MAAM,EACN,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,OAAO,EACT,IAAI,CAAC,OAAO,SAAI,SAAS,UAAO,EACnC,IAAI,CAAC,SAAS,CAAC,EAAE,OAAO,SAAA,EAAE,MAAM,QAAA,EAAE,CAAC,CACpC,EAAC;;;KACH;IAED;;;;OAIG;IACG,qBAAM,GAAZ,UAAa,SAAiB,EAAE,KAAa;;;gBAC3C,sBAAO,IAAA,sBAAQ,EACb,QAAQ,EACR,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,OAAO,EACT,IAAI,CAAC,OAAO,SAAI,SAAS,cAAS,KAAO,CAC7C,EAAC;;;KACH;IACH,WAAC;AAAD,CAAC,AAjED,IAiEC;AAjEY,oBAAI"}
1
+ {"version":3,"file":"keys.js","sourceRoot":"","sources":["../src/keys.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA,6CAAyC;AAGzC;IACE,cAAoB,YAAoB,EAAU,OAAe;QAA7C,iBAAY,GAAZ,YAAY,CAAQ;QAAU,YAAO,GAAP,OAAO,CAAQ;QAEzD,YAAO,GAAG,cAAc,CAAC;IAFmC,CAAC;IAIrE;;;OAGG;IACG,mBAAI,GAAV,UAAW,SAAiB;;;gBAC1B,sBAAO,IAAA,sBAAQ,EACb,KAAK,EACL,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,OAAO,EACT,IAAI,CAAC,OAAO,SAAI,SAAS,UAAO,CACpC,EAAC;;;KACH;IAED;;;;OAIG;IACG,kBAAG,GAAT,UAAU,SAAiB,EAAE,KAAa;;;gBACxC,sBAAO,IAAA,sBAAQ,EACb,KAAK,EACL,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,OAAO,EACT,IAAI,CAAC,OAAO,SAAI,SAAS,cAAS,KAAO,CAC7C,EAAC;;;KACH;IAED;;;;;;OAMG;IACG,qBAAM,GAAZ,UACE,SAAiB,EACjB,OAAe,EACf,MAAqB,EACrB,OAA0B;;;gBAE1B,6EAA6E;gBAC7E,IACE,OAAO;oBACP,OAAO,CAAC,cAAc,KAAK,SAAS;oBACpC,OAAO,CAAC,UAAU,KAAK,SAAS,EAChC;oBACA,MAAM,IAAI,KAAK,CACb,wFAAwF,CACzF,CAAC;iBACH;gBAED,sBAAO,IAAA,sBAAQ,EACb,MAAM,EACN,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,OAAO,EACT,IAAI,CAAC,OAAO,SAAI,SAAS,UAAO,EACnC,IAAI,CAAC,SAAS,CAAC;wBACb,OAAO,SAAA;wBACP,MAAM,QAAA;wBACN,eAAe,EACb,OAAO,IAAI,OAAO,CAAC,cAAc;4BAC/B,CAAC,CAAC,OAAO,CAAC,cAAc;4BACxB,CAAC,CAAC,SAAS;wBACf,uBAAuB,EACrB,OAAO,IAAI,OAAO,CAAC,UAAU,CAAC,CAAC,CAAC,OAAO,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS;qBACjE,CAAC,CACH,EAAC;;;KACH;IAED;;;;OAIG;IACG,qBAAM,GAAZ,UAAa,SAAiB,EAAE,KAAa;;;gBAC3C,sBAAO,IAAA,sBAAQ,EACb,QAAQ,EACR,IAAI,CAAC,YAAY,EACjB,IAAI,CAAC,OAAO,EACT,IAAI,CAAC,OAAO,SAAI,SAAS,cAAS,KAAO,CAC7C,EAAC;;;KACH;IACH,WAAC;AAAD,CAAC,AAvFD,IAuFC;AAvFY,oBAAI"}
@@ -75,7 +75,7 @@ var LiveTranscription = /** @class */ (function (_super) {
75
75
  * the websocket connection when transcription is finished
76
76
  */
77
77
  LiveTranscription.prototype.finish = function () {
78
- this._socket.close(1000);
78
+ this._socket.send(new Uint8Array(0));
79
79
  };
80
80
  return LiveTranscription;
81
81
  }(events_1.default));
@@ -1 +1 @@
1
- {"version":3,"file":"liveTranscription.js","sourceRoot":"","sources":["../../src/transcription/liveTranscription.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;AAAA,kDAAkC;AAClC,4DAAsC;AACtC,0CAA2B;AAC3B,kCAAoE;AAEpE,0CAAyC;AAEzC;IAAuC,qCAAY;IAGjD,2BACE,WAAmB,EACnB,MAAc,EACd,OAAkC;QAHpC,YAKE,kBAAM,SAAS,CAAC,SAWjB;QAVC,KAAI,CAAC,OAAO,GAAG,IAAI,YAAS,CAC1B,WAAS,MAAM,mBAAc,qBAAW,CAAC,SAAS,CAAC,OAAO,CAAG,EAC7D;YACE,OAAO,EAAE;gBACP,aAAa,EAAE,WAAS,WAAa;gBACrC,YAAY,EAAE,IAAA,qBAAS,GAAE;aAC1B;SACF,CACF,CAAC;QACF,KAAI,CAAC,iBAAiB,EAAE,CAAC;;IAC3B,CAAC;IAEO,6CAAiB,GAAzB;QAAA,iBAgBC;QAfC,IAAI,CAAC,OAAO,CAAC,MAAM,GAAG;YACpB,KAAI,CAAC,IAAI,oBAA+B,KAAI,CAAC,CAAC;QAChD,CAAC,CAAC;QAEF,IAAI,CAAC,OAAO,CAAC,OAAO,GAAG,UAAC,KAA2B;YACjD,KAAI,CAAC,IAAI,sBAAgC,KAAK,CAAC,CAAC;QAClD,CAAC,CAAC;QAEF,IAAI,CAAC,OAAO,CAAC,OAAO,GAAG,UAAC,KAAK;YAC3B,KAAI,CAAC,IAAI,sBAAgC,KAAK,CAAC,CAAC;QAClD,CAAC,CAAC;QAEF,IAAI,CAAC,OAAO,CAAC,SAAS,GAAG,UAAC,CAAC;YACzB,KAAI,CAAC,IAAI,gDAA6C,CAAC,CAAC,IAAI,CAAC,CAAC;QAChE,CAAC,CAAC;IACJ,CAAC;IAED;;OAEG;IACI,yCAAa,GAApB;QACE,OAAO,IAAI,CAAC,OAAO,CAAC,UAAU,CAAC;IACjC,CAAC;IAED;;;OAGG;IACI,gCAAI,GAAX,UAAY,IAAuD;QACjE,IAAI,IAAI,CAAC,OAAO,CAAC,UAAU,KAAK,uBAAe,CAAC,IAAI,EAAE;YACpD,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;SACzB;aAAM;YACL,IAAI,CAAC,IAAI,sBAEP,sCAAsC,CACvC,CAAC;SACH;IACH,CAAC;IAED;;;OAGG;IACI,kCAAM,GAAb;QACE,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;IAC3B,CAAC;IACH,wBAAC;AAAD,CAAC,AApED,CAAuC,gBAAY,GAoElD;AApEY,8CAAiB"}
1
+ {"version":3,"file":"liveTranscription.js","sourceRoot":"","sources":["../../src/transcription/liveTranscription.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;AAAA,kDAAkC;AAClC,4DAAsC;AACtC,0CAA2B;AAC3B,kCAAoE;AAEpE,0CAAyC;AAEzC;IAAuC,qCAAY;IAGjD,2BACE,WAAmB,EACnB,MAAc,EACd,OAAkC;QAHpC,YAKE,kBAAM,SAAS,CAAC,SAWjB;QAVC,KAAI,CAAC,OAAO,GAAG,IAAI,YAAS,CAC1B,WAAS,MAAM,mBAAc,qBAAW,CAAC,SAAS,CAAC,OAAO,CAAG,EAC7D;YACE,OAAO,EAAE;gBACP,aAAa,EAAE,WAAS,WAAa;gBACrC,YAAY,EAAE,IAAA,qBAAS,GAAE;aAC1B;SACF,CACF,CAAC;QACF,KAAI,CAAC,iBAAiB,EAAE,CAAC;;IAC3B,CAAC;IAEO,6CAAiB,GAAzB;QAAA,iBAgBC;QAfC,IAAI,CAAC,OAAO,CAAC,MAAM,GAAG;YACpB,KAAI,CAAC,IAAI,oBAA+B,KAAI,CAAC,CAAC;QAChD,CAAC,CAAC;QAEF,IAAI,CAAC,OAAO,CAAC,OAAO,GAAG,UAAC,KAA2B;YACjD,KAAI,CAAC,IAAI,sBAAgC,KAAK,CAAC,CAAC;QAClD,CAAC,CAAC;QAEF,IAAI,CAAC,OAAO,CAAC,OAAO,GAAG,UAAC,KAAK;YAC3B,KAAI,CAAC,IAAI,sBAAgC,KAAK,CAAC,CAAC;QAClD,CAAC,CAAC;QAEF,IAAI,CAAC,OAAO,CAAC,SAAS,GAAG,UAAC,CAAC;YACzB,KAAI,CAAC,IAAI,gDAA6C,CAAC,CAAC,IAAI,CAAC,CAAC;QAChE,CAAC,CAAC;IACJ,CAAC;IAED;;OAEG;IACI,yCAAa,GAApB;QACE,OAAO,IAAI,CAAC,OAAO,CAAC,UAAU,CAAC;IACjC,CAAC;IAED;;;OAGG;IACI,gCAAI,GAAX,UAAY,IAAuD;QACjE,IAAI,IAAI,CAAC,OAAO,CAAC,UAAU,KAAK,uBAAe,CAAC,IAAI,EAAE;YACpD,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;SACzB;aAAM;YACL,IAAI,CAAC,IAAI,sBAEP,sCAAsC,CACvC,CAAC;SACH;IACH,CAAC;IAED;;;OAGG;IACI,kCAAM,GAAb;QACE,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC;IACvC,CAAC;IACH,wBAAC;AAAD,CAAC,AApED,CAAuC,gBAAY,GAoElD;AApEY,8CAAiB"}
@@ -0,0 +1,13 @@
1
+ /**
2
+ * Optional options used when creating an API key
3
+ */
4
+ export declare type CreateKeyOptions = {
5
+ /**
6
+ * Date on which the key you would like to create should expire.
7
+ */
8
+ expirationDate?: Date;
9
+ /**
10
+ * Length of time (in seconds) during which the key you would like to create will remain valid.
11
+ */
12
+ timeToLive?: number;
13
+ };
@@ -0,0 +1,3 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ //# sourceMappingURL=createKeyOptions.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"createKeyOptions.js","sourceRoot":"","sources":["../../src/types/createKeyOptions.ts"],"names":[],"mappings":""}
@@ -1,4 +1,5 @@
1
1
  export * from "./channel";
2
+ export * from "./createKeyOptions";
2
3
  export * from "./hit";
3
4
  export * from "./key";
4
5
  export * from "./keyResponse";
@@ -11,6 +11,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
11
11
  };
12
12
  Object.defineProperty(exports, "__esModule", { value: true });
13
13
  __exportStar(require("./channel"), exports);
14
+ __exportStar(require("./createKeyOptions"), exports);
14
15
  __exportStar(require("./hit"), exports);
15
16
  __exportStar(require("./key"), exports);
16
17
  __exportStar(require("./keyResponse"), exports);
@@ -1 +1 @@
1
- {"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/types/index.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,4CAA0B;AAC1B,wCAAsB;AACtB,wCAAsB;AACtB,gDAA8B;AAC9B,6DAA2C;AAC3C,8DAA4C;AAC5C,2CAAyB;AACzB,6CAA2B;AAC3B,oEAAkD;AAClD,qEAAmD;AACnD,4CAA0B;AAC1B,yDAAuC;AACvC,oDAAkC;AAClC,2CAAyB;AACzB,wDAAsC;AACtC,kDAAgC;AAChC,+CAA6B;AAC7B,sDAAoC;AACpC,iDAA+B;AAC/B,iDAA+B;AAC/B,uDAAqC;AACrC,qDAAmC;AACnC,4DAA0C;AAC1C,kDAAgC;AAChC,wDAAsC;AACtC,8CAA4B;AAC5B,6CAA2B"}
1
+ {"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/types/index.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,4CAA0B;AAC1B,qDAAmC;AACnC,wCAAsB;AACtB,wCAAsB;AACtB,gDAA8B;AAC9B,6DAA2C;AAC3C,8DAA4C;AAC5C,2CAAyB;AACzB,6CAA2B;AAC3B,oEAAkD;AAClD,qEAAmD;AACnD,4CAA0B;AAC1B,yDAAuC;AACvC,oDAAkC;AAClC,2CAAyB;AACzB,wDAAsC;AACtC,kDAAgC;AAChC,+CAA6B;AAC7B,sDAAoC;AACpC,iDAA+B;AAC/B,iDAA+B;AAC/B,uDAAqC;AACrC,qDAAmC;AACnC,4DAA0C;AAC1C,kDAAgC;AAChC,wDAAsC;AACtC,8CAA4B;AAC5B,6CAA2B"}
@@ -28,7 +28,8 @@ export declare type Utterance = {
28
28
  */
29
29
  words: Array<WordBase>;
30
30
  /**
31
- * Integer indicating the speaker who is saying the word being processed.
31
+ * Integer indicating the predicted speaker of the majority of words
32
+ * in the utterance who is saying the words being processed.
32
33
  */
33
34
  speaker?: number;
34
35
  /**
@@ -4,4 +4,5 @@ export declare type WordBase = {
4
4
  end: number;
5
5
  confidence: number;
6
6
  punctuated_word?: string;
7
+ speaker?: number;
7
8
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@deepgram/sdk",
3
- "version": "1.1.0",
3
+ "version": "1.2.2",
4
4
  "description": "An SDK for the Deepgram automated speech recognition platform",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",