@deepgram/sdk 1.4.7 → 1.4.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62) hide show
  1. package/dist/browser/httpFetch.d.ts +1 -0
  2. package/dist/browser/index.d.ts +21 -0
  3. package/dist/browser/index.js +1 -1
  4. package/dist/constants/defaultOptions.d.ts +6 -0
  5. package/dist/constants/index.d.ts +1 -0
  6. package/dist/enums/alternatives.d.ts +4 -0
  7. package/dist/enums/connectionState.d.ts +6 -0
  8. package/dist/enums/diarization.d.ts +4 -0
  9. package/dist/enums/index.d.ts +7 -0
  10. package/dist/enums/liveTranscriptionEvents.d.ts +6 -0
  11. package/dist/enums/models.d.ts +5 -0
  12. package/dist/enums/punctuation.d.ts +4 -0
  13. package/dist/enums/searchKind.d.ts +4 -0
  14. package/dist/helpers/index.d.ts +2 -0
  15. package/dist/helpers/secondsToTimestamp.d.ts +1 -0
  16. package/dist/helpers/validateOptions.d.ts +1 -0
  17. package/dist/index.js +1 -1
  18. package/dist/transcription/index.d.ts +18 -0
  19. package/dist/transcription/liveTranscription.d.ts +23 -0
  20. package/dist/transcription/preRecordedTranscription.d.ts +8 -0
  21. package/dist/types/balance.d.ts +6 -0
  22. package/dist/types/balanceList.d.ts +4 -0
  23. package/dist/types/channel.d.ts +25 -0
  24. package/dist/types/createKeyOptions.d.ts +13 -0
  25. package/dist/types/hit.d.ts +21 -0
  26. package/dist/types/index.d.ts +38 -0
  27. package/dist/types/invitationList.d.ts +4 -0
  28. package/dist/types/invitationOptions.d.ts +4 -0
  29. package/dist/types/key.d.ts +25 -0
  30. package/dist/types/keyResponse.d.ts +10 -0
  31. package/dist/types/keyResponseObj.d.ts +42 -0
  32. package/dist/types/keyword.d.ts +4 -0
  33. package/dist/types/liveTranscriptionOptions.d.ts +161 -0
  34. package/dist/types/liveTranscriptionResponse.d.ts +9 -0
  35. package/dist/types/member.d.ts +7 -0
  36. package/dist/types/memberList.d.ts +4 -0
  37. package/dist/types/message.d.ts +3 -0
  38. package/dist/types/metadata.d.ts +8 -0
  39. package/dist/types/prerecordedTranscriptionOptions.d.ts +139 -0
  40. package/dist/types/prerecordedTranscriptionResponse.d.ts +25 -0
  41. package/dist/types/project.d.ts +17 -0
  42. package/dist/types/projectPatchRequest.d.ts +4 -0
  43. package/dist/types/projectPatchResponse.d.ts +6 -0
  44. package/dist/types/projectResponse.d.ts +4 -0
  45. package/dist/types/requestFunction.d.ts +5 -0
  46. package/dist/types/scopeList.d.ts +3 -0
  47. package/dist/types/search.d.ts +14 -0
  48. package/dist/types/transcriptionSource.d.ts +14 -0
  49. package/dist/types/usageCallback.d.ts +4 -0
  50. package/dist/types/usageField.d.ts +7 -0
  51. package/dist/types/usageFieldOptions.d.ts +4 -0
  52. package/dist/types/usageOptions.d.ts +23 -0
  53. package/dist/types/usageRequest.d.ts +11 -0
  54. package/dist/types/usageRequestDetail.d.ts +30 -0
  55. package/dist/types/usageRequestList.d.ts +6 -0
  56. package/dist/types/usageRequestListOptions.d.ts +7 -0
  57. package/dist/types/usageRequestMessage.d.ts +3 -0
  58. package/dist/types/usageResponse.d.ts +10 -0
  59. package/dist/types/usageResponseDetail.d.ts +6 -0
  60. package/dist/types/utterance.d.ts +39 -0
  61. package/dist/types/wordBase.d.ts +8 -0
  62. package/package.json +2 -2
@@ -0,0 +1,38 @@
1
+ export * from "./balance";
2
+ export * from "./balanceList";
3
+ export * from "./channel";
4
+ export * from "./createKeyOptions";
5
+ export * from "./hit";
6
+ export * from "./invitationList";
7
+ export * from "./invitationOptions";
8
+ export * from "./key";
9
+ export * from "./keyResponse";
10
+ export * from "./liveTranscriptionOptions";
11
+ export * from "./liveTranscriptionResponse";
12
+ export * from "./member";
13
+ export * from "./memberList";
14
+ export * from "./message";
15
+ export * from "./metadata";
16
+ export * from "./prerecordedTranscriptionOptions";
17
+ export * from "./prerecordedTranscriptionResponse";
18
+ export * from "./project";
19
+ export * from "./projectPatchResponse";
20
+ export * from "./projectResponse";
21
+ export * from "./scopeList";
22
+ export * from "./search";
23
+ export * from "./transcriptionSource";
24
+ export * from "./usageCallback";
25
+ export * from "./usageField";
26
+ export * from "./usageFieldOptions";
27
+ export * from "./usageOptions";
28
+ export * from "./usageRequest";
29
+ export * from "./usageRequestDetail";
30
+ export * from "./usageRequestList";
31
+ export * from "./usageRequestListOptions";
32
+ export * from "./usageResponse";
33
+ export * from "./usageResponseDetail";
34
+ export * from "./utterance";
35
+ export * from "./wordBase";
36
+ export * from "./keyResponseObj";
37
+ export * from "./projectPatchRequest";
38
+ export * from "./requestFunction";
@@ -0,0 +1,4 @@
1
+ import { InvitationOptions } from "./invitationOptions";
2
+ export declare type InvitationList = {
3
+ invites?: Array<InvitationOptions>;
4
+ };
@@ -0,0 +1,4 @@
1
+ export declare type InvitationOptions = {
2
+ email?: string;
3
+ scope?: string;
4
+ };
@@ -0,0 +1,25 @@
1
+ /**
2
+ * API key used for authenticating with the Deepgram API
3
+ */
4
+ export declare type Key = {
5
+ /**
6
+ * Unique identifier of the key to use in API requests
7
+ */
8
+ api_key_id: string;
9
+ /**
10
+ * API key to send in API requests (Only displayed when first created)
11
+ */
12
+ key?: string;
13
+ /**
14
+ * Comment for user reference
15
+ */
16
+ comment: string;
17
+ /**
18
+ * Timestamp of the date/time the key was created
19
+ */
20
+ created: string;
21
+ /**
22
+ * Array of scopes assigned to the key
23
+ */
24
+ scopes: Array<string>;
25
+ };
@@ -0,0 +1,10 @@
1
+ import { KeyResponseObj } from "./keyResponseObj";
2
+ /**
3
+ * Response from the Deepgram API to list keys
4
+ */
5
+ export declare type KeyResponse = {
6
+ /**
7
+ * Array of API keys associated with the project
8
+ */
9
+ api_keys: Array<KeyResponseObj>;
10
+ };
@@ -0,0 +1,42 @@
1
+ import { Key } from "./key";
2
+ import { Member } from "./member";
3
+ export declare type KeyResponseObj = {
4
+ /**
5
+ * Optional member associated with the API key
6
+ */
7
+ member?: Member;
8
+ /**
9
+ * API key
10
+ */
11
+ api_key: Key;
12
+ /**
13
+ * Unique identifier of the key to use in API requests
14
+ * @deprecated This property has moved to api_key.api_key_id and will
15
+ * be removed in future versions.
16
+ */
17
+ api_key_id: string;
18
+ /**
19
+ * API key to send in API requests (Only displayed when first created)
20
+ * @deprecated This property has moved to api_key.key and will
21
+ * be removed in future versions.
22
+ */
23
+ key?: string;
24
+ /**
25
+ * Comment for user reference
26
+ * @deprecated This property has moved to api_key.comment and will
27
+ * be removed in future versions.
28
+ */
29
+ comment: string;
30
+ /**
31
+ * Timestamp of the date/time the key was created
32
+ * @deprecated This property has moved to api_key.created and will
33
+ * be removed in future versions.
34
+ */
35
+ created: string;
36
+ /**
37
+ * Array of scopes assigned to the key
38
+ * @deprecated This property has moved to api_key.scopes and will
39
+ * be removed in future versions.
40
+ */
41
+ scopes: Array<string>;
42
+ };
@@ -0,0 +1,4 @@
1
+ export declare type Keyword = {
2
+ keyword: string;
3
+ boost?: number;
4
+ };
@@ -0,0 +1,161 @@
1
+ import { Models } from "../enums";
2
+ /**
3
+ * Options for transcription
4
+ */
5
+ export declare type LiveTranscriptionOptions = {
6
+ /**
7
+ * AI model used to process submitted audio.
8
+ * @default general
9
+ * @remarks Possible values are general, phonecall, meeting or a custom string
10
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/model
11
+ */
12
+ model?: Models | string;
13
+ /**
14
+ * Version of the model to use.
15
+ * @default latest
16
+ * @remarks latest OR <version_id>
17
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/version
18
+ */
19
+ version?: string;
20
+ /**
21
+ * Tier of the model to use.
22
+ * @default base
23
+ * @remarks Possible values are base or enhanced
24
+ * @see https://developers.deepgram.com/documentation/features/tier/
25
+ */
26
+ tier?: string;
27
+ /**
28
+ * Terms or phrases to search for in the submitted audio and replace
29
+ * @remarks Can send multiple instances in query string replace=this:that&replace=thisalso:thatalso. Replacing a term or phrase with nothing will remove the term or phrase from the audio transcript.
30
+ * @see https://developers.deepgram.com/documentation/features/replace/
31
+ */
32
+ replace?: string;
33
+ /**
34
+ * BCP-47 language tag that hints at the primary spoken language.
35
+ * @default en-US
36
+ * @remarks Possible values are en-GB, en-IN, en-NZ, en-US, es, fr, ko, pt,
37
+ * pt-BR, ru, tr or null
38
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/language
39
+ */
40
+ language?: string;
41
+ /**
42
+ * Indicates whether to add punctuation and capitalization to the transcript.
43
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/punctuate
44
+ */
45
+ punctuate?: boolean;
46
+ /**
47
+ * Indicates whether to remove profanity from the transcript.
48
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/profanity_filter
49
+ */
50
+ profanity_filter?: boolean;
51
+ /**
52
+ * Indicates whether to redact sensitive information, replacing redacted content with asterisks (*).
53
+ * @remarks Options include:
54
+ * `pci`: Redacts sensitive credit card information, including credit card number, expiration date, and CVV
55
+ `numbers` (or `true`): Aggressively redacts strings of numerals
56
+ * `ssn` (*beta*): Redacts social security numbers
57
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/redact
58
+ */
59
+ redact?: Array<string>;
60
+ /**
61
+ * Indicates whether to recognize speaker changes. When set to true, each word
62
+ * in the transcript will be assigned a speaker number starting at 0.
63
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/diarize
64
+ */
65
+ diarize?: boolean;
66
+ /**
67
+ * Indicates whether to transcribe each audio channel independently. When set
68
+ * to true, you will receive one transcript for each channel, which means you
69
+ * can apply a different model to each channel using the model parameter (e.g.,
70
+ * set model to general:phonecall, which applies the general model to channel
71
+ * 0 and the phonecall model to channel 1).
72
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/multichannel
73
+ */
74
+ multichannel?: boolean;
75
+ /**
76
+ * Maximum number of transcript alternatives to return. Just like a human listener,
77
+ * Deepgram can provide multiple possible interpretations of what it hears.
78
+ * @default 1
79
+ */
80
+ alternatives?: number;
81
+ /**
82
+ * Indicates whether to convert numbers from written format (e.g., one) to
83
+ * numerical format (e.g., 1). Deepgram can format numbers up to 999,999.
84
+ * @remarks Converted numbers do not include punctuation. For example,
85
+ * 999,999 would be transcribed as 999999.
86
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/numerals
87
+ */
88
+ numerals?: boolean;
89
+ /**
90
+ * Terms or phrases to search for in the submitted audio. Deepgram searches
91
+ * for acoustic patterns in audio rather than text patterns in transcripts
92
+ * because we have noticed that acoustic pattern matching is more performant.
93
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/search
94
+ */
95
+ search?: Array<string>;
96
+ /**
97
+ * Callback URL to provide if you would like your submitted audio to be
98
+ * processed asynchronously. When passed, Deepgram will immediately respond
99
+ * with a request_id. When it has finished analyzing the audio, it will send
100
+ * a POST request to the provided URL with an appropriate HTTP status code.
101
+ * @remarks You may embed basic authentication credentials in the callback URL.
102
+ * Only ports 80, 443, 8080, and 8443 can be used for callbacks.
103
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/callback
104
+ */
105
+ callback?: string;
106
+ /**
107
+ * Keywords to which the model should pay particular attention, boosting
108
+ * or suppressing them to help it understand context. Just like a human listener,
109
+ * Deepgram can better understand mumbled, distorted, or otherwise
110
+ * hard-to-decipher speech when it knows the context of the conversation.
111
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/keywords
112
+ */
113
+ keywords?: Array<string>;
114
+ /**
115
+ * Indicates whether the streaming endpoint should send you updates to its
116
+ * transcription as more audio becomes available. By default, the streaming
117
+ * endpoint returns regular updates, which means transcription results will
118
+ * likely change for a period of time. You can avoid receiving these updates
119
+ * by setting this flag to false.
120
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/interim_results
121
+ */
122
+ interim_results?: boolean;
123
+ /**
124
+ * Indicates whether Deepgram will detect whether a speaker has finished
125
+ * speaking (or paused for a significant period of time, indicating the
126
+ * completion of an idea). When Deepgram detects an endpoint, it assumes
127
+ * that no additional data will improve its prediction, so it immediately
128
+ * finalizes the result for the processed time range and returns the
129
+ * transcript with a speech_final parameter set to true.
130
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/endpointing
131
+ */
132
+ endpointing?: boolean;
133
+ /**
134
+ * Length of time in milliseconds of silence that voice activation detection
135
+ * (VAD) will use to detect that a speaker has finished speaking. Used when
136
+ * endpointing is enabled. Defaults to 10 ms. Deepgram customers may configure
137
+ * a value between 10 ms and 500 ms; on-premise customers may remove this
138
+ * restriction.
139
+ * @default 10
140
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/vad_turnoff
141
+ */
142
+ vad_turnoff?: number;
143
+ /**
144
+ * Expected encoding of the submitted streaming audio.
145
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/encoding
146
+ */
147
+ encoding?: string;
148
+ /**
149
+ * Number of independent audio channels contained in submitted streaming
150
+ * audio. Only read when a value is provided for encoding.
151
+ * @default 1
152
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/channels
153
+ */
154
+ channels?: number;
155
+ /**
156
+ * Sample rate of submitted streaming audio. Required (and only read)
157
+ * when a value is provided for encoding.
158
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/sample_rate
159
+ */
160
+ sample_rate?: number;
161
+ };
@@ -0,0 +1,9 @@
1
+ import { Channel } from "./channel";
2
+ export declare type LiveTranscriptionResponse = {
3
+ channel_index: Array<number>;
4
+ duration: number;
5
+ start: number;
6
+ is_final: boolean;
7
+ speech_final: boolean;
8
+ channel: Channel;
9
+ };
@@ -0,0 +1,7 @@
1
+ export declare type Member = {
2
+ member_id: string;
3
+ first_name?: string;
4
+ last_name?: string;
5
+ scopes?: Array<string>;
6
+ email: string;
7
+ };
@@ -0,0 +1,4 @@
1
+ import { Member } from "./member";
2
+ export declare type MemberList = {
3
+ members?: Array<Member>;
4
+ };
@@ -0,0 +1,3 @@
1
+ export declare type Message = {
2
+ message?: string;
3
+ };
@@ -0,0 +1,8 @@
1
+ export declare type Metadata = {
2
+ request_id: string;
3
+ transaction_key: string;
4
+ sha256: string;
5
+ created: string;
6
+ duration: number;
7
+ channels: number;
8
+ };
@@ -0,0 +1,139 @@
1
+ import { Models } from "../enums";
2
+ /**
3
+ * Options for transcription
4
+ */
5
+ export declare type PrerecordedTranscriptionOptions = {
6
+ /**
7
+ * AI model used to process submitted audio.
8
+ * @default general
9
+ * @remarks Possible values are general, phonecall, meeting or a custom string
10
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/model
11
+ */
12
+ model?: Models | string;
13
+ /**
14
+ * Version of the model to use.
15
+ * @default latest
16
+ * @remarks latest OR <version_id>
17
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/version
18
+ */
19
+ version?: string;
20
+ /**
21
+ * Tier of the model to use.
22
+ * @default base
23
+ * @remarks Possible values are base or enhanced
24
+ * @see https://developers.deepgram.com/documentation/features/tier/
25
+ */
26
+ tier?: string;
27
+ /**
28
+ * Terms or phrases to search for in the submitted audio and replace
29
+ * @remarks Can send multiple instances in query string replace=this:that&replace=thisalso:thatalso. Replacing a term or phrase with nothing will remove the term or phrase from the audio transcript.
30
+ * @see https://developers.deepgram.com/documentation/features/replace/
31
+ */
32
+ replace?: string;
33
+ /**
34
+ * BCP-47 language tag that hints at the primary spoken language.
35
+ * @default en-US
36
+ * @remarks Possible values are en-GB, en-IN, en-NZ, en-US, es, fr, ko, pt,
37
+ * pt-BR, ru, tr or null
38
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/language
39
+ */
40
+ language?: string;
41
+ /**
42
+ * Indicates whether to add punctuation and capitalization to the transcript.
43
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/punctuate
44
+ */
45
+ punctuate?: boolean;
46
+ /**
47
+ * Indicates whether to remove profanity from the transcript.
48
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/profanity_filter
49
+ */
50
+ profanity_filter?: boolean;
51
+ /**
52
+ * Indicates whether to redact sensitive information, replacing redacted content with asterisks (*).
53
+ * @remarks Options include:
54
+ * `pci`: Redacts sensitive credit card information, including credit card number, expiration date, and CVV
55
+ `numbers` (or `true`): Aggressively redacts strings of numerals
56
+ * `ssn` (*beta*): Redacts social security numbers
57
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/redact
58
+ */
59
+ redact?: Array<string>;
60
+ /**
61
+ * Indicates whether to recognize speaker changes. When set to true, each word
62
+ * in the transcript will be assigned a speaker number starting at 0.
63
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/diarize
64
+ */
65
+ diarize?: boolean;
66
+ /**
67
+ * Indicates whether to transcribe each audio channel independently. When set
68
+ * to true, you will receive one transcript for each channel, which means you
69
+ * can apply a different model to each channel using the model parameter (e.g.,
70
+ * set model to general:phonecall, which applies the general model to channel
71
+ * 0 and the phonecall model to channel 1).
72
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/multichannel
73
+ */
74
+ multichannel?: boolean;
75
+ /**
76
+ * Maximum number of transcript alternatives to return. Just like a human listener,
77
+ * Deepgram can provide multiple possible interpretations of what it hears.
78
+ * @default 1
79
+ */
80
+ alternatives?: number;
81
+ /**
82
+ * Indicates whether to convert numbers from written format (e.g., one) to
83
+ * numerical format (e.g., 1). Deepgram can format numbers up to 999,999.
84
+ * @remarks Converted numbers do not include punctuation. For example,
85
+ * 999,999 would be transcribed as 999999.
86
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/numerals
87
+ */
88
+ numerals?: boolean;
89
+ /**
90
+ * Terms or phrases to search for in the submitted audio. Deepgram searches
91
+ * for acoustic patterns in audio rather than text patterns in transcripts
92
+ * because we have noticed that acoustic pattern matching is more performant.
93
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/search
94
+ */
95
+ search?: Array<string>;
96
+ /**
97
+ * Callback URL to provide if you would like your submitted audio to be
98
+ * processed asynchronously. When passed, Deepgram will immediately respond
99
+ * with a request_id. When it has finished analyzing the audio, it will send
100
+ * a POST request to the provided URL with an appropriate HTTP status code.
101
+ * @remarks You may embed basic authentication credentials in the callback URL.
102
+ * Only ports 80, 443, 8080, and 8443 can be used for callbacks.
103
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/callback
104
+ */
105
+ callback?: string;
106
+ /**
107
+ * Keywords to which the model should pay particular attention, boosting
108
+ * or suppressing them to help it understand context. Just like a human listener,
109
+ * Deepgram can better understand mumbled, distorted, or otherwise
110
+ * hard-to-decipher speech when it knows the context of the conversation.
111
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/keywords
112
+ */
113
+ keywords?: Array<string>;
114
+ /**
115
+ * Indicates whether Deepgram will segment speech into meaningful semantic
116
+ * units, which allows the model to interact more naturally and effectively
117
+ * with speakers' spontaneous speech patterns. For example, when humans
118
+ * speak to each other conversationally, they often pause mid-sentence to
119
+ * reformulate their thoughts, or stop and restart a badly-worded sentence.
120
+ * When utterances is set to true, these utterances are identified and
121
+ * returned in the transcript results.
122
+ *
123
+ * By default, when utterances is enabled, it starts a new utterance after
124
+ * 0.8 s of silence. You can customize the length of time used to determine
125
+ * where to split utterances by submitting the utt_split parameter.
126
+ * @remarks **BETA FEATURE**
127
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/utterances
128
+ */
129
+ utterances?: boolean;
130
+ /**
131
+ * Length of time in seconds of silence between words that Deepgram will
132
+ * use when determining where to split utterances. Used when utterances
133
+ * is enabled.
134
+ * @default 0.8 seconds
135
+ * @remarks **BETA FEATURE**
136
+ * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeAudio/properties/utt_split
137
+ */
138
+ utt_split?: number;
139
+ };
@@ -0,0 +1,25 @@
1
+ import { Metadata } from "./metadata";
2
+ import { Channel } from "./channel";
3
+ import { Utterance } from "./utterance";
4
+ export declare class PrerecordedTranscriptionResponse {
5
+ request_id?: string;
6
+ metadata?: Metadata;
7
+ results?: {
8
+ channels: Array<Channel>;
9
+ utterances?: Array<Utterance>;
10
+ };
11
+ /**
12
+ * Converts the transcription to the WebVTT format
13
+ * @remarks In order to translate the transcription to WebVTT, the utterances
14
+ * feature must be used.
15
+ * @returns A string with the transcription in the WebVTT format
16
+ */
17
+ toWebVTT(): string;
18
+ /**
19
+ * Converts the transcription to the SRT format
20
+ * @remarks In order to translate the transcription to SRT, the utterances
21
+ * feature must be used.
22
+ * @returns A string with the transcription in the SRT format
23
+ */
24
+ toSRT(): string;
25
+ }
@@ -0,0 +1,17 @@
1
+ /**
2
+ * Deepgram project
3
+ */
4
+ export declare type Project = {
5
+ /**
6
+ * Unique identifier of the project
7
+ */
8
+ project_id: string;
9
+ /**
10
+ * User provided name of the project
11
+ */
12
+ name?: string;
13
+ /**
14
+ * Name of the company associated with the project. Optional.
15
+ */
16
+ company?: string;
17
+ };
@@ -0,0 +1,4 @@
1
+ export declare type ProjectPatchRequest = {
2
+ name?: string;
3
+ company?: string;
4
+ };
@@ -0,0 +1,6 @@
1
+ export declare type ProjectPatchResponse = {
2
+ /**
3
+ * Success message.
4
+ */
5
+ message: string;
6
+ };
@@ -0,0 +1,4 @@
1
+ import { Project } from "./project";
2
+ export declare type ProjectResponse = {
3
+ projects: Array<Project>;
4
+ };
@@ -0,0 +1,5 @@
1
+ /// <reference types="node" />
2
+ import { ReadStream } from "fs";
3
+ export declare type RequestFunction = NodeRequest | BrowserRequest;
4
+ export declare type NodeRequest = (method: string, api_key: string, apiUrl: string, path: string, payload?: string | Buffer | ReadStream, options?: Object) => Promise<any>;
5
+ export declare type BrowserRequest = (method: string, api_key: string, apiUrl: string, path: string, payload?: string) => Promise<any>;
@@ -0,0 +1,3 @@
1
+ export declare type ScopeList = {
2
+ scopes: Array<string>;
3
+ };
@@ -0,0 +1,14 @@
1
+ import { Hit } from "./hit";
2
+ /**
3
+ * Search result for a transcription
4
+ */
5
+ export declare type Search = {
6
+ /**
7
+ * Term for which Deepgram is searching.
8
+ */
9
+ query: string;
10
+ /**
11
+ * Instances of query found in transcript
12
+ */
13
+ hits: Array<Hit>;
14
+ };
@@ -0,0 +1,14 @@
1
+ /// <reference types="node" />
2
+ import { Readable } from "stream";
3
+ export declare type TranscriptionSource = UrlSource | BufferSource | ReadStreamSource;
4
+ export declare type ReadStreamSource = {
5
+ stream: Readable;
6
+ mimetype: string;
7
+ };
8
+ export declare type UrlSource = {
9
+ url: string;
10
+ };
11
+ export declare type BufferSource = {
12
+ buffer: Buffer;
13
+ mimetype: string;
14
+ };
@@ -0,0 +1,4 @@
1
+ export declare type UsageCallback = {
2
+ code: number;
3
+ completed: string;
4
+ };
@@ -0,0 +1,7 @@
1
+ export declare type UsageField = {
2
+ tags: Array<string>;
3
+ models: Array<string>;
4
+ processing_methods: Array<string>;
5
+ languages: Array<string>;
6
+ features: Array<string>;
7
+ };
@@ -0,0 +1,4 @@
1
+ export declare type UsageFieldOptions = {
2
+ start?: string;
3
+ end?: string;
4
+ };
@@ -0,0 +1,23 @@
1
+ export declare type UsageOptions = {
2
+ start?: string;
3
+ end?: string;
4
+ accessor?: string;
5
+ tag?: Array<string>;
6
+ method?: "sync" | "async" | "streaming";
7
+ model?: string;
8
+ multichannel?: boolean;
9
+ interim_results?: boolean;
10
+ punctuate?: boolean;
11
+ ner?: boolean;
12
+ utterances?: boolean;
13
+ replace?: boolean;
14
+ profanity_filter?: boolean;
15
+ keywords?: boolean;
16
+ sentiment?: boolean;
17
+ diarize?: boolean;
18
+ detect_language?: boolean;
19
+ search?: boolean;
20
+ redact?: boolean;
21
+ alternatives?: boolean;
22
+ numerals?: boolean;
23
+ };
@@ -0,0 +1,11 @@
1
+ import { UsageCallback } from "./usageCallback";
2
+ import { UsageRequestDetail } from "./usageRequestDetail";
3
+ import { UsageRequestMessage } from "./usageRequestMessage";
4
+ export declare type UsageRequest = {
5
+ request_id: string;
6
+ created: string;
7
+ path: string;
8
+ accessor: string;
9
+ response?: UsageRequestDetail | UsageRequestMessage;
10
+ callback?: UsageCallback;
11
+ };