@ai-sdk/deepgram 2.0.10 → 2.0.12

This diff shows the content of publicly released versions of this package as published to its public registry. It is provided for informational purposes only and reflects the changes between those package versions as they appear in the registry.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
  # @ai-sdk/deepgram

+ ## 2.0.12
+
+ ### Patch Changes
+
+ - Updated dependencies [462ad00]
+   - @ai-sdk/provider-utils@4.0.10
+
+ ## 2.0.11
+
+ ### Patch Changes
+
+ - 4de5a1d: chore: excluded tests from src folder in npm package
+ - Updated dependencies [4de5a1d]
+   - @ai-sdk/provider@3.0.5
+   - @ai-sdk/provider-utils@4.0.9
+
  ## 2.0.10

  ### Patch Changes
package/dist/index.js CHANGED
@@ -593,7 +593,7 @@ var DeepgramSpeechModel = class {
  };

  // src/version.ts
- var VERSION = true ? "2.0.10" : "0.0.0-test";
+ var VERSION = true ? "2.0.12" : "0.0.0-test";

  // src/deepgram-provider.ts
  function createDeepgram(options = {}) {
package/dist/index.mjs CHANGED
@@ -579,7 +579,7 @@ var DeepgramSpeechModel = class {
  };

  // src/version.ts
- var VERSION = true ? "2.0.10" : "0.0.0-test";
+ var VERSION = true ? "2.0.12" : "0.0.0-test";

  // src/deepgram-provider.ts
  function createDeepgram(options = {}) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@ai-sdk/deepgram",
-   "version": "2.0.10",
+   "version": "2.0.12",
    "license": "Apache-2.0",
    "sideEffects": false,
    "main": "./dist/index.js",
@@ -10,6 +10,10 @@
      "dist/**/*",
      "docs/**/*",
      "src",
+     "!src/**/*.test.ts",
+     "!src/**/*.test-d.ts",
+     "!src/**/__snapshots__",
+     "!src/**/__fixtures__",
      "CHANGELOG.md",
      "README.md"
    ],
@@ -25,16 +29,16 @@
      }
    },
    "dependencies": {
-     "@ai-sdk/provider": "3.0.4",
-     "@ai-sdk/provider-utils": "4.0.8"
+     "@ai-sdk/provider": "3.0.5",
+     "@ai-sdk/provider-utils": "4.0.10"
    },
    "devDependencies": {
      "@types/node": "20.17.24",
      "tsup": "^8",
      "typescript": "5.6.3",
      "zod": "3.25.76",
-     "@ai-sdk/test-server": "1.0.2",
-     "@vercel/ai-tsconfig": "0.0.0"
+     "@vercel/ai-tsconfig": "0.0.0",
+     "@ai-sdk/test-server": "1.0.3"
    },
    "peerDependencies": {
      "zod": "^3.25.76 || ^4.1.8"
@@ -1,34 +0,0 @@
- import { safeParseJSON } from '@ai-sdk/provider-utils';
- import { deepgramErrorDataSchema } from './deepgram-error';
- import { describe, expect, it } from 'vitest';
-
- describe('deepgramErrorDataSchema', () => {
-   it('should parse Deepgram resource exhausted error', async () => {
-     const error = `
- {"error":{"message":"{\\n \\"error\\": {\\n \\"code\\": 429,\\n \\"message\\": \\"Resource has been exhausted (e.g. check quota).\\",\\n \\"status\\": \\"RESOURCE_EXHAUSTED\\"\\n }\\n}\\n","code":429}}
- `;
-
-     const result = await safeParseJSON({
-       text: error,
-       schema: deepgramErrorDataSchema,
-     });
-
-     expect(result).toStrictEqual({
-       success: true,
-       value: {
-         error: {
-           message:
-             '{\n "error": {\n "code": 429,\n "message": "Resource has been exhausted (e.g. check quota).",\n "status": "RESOURCE_EXHAUSTED"\n }\n}\n',
-           code: 429,
-         },
-       },
-       rawValue: {
-         error: {
-           message:
-             '{\n "error": {\n "code": 429,\n "message": "Resource has been exhausted (e.g. check quota).",\n "status": "RESOURCE_EXHAUSTED"\n }\n}\n',
-           code: 429,
-         },
-       },
-     });
-   });
- });
@@ -1,355 +0,0 @@
- import { createTestServer } from '@ai-sdk/test-server/with-vitest';
- import { createDeepgram } from './deepgram-provider';
- import { DeepgramSpeechModel } from './deepgram-speech-model';
- import { describe, it, expect, vi } from 'vitest';
-
- vi.mock('./version', () => ({
-   VERSION: '0.0.0-test',
- }));
-
- const provider = createDeepgram({ apiKey: 'test-api-key' });
- const model = provider.speech('aura-2-helena-en');
-
- const server = createTestServer({
-   'https://api.deepgram.com/v1/speak': {},
- });
-
- describe('doGenerate', () => {
-   function prepareAudioResponse({
-     headers,
-   }: {
-     headers?: Record<string, string>;
-   } = {}) {
-     const audioBuffer = new Uint8Array(100); // Mock audio data
-     server.urls['https://api.deepgram.com/v1/speak'].response = {
-       type: 'binary',
-       headers: {
-         'content-type': 'audio/mp3',
-         ...headers,
-       },
-       body: Buffer.from(audioBuffer),
-     };
-     return audioBuffer;
-   }
-
-   it('should pass the model and text', async () => {
-     prepareAudioResponse();
-
-     await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-     });
-
-     expect(await server.calls[0].requestBodyJson).toMatchObject({
-       text: 'Hello, welcome to Deepgram!',
-     });
-
-     const url = new URL(server.calls[0].requestUrl);
-     expect(url.searchParams.get('model')).toBe('aura-2-helena-en');
-   });
-
-   it('should pass headers', async () => {
-     prepareAudioResponse();
-
-     const provider = createDeepgram({
-       apiKey: 'test-api-key',
-       headers: {
-         'Custom-Provider-Header': 'provider-header-value',
-       },
-     });
-
-     await provider.speech('aura-2-helena-en').doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-       headers: {
-         'Custom-Request-Header': 'request-header-value',
-       },
-     });
-
-     expect(server.calls[0].requestHeaders).toMatchObject({
-       authorization: 'Token test-api-key',
-       'content-type': 'application/json',
-       'custom-provider-header': 'provider-header-value',
-       'custom-request-header': 'request-header-value',
-     });
-
-     expect(server.calls[0].requestUserAgent).toContain(
-       `ai-sdk/deepgram/0.0.0-test`,
-     );
-   });
-
-   it('should pass query parameters for model', async () => {
-     prepareAudioResponse();
-
-     await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-     });
-
-     const url = new URL(server.calls[0].requestUrl);
-     expect(url.searchParams.get('model')).toBe('aura-2-helena-en');
-   });
-
-   it('should map outputFormat to encoding/container', async () => {
-     prepareAudioResponse();
-
-     await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-       outputFormat: 'wav',
-     });
-
-     const url = new URL(server.calls[0].requestUrl);
-     expect(url.searchParams.get('container')).toBe('wav');
-     expect(url.searchParams.get('encoding')).toBe('linear16');
-   });
-
-   it('should pass provider options', async () => {
-     prepareAudioResponse();
-
-     await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-       providerOptions: {
-         deepgram: {
-           encoding: 'mp3',
-           bitRate: 48000,
-           container: 'wav',
-           callback: 'https://example.com/callback',
-           callbackMethod: 'POST',
-           mipOptOut: true,
-           tag: 'test-tag',
-         },
-       },
-     });
-
-     const url = new URL(server.calls[0].requestUrl);
-     expect(url.searchParams.get('encoding')).toBe('mp3');
-     expect(url.searchParams.get('bit_rate')).toBe('48000');
-     // mp3 doesn't support container, so it should be removed
-     expect(url.searchParams.get('container')).toBeNull();
-     expect(url.searchParams.get('callback')).toBe(
-       'https://example.com/callback',
-     );
-     expect(url.searchParams.get('callback_method')).toBe('POST');
-     expect(url.searchParams.get('mip_opt_out')).toBe('true');
-     expect(url.searchParams.get('tag')).toBe('test-tag');
-   });
-
-   it('should handle array tag', async () => {
-     prepareAudioResponse();
-
-     await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-       providerOptions: {
-         deepgram: {
-           tag: ['tag1', 'tag2'],
-         },
-       },
-     });
-
-     const url = new URL(server.calls[0].requestUrl);
-     expect(url.searchParams.get('tag')).toBe('tag1,tag2');
-   });
-
-   it('should return audio data', async () => {
-     const audio = new Uint8Array(100); // Mock audio data
-     prepareAudioResponse({
-       headers: {
-         'x-request-id': 'test-request-id',
-       },
-     });
-
-     const result = await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-     });
-
-     expect(result.audio).toStrictEqual(audio);
-   });
-
-   it('should include response data with timestamp, modelId and headers', async () => {
-     prepareAudioResponse({
-       headers: {
-         'x-request-id': 'test-request-id',
-       },
-     });
-
-     const testDate = new Date(0);
-     const customModel = new DeepgramSpeechModel('aura-2-helena-en', {
-       provider: 'test-provider',
-       url: () => 'https://api.deepgram.com/v1/speak',
-       headers: () => ({}),
-       _internal: {
-         currentDate: () => testDate,
-       },
-     });
-
-     const result = await customModel.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-     });
-
-     expect(result.response).toMatchObject({
-       timestamp: testDate,
-       modelId: 'aura-2-helena-en',
-       headers: {
-         'content-type': 'audio/mp3',
-         'x-request-id': 'test-request-id',
-       },
-     });
-   });
-
-   it('should warn about unsupported voice parameter', async () => {
-     prepareAudioResponse();
-
-     const result = await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-       voice: 'different-voice',
-     });
-
-     expect(result.warnings).toMatchInlineSnapshot(`
-       [
-         {
-           "details": "Deepgram TTS models embed the voice in the model ID. The voice parameter "different-voice" was ignored. Use the model ID to select a voice (e.g., "aura-2-helena-en").",
-           "feature": "voice",
-           "type": "unsupported",
-         },
-       ]
-     `);
-   });
-
-   it('should warn about unsupported speed parameter', async () => {
-     prepareAudioResponse();
-
-     const result = await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-       speed: 1.5,
-     });
-
-     expect(result.warnings).toMatchInlineSnapshot(`
-       [
-         {
-           "details": "Deepgram TTS REST API does not support speed adjustment. Speed parameter was ignored.",
-           "feature": "speed",
-           "type": "unsupported",
-         },
-       ]
-     `);
-   });
-
-   it('should warn about unsupported language parameter', async () => {
-     prepareAudioResponse();
-
-     const result = await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-       language: 'en',
-     });
-
-     expect(result.warnings).toMatchInlineSnapshot(`
-       [
-         {
-           "details": "Deepgram TTS models are language-specific via the model ID. Language parameter "en" was ignored. Select a model with the appropriate language suffix (e.g., "-en" for English).",
-           "feature": "language",
-           "type": "unsupported",
-         },
-       ]
-     `);
-   });
-
-   it('should warn about unsupported instructions parameter', async () => {
-     prepareAudioResponse();
-
-     const result = await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-       instructions: 'Speak slowly',
-     });
-
-     expect(result.warnings).toMatchInlineSnapshot(`
-       [
-         {
-           "details": "Deepgram TTS REST API does not support instructions. Instructions parameter was ignored.",
-           "feature": "instructions",
-           "type": "unsupported",
-         },
-       ]
-     `);
-   });
-
-   it('should include request body in response', async () => {
-     prepareAudioResponse();
-
-     const result = await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-     });
-
-     expect(result.request?.body).toBe(
-       JSON.stringify({ text: 'Hello, welcome to Deepgram!' }),
-     );
-   });
-
-   it('should clean up incompatible parameters when encoding changes via providerOptions', async () => {
-     prepareAudioResponse();
-
-     // Test case 1: outputFormat sets sample_rate, encoding changed to mp3 (fixed sample rate)
-     await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-       outputFormat: 'linear16_16000', // Sets: encoding=linear16, sample_rate=16000
-       providerOptions: {
-         deepgram: {
-           encoding: 'mp3', // Changes encoding to mp3
-         },
-       },
-     });
-
-     const url1 = new URL(server.calls[0].requestUrl);
-     expect(url1.searchParams.get('encoding')).toBe('mp3');
-     expect(url1.searchParams.get('sample_rate')).toBeNull(); // Should be removed
-
-     // Test case 2: outputFormat sets container for linear16, encoding changed to opus
-     await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-       outputFormat: 'linear16_16000', // Sets: encoding=linear16, container=wav
-       providerOptions: {
-         deepgram: {
-           encoding: 'opus', // Changes encoding to opus
-         },
-       },
-     });
-
-     const url2 = new URL(server.calls[1].requestUrl);
-     expect(url2.searchParams.get('encoding')).toBe('opus');
-     expect(url2.searchParams.get('container')).toBe('ogg'); // Should be ogg, not wav
-     expect(url2.searchParams.get('sample_rate')).toBeNull(); // Should be removed
-
-     // Test case 3: outputFormat sets bit_rate, encoding changed to linear16 (no bitrate support)
-     await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-       outputFormat: 'mp3', // Sets: encoding=mp3
-       providerOptions: {
-         deepgram: {
-           encoding: 'linear16', // Changes encoding to linear16
-           bitRate: 48000, // Try to set bitrate
-         },
-       },
-     });
-
-     const url3 = new URL(server.calls[2].requestUrl);
-     expect(url3.searchParams.get('encoding')).toBe('linear16');
-     expect(url3.searchParams.get('bit_rate')).toBeNull(); // Should be removed
-   });
-
-   it('should clean up incompatible parameters when container changes encoding implicitly', async () => {
-     prepareAudioResponse();
-
-     // Test case: outputFormat sets sample_rate, container changes encoding to opus
-     await model.doGenerate({
-       text: 'Hello, welcome to Deepgram!',
-       outputFormat: 'linear16_16000', // Sets: encoding=linear16, sample_rate=16000
-       providerOptions: {
-         deepgram: {
-           container: 'ogg', // Changes encoding to opus implicitly
-         },
-       },
-     });
-
-     const callIndex = server.calls.length - 1;
-     const url = new URL(server.calls[callIndex].requestUrl);
-     expect(url.searchParams.get('encoding')).toBe('opus');
-     expect(url.searchParams.get('container')).toBe('ogg');
-     expect(url.searchParams.get('sample_rate')).toBeNull(); // Should be removed (opus has fixed sample rate)
-   });
- });
@@ -1,249 +0,0 @@
- import { createTestServer } from '@ai-sdk/test-server/with-vitest';
- import { DeepgramTranscriptionModel } from './deepgram-transcription-model';
- import { createDeepgram } from './deepgram-provider';
- import { readFile } from 'node:fs/promises';
- import path from 'node:path';
- import { describe, it, expect, vi } from 'vitest';
-
- vi.mock('./version', () => ({
-   VERSION: '0.0.0-test',
- }));
-
- const audioData = await readFile(path.join(__dirname, 'transcript-test.mp3'));
- const provider = createDeepgram({ apiKey: 'test-api-key' });
- const model = provider.transcription('nova-3');
-
- const server = createTestServer({
-   'https://api.deepgram.com/v1/listen': {},
- });
-
- describe('doGenerate', () => {
-   function prepareJsonResponse({
-     headers,
-     detectedLanguage,
-   }: {
-     headers?: Record<string, string>;
-     detectedLanguage?: string;
-   } = {}) {
-     server.urls['https://api.deepgram.com/v1/listen'].response = {
-       type: 'json-value',
-       headers,
-       body: {
-         metadata: {
-           transaction_key: 'deprecated',
-           request_id: '2479c8c8-8185-40ac-9ac6-f0874419f793',
-           sha256:
-             '154e291ecfa8be6ab8343560bcc109008fa7853eb5372533e8efdefc9b504c33',
-           created: '2024-02-06T19:56:16.180Z',
-           duration: 25.933313,
-           channels: 1,
-           models: ['30089e05-99d1-4376-b32e-c263170674af'],
-           model_info: {
-             '30089e05-99d1-4376-b32e-c263170674af': {
-               name: '2-general-nova',
-               version: '2024-01-09.29447',
-               arch: 'nova-3',
-             },
-           },
-         },
-         results: {
-           channels: [
-             {
-               detected_language: detectedLanguage,
-               alternatives: [
-                 {
-                   transcript: 'Hello world!',
-                   confidence: 0.99902344,
-                   words: [
-                     {
-                       word: 'hello',
-                       start: 0.08,
-                       end: 0.32,
-                       confidence: 0.9975586,
-                       punctuated_word: 'Hello.',
-                     },
-                     {
-                       word: 'world',
-                       start: 0.32,
-                       end: 0.79999995,
-                       confidence: 0.9921875,
-                       punctuated_word: 'World',
-                     },
-                   ],
-                   paragraphs: {
-                     transcript: 'Hello world!',
-                     paragraphs: [
-                       {
-                         sentences: [
-                           {
-                             text: 'Hello world!',
-                             start: 0.08,
-                             end: 0.32,
-                           },
-                         ],
-                         num_words: 2,
-                         start: 0.08,
-                         end: 0.79999995,
-                       },
-                     ],
-                   },
-                 },
-               ],
-             },
-           ],
-         },
-       },
-     };
-   }
-
-   it('should pass the model', async () => {
-     prepareJsonResponse();
-
-     await model.doGenerate({
-       audio: audioData,
-       mediaType: 'audio/wav',
-     });
-
-     expect(await server.calls[0].requestBodyMultipart).toMatchObject({});
-   });
-
-   it('should pass headers', async () => {
-     prepareJsonResponse();
-
-     const provider = createDeepgram({
-       apiKey: 'test-api-key',
-       headers: {
-         'Custom-Provider-Header': 'provider-header-value',
-       },
-     });
-
-     await provider.transcription('nova-3').doGenerate({
-       audio: audioData,
-       mediaType: 'audio/wav',
-       headers: {
-         'Custom-Request-Header': 'request-header-value',
-       },
-     });
-
-     expect(server.calls[0].requestHeaders).toMatchObject({
-       authorization: 'Token test-api-key',
-       'content-type': 'audio/wav',
-       'custom-provider-header': 'provider-header-value',
-       'custom-request-header': 'request-header-value',
-     });
-     expect(server.calls[0].requestUserAgent).toContain(
-       `ai-sdk/deepgram/0.0.0-test`,
-     );
-   });
-
-   it('should extract the transcription text', async () => {
-     prepareJsonResponse();
-
-     const result = await model.doGenerate({
-       audio: audioData,
-       mediaType: 'audio/wav',
-     });
-
-     expect(result.text).toBe('Hello world!');
-   });
-
-   it('should include response data with timestamp, modelId and headers', async () => {
-     prepareJsonResponse({
-       headers: {
-         'x-request-id': 'test-request-id',
-         'x-ratelimit-remaining': '123',
-       },
-     });
-
-     const testDate = new Date(0);
-     const customModel = new DeepgramTranscriptionModel('nova-3', {
-       provider: 'test-provider',
-       url: () => 'https://api.deepgram.com/v1/listen',
-       headers: () => ({}),
-       _internal: {
-         currentDate: () => testDate,
-       },
-     });
-
-     const result = await customModel.doGenerate({
-       audio: audioData,
-       mediaType: 'audio/wav',
-     });
-
-     expect(result.response).toMatchObject({
-       timestamp: testDate,
-       modelId: 'nova-3',
-       headers: {
-         'content-type': 'application/json',
-         'x-request-id': 'test-request-id',
-         'x-ratelimit-remaining': '123',
-       },
-     });
-   });
-
-   it('should use real date when no custom date provider is specified', async () => {
-     prepareJsonResponse();
-
-     const testDate = new Date(0);
-     const customModel = new DeepgramTranscriptionModel('nova-3', {
-       provider: 'test-provider',
-       url: () => 'https://api.deepgram.com/v1/listen',
-       headers: () => ({}),
-       _internal: {
-         currentDate: () => testDate,
-       },
-     });
-
-     const result = await customModel.doGenerate({
-       audio: audioData,
-       mediaType: 'audio/wav',
-     });
-
-     expect(result.response.timestamp.getTime()).toEqual(testDate.getTime());
-     expect(result.response.modelId).toBe('nova-3');
-   });
-
-   it('should pass detectLanguage as detect_language query parameter', async () => {
-     prepareJsonResponse();
-
-     await model.doGenerate({
-       audio: audioData,
-       mediaType: 'audio/wav',
-       providerOptions: {
-         deepgram: {
-           detectLanguage: true,
-         },
-       },
-     });
-
-     const requestUrl = server.calls[0].requestUrl;
-     expect(requestUrl).toContain('detect_language=true');
-   });
-
-   it('should return detected language from response', async () => {
-     prepareJsonResponse({ detectedLanguage: 'sv' });
-
-     const result = await model.doGenerate({
-       audio: audioData,
-       mediaType: 'audio/wav',
-       providerOptions: {
-         deepgram: {
-           detectLanguage: true,
-         },
-       },
-     });
-
-     expect(result.language).toBe('sv');
-   });
-
-   it('should return undefined language when not detected', async () => {
-     prepareJsonResponse();
-
-     const result = await model.doGenerate({
-       audio: audioData,
-       mediaType: 'audio/wav',
-     });
-
-     expect(result.language).toBeUndefined();
-   });
- });
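
For reference, the deleted tests above exercise the package's public entry points. A minimal usage sketch based only on the calls shown in those tests (the environment variable name is an assumption; the model IDs are the ones used in the test fixtures):

import { createDeepgram } from '@ai-sdk/deepgram';

// Sketch derived from the deleted test files above.
// Assumption: the API key is read from an environment variable here;
// the tests pass a literal 'test-api-key' instead.
const deepgram = createDeepgram({ apiKey: process.env.DEEPGRAM_API_KEY });

// Model IDs as used in the tests: a TTS voice model and the nova-3 transcription model.
const speechModel = deepgram.speech('aura-2-helena-en');
const transcriptionModel = deepgram.transcription('nova-3');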