voice-router-dev 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,4821 @@
+ #!/usr/bin/env node
+ var __defProp = Object.defineProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+
+ // src/router/voice-router.ts
+ var VoiceRouter = class {
+   constructor(config) {
+     this.adapters = /* @__PURE__ */ new Map();
+     this.roundRobinIndex = 0;
+     this.config = {
+       selectionStrategy: "default",
+       ...config
+     };
+     if (Object.keys(config.providers).length === 0) {
+       throw new Error("VoiceRouter requires at least one provider configuration");
+     }
+     if (this.config.selectionStrategy === "default" && !this.config.defaultProvider) {
+       this.config.defaultProvider = Object.keys(config.providers)[0];
+     }
+   }
+   /**
+    * Register an adapter for a provider
+    *
+    * Call this method for each provider you want to use. The adapter will be
+    * initialized with the configuration provided in the constructor.
+    *
+    * @param adapter - Provider adapter instance to register
+    * @throws {Error} If no configuration found for the provider
+    *
+    * @example
+    * ```typescript
+    * const router = new VoiceRouter({
+    *   providers: {
+    *     gladia: { apiKey: 'YOUR_KEY' }
+    *   }
+    * });
+    *
+    * router.registerAdapter(new GladiaAdapter());
+    * ```
+    */
+   registerAdapter(adapter) {
+     const providerConfig = this.config.providers[adapter.name];
+     if (!providerConfig) {
+       throw new Error(`No configuration found for provider: ${adapter.name}`);
+     }
+     adapter.initialize(providerConfig);
+     this.adapters.set(adapter.name, adapter);
+   }
+   /**
+    * Get an adapter by provider name
+    */
+   getAdapter(provider) {
+     const adapter = this.adapters.get(provider);
+     if (!adapter) {
+       throw new Error(
+         `Provider '${provider}' is not registered. Available providers: ${Array.from(this.adapters.keys()).join(", ")}`
+       );
+     }
+     return adapter;
+   }
+   /**
+    * Select provider based on configured strategy
+    */
+   selectProvider(preferredProvider) {
+     if (preferredProvider) {
+       if (!this.adapters.has(preferredProvider)) {
+         throw new Error(
+           `Provider '${preferredProvider}' is not registered. Available providers: ${Array.from(this.adapters.keys()).join(", ")}`
+         );
+       }
+       return preferredProvider;
+     }
+     switch (this.config.selectionStrategy) {
+       case "explicit":
+         throw new Error(
+           "Provider must be explicitly specified when using 'explicit' selection strategy"
+         );
+       case "round-robin": {
+         const providers = Array.from(this.adapters.keys());
+         const provider = providers[this.roundRobinIndex % providers.length];
+         this.roundRobinIndex++;
+         return provider;
+       }
+       case "default":
+       default:
+         if (!this.config.defaultProvider) {
+           throw new Error("No default provider configured");
+         }
+         return this.config.defaultProvider;
+     }
+   }
+   /**
+    * Transcribe audio using a specific provider or the default
+    *
+    * Submit audio for transcription. The provider will be selected based on
+    * your configuration strategy (explicit, default, or round-robin).
+    *
+    * @param audio - Audio input (URL, file buffer, or stream)
+    * @param options - Transcription options (language, diarization, etc.)
+    * @param options.provider - Specific provider to use (overrides selection strategy)
+    * @returns Unified transcription response with normalized format
+    * @throws {Error} If provider not registered or selection fails
+    *
+    * @example URL audio
+    * ```typescript
+    * const result = await router.transcribe({
+    *   type: 'url',
+    *   url: 'https://example.com/audio.mp3'
+    * }, {
+    *   language: 'en',
+    *   diarization: true,
+    *   summarization: true
+    * });
+    *
+    * if (result.success) {
+    *   console.log('Transcript:', result.data.text);
+    *   console.log('Speakers:', result.data.speakers);
+    *   console.log('Summary:', result.data.summary);
+    * }
+    * ```
+    *
+    * @example Specific provider
+    * ```typescript
+    * const result = await router.transcribe(audio, {
+    *   provider: 'gladia', // Force use of Gladia
+    *   language: 'en'
+    * });
+    * ```
+    */
+   async transcribe(audio, options) {
+     const provider = this.selectProvider(options?.provider);
+     const adapter = this.getAdapter(provider);
+     const { provider: _, ...adapterOptions } = options || {};
+     return adapter.transcribe(audio, adapterOptions);
+   }
+   /**
+    * Get transcription result by ID
+    * Provider must be specified since IDs are provider-specific
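+    *
+    * @example Sketch (assumes an ID saved from an earlier transcribe() call)
+    * ```typescript
+    * const result = await router.getTranscript(savedTranscriptId, 'gladia');
+    * if (result.success) {
+    *   console.log(result.data.status, result.data.text);
+    * }
+    * ```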
+    */
+   async getTranscript(transcriptId, provider) {
+     const adapter = this.getAdapter(provider);
+     return adapter.getTranscript(transcriptId);
+   }
+   /**
+    * Stream audio for real-time transcription
+    * Only works with providers that support streaming
+    *
+    * @param options - Streaming options including provider selection
+    * @param callbacks - Event callbacks for transcription results
+    * @returns Promise that resolves with a StreamingSession
+    *
+    * @example
+    * ```typescript
+    * import { VoiceRouter } from '@meeting-baas/sdk';
+    *
+    * const router = new VoiceRouter({
+    *   providers: {
+    *     gladia: { apiKey: process.env.GLADIA_KEY },
+    *     deepgram: { apiKey: process.env.DEEPGRAM_KEY }
+    *   }
+    * });
+    * // Register an adapter for each provider you plan to stream with, e.g.:
+    * // router.registerAdapter(new DeepgramAdapter());
+    *
+    * const session = await router.transcribeStream({
+    *   provider: 'deepgram',
+    *   encoding: 'linear16',
+    *   sampleRate: 16000,
+    *   language: 'en'
+    * }, {
+    *   onTranscript: (event) => console.log(event.text),
+    *   onError: (error) => console.error(error)
+    * });
+    *
+    * // Send audio chunks
+    * await session.sendAudio({ data: audioBuffer });
+    * await session.close();
+    * ```
+    */
+   async transcribeStream(options, callbacks) {
+     const provider = this.selectProvider(options?.provider);
+     const adapter = this.getAdapter(provider);
+     if (!adapter.capabilities.streaming || !adapter.transcribeStream) {
+       throw new Error(`Provider '${provider}' does not support streaming transcription`);
+     }
+     const { provider: _, ...adapterOptions } = options || {};
+     return adapter.transcribeStream(adapterOptions, callbacks);
+   }
+   /**
+    * Delete a transcription
+    * Not all providers support this operation
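+    *
+    * @example Sketch (unsupported providers throw, so wrap the call in try/catch)
+    * ```typescript
+    * try {
+    *   await router.deleteTranscript(savedTranscriptId, 'assemblyai');
+    * } catch (err) {
+    *   console.error(err.message);
+    * }
+    * ```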
+    */
+   async deleteTranscript(transcriptId, provider) {
+     const adapter = this.getAdapter(provider);
+     if (!adapter.deleteTranscript) {
+       throw new Error(`Provider '${provider}' does not support deleting transcripts`);
+     }
+     return adapter.deleteTranscript(transcriptId);
+   }
+   /**
+    * List recent transcriptions
+    * Not all providers support this operation
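+    *
+    * @example Sketch (options are passed through to the adapter unchanged; their
+    * shape is provider-specific, so { limit: 10 } is only illustrative)
+    * ```typescript
+    * const recent = await router.listTranscripts('assemblyai', { limit: 10 });
+    * ```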
+    */
+   async listTranscripts(provider, options) {
+     const adapter = this.getAdapter(provider);
+     if (!adapter.listTranscripts) {
+       throw new Error(`Provider '${provider}' does not support listing transcripts`);
+     }
+     return adapter.listTranscripts(options);
+   }
+   /**
+    * Get capabilities for a specific provider
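+    *
+    * @example Sketch: gate streaming on the advertised capability
+    * ```typescript
+    * if (router.getProviderCapabilities('gladia').streaming) {
+    *   // safe to call router.transcribeStream({ provider: 'gladia', ... })
+    * }
+    * ```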
+    */
+   getProviderCapabilities(provider) {
+     const adapter = this.getAdapter(provider);
+     return adapter.capabilities;
+   }
+   /**
+    * Get all registered providers
+    */
+   getRegisteredProviders() {
+     return Array.from(this.adapters.keys());
+   }
+   /**
+    * Get raw provider client for advanced usage
+    */
+   getRawProviderClient(provider) {
+     const adapter = this.getAdapter(provider);
+     if (!adapter.getRawClient) {
+       throw new Error(`Provider '${provider}' does not expose a raw client`);
+     }
+     return adapter.getRawClient();
+   }
+ };
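+ /**
+  * Convenience factory: creates a router and registers adapters in one call.
+  *
+  * @example Sketch (equivalent to calling registerAdapter() for each adapter)
+  * ```typescript
+  * const router = createVoiceRouter(
+  *   { providers: { gladia: { apiKey: 'YOUR_KEY' } } },
+  *   [new GladiaAdapter()]
+  * );
+  * ```
+  */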
+ function createVoiceRouter(config, adapters) {
+   const router = new VoiceRouter(config);
+   if (adapters && adapters.length > 0) {
+     for (const adapter of adapters) {
+       router.registerAdapter(adapter);
+     }
+   }
+   return router;
+ }
+
+ // src/adapters/base-adapter.ts
+ var BaseAdapter = class {
+   initialize(config) {
+     this.config = config;
+   }
+   /**
+    * Helper method to create error responses
+    */
+   createErrorResponse(error, statusCode) {
+     const err = error;
+     return {
+       success: false,
+       provider: this.name,
+       error: {
+         code: err.code || "UNKNOWN_ERROR",
+         message: err.message || "An unknown error occurred",
+         statusCode: statusCode || err.statusCode,
+         details: error
+       }
+     };
+   }
+   /**
+    * Helper method to validate configuration
+    */
+   validateConfig() {
+     if (!this.config) {
+       throw new Error(`Adapter ${this.name} is not initialized. Call initialize() first.`);
+     }
+     if (!this.config.apiKey) {
+       throw new Error(`API key is required for ${this.name} provider`);
+     }
+   }
+ };
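+ /*
+  * BaseAdapter is the extension point for custom providers. A minimal sketch,
+  * assuming a hypothetical provider name and abbreviated capability flags:
+  *
+  *   class MyAdapter extends BaseAdapter {
+  *     name = "my-provider";
+  *     capabilities = { streaming: false, diarization: false };
+  *     async transcribe(audio, options) {
+  *       this.validateConfig();
+  *       // ...call the provider's API here and return a normalized response
+  *     }
+  *     async getTranscript(transcriptId) {
+  *       this.validateConfig();
+  *       // ...fetch a stored transcript here and return a normalized response
+  *     }
+  *   }
+  */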
+
+ // src/adapters/gladia-adapter.ts
+ import axios from "axios";
+ import WebSocket from "ws";
+ var GladiaAdapter = class extends BaseAdapter {
+   constructor() {
+     super(...arguments);
+     this.name = "gladia";
+     this.capabilities = {
+       streaming: true,
+       diarization: true,
+       wordTimestamps: true,
+       languageDetection: true,
+       customVocabulary: true,
+       summarization: true,
+       sentimentAnalysis: true,
+       entityDetection: true,
+       piiRedaction: false
+       // Gladia doesn't have PII redaction in their API
+     };
+     this.baseUrl = "https://api.gladia.io/v2";
+   }
+   initialize(config) {
+     super.initialize(config);
+     this.client = axios.create({
+       baseURL: config.baseUrl || this.baseUrl,
+       timeout: config.timeout || 6e4,
+       headers: {
+         "x-gladia-key": config.apiKey,
+         "Content-Type": "application/json",
+         ...config.headers
+       }
+     });
+   }
+   /**
+    * Submit audio for transcription
+    *
+    * Sends audio to the Gladia API for transcription. If a webhook URL is provided,
+    * returns immediately with the job ID. Otherwise, polls until completion.
+    *
+    * @param audio - Audio input (currently only URL type supported)
+    * @param options - Transcription options
+    * @param options.language - Language code (e.g., 'en', 'es', 'fr')
+    * @param options.languageDetection - Enable automatic language detection
+    * @param options.diarization - Enable speaker identification
+    * @param options.speakersExpected - Number of expected speakers (for diarization)
+    * @param options.summarization - Generate text summary
+    * @param options.sentimentAnalysis - Analyze sentiment of transcription
+    * @param options.customVocabulary - Words to boost in recognition
+    * @param options.webhookUrl - Callback URL for async results
+    * @returns Normalized transcription response
+    * @throws {Error} If audio type is not 'url' (file/stream not yet supported)
+    *
+    * @example Simple transcription
+    * ```typescript
+    * const result = await adapter.transcribe({
+    *   type: 'url',
+    *   url: 'https://example.com/meeting.mp3'
+    * });
+    * ```
+    *
+    * @example With advanced features
+    * ```typescript
+    * const result = await adapter.transcribe({
+    *   type: 'url',
+    *   url: 'https://example.com/meeting.mp3'
+    * }, {
+    *   language: 'en',
+    *   diarization: true,
+    *   speakersExpected: 3,
+    *   summarization: true,
+    *   customVocabulary: ['API', 'TypeScript', 'JavaScript']
+    * });
+    * ```
+    *
+    * @example With webhook (returns job ID immediately for polling)
+    * ```typescript
+    * // Submit transcription with webhook
+    * const result = await adapter.transcribe({
+    *   type: 'url',
+    *   url: 'https://example.com/meeting.mp3'
+    * }, {
+    *   webhookUrl: 'https://myapp.com/webhook/transcription',
+    *   language: 'en'
+    * });
+    *
+    * // Get job ID for polling
+    * const jobId = result.data?.id;
+    * console.log('Job ID:', jobId); // Use this to poll for status
+    *
+    * // Later: Poll for completion (if webhook fails or you want to check)
+    * const status = await adapter.getTranscript(jobId);
+    * if (status.data?.status === 'completed') {
+    *   console.log('Transcript:', status.data.text);
+    * }
+    * ```
+    */
+   async transcribe(audio, options) {
+     this.validateConfig();
+     try {
+       const payload = this.buildTranscriptionRequest(audio, options);
+       const response = await this.client.post(
+         "/transcription",
+         payload
+       );
+       const jobId = response.data.id;
+       if (options?.webhookUrl) {
+         return {
+           success: true,
+           provider: this.name,
+           data: {
+             id: jobId,
+             text: "",
+             status: "queued"
+           },
+           raw: response.data
+         };
+       }
+       return await this.pollForCompletion(jobId);
+     } catch (error) {
+       return this.createErrorResponse(error);
+     }
+   }
+   /**
+    * Get transcription result by ID
+    */
+   async getTranscript(transcriptId) {
+     this.validateConfig();
+     try {
+       const response = await this.client.get(`/transcription/${transcriptId}`);
+       return this.normalizeResponse(response.data);
+     } catch (error) {
+       return this.createErrorResponse(error);
+     }
+   }
+   /**
+    * Build Gladia transcription request from unified options
+    */
+   buildTranscriptionRequest(audio, options) {
+     let audioUrl;
+     if (audio.type === "url") {
+       audioUrl = audio.url;
+     } else {
+       throw new Error(
+         "Gladia adapter currently only supports URL-based audio input. Use audio.type='url'"
+       );
+     }
+     const request = {
+       audio_url: audioUrl
+     };
+     if (options) {
+       if (options.language || options.languageDetection) {
+         request.language_config = {
+           languages: options.language ? [options.language] : void 0,
+           code_switching: options.languageDetection
+         };
+       }
+       if (options.diarization) {
+         request.diarization = true;
+         if (options.speakersExpected) {
+           request.diarization_config = {
+             number_of_speakers: options.speakersExpected
+           };
+         }
+       }
+       if (options.customVocabulary && options.customVocabulary.length > 0) {
+         request.custom_vocabulary = true;
+         request.custom_vocabulary_config = {
+           vocabulary: options.customVocabulary
+         };
+       }
+       if (options.summarization) {
+         request.summarization = true;
+       }
+       if (options.sentimentAnalysis) {
+         request.sentiment_analysis = true;
+       }
+       if (options.entityDetection) {
+         request.named_entity_recognition = true;
+       }
+       if (options.webhookUrl) {
+         request.callback = true;
+         request.callback_config = {
+           url: options.webhookUrl
+         };
+       }
+       if (options.metadata) {
+         request.custom_metadata = options.metadata;
+       }
+     }
+     return request;
+   }
+   /**
+    * Normalize Gladia response to unified format
+    */
+   normalizeResponse(response) {
+     let status;
+     switch (response.status) {
+       case "queued":
+         status = "queued";
+         break;
+       case "processing":
+         status = "processing";
+         break;
+       case "done":
+         status = "completed";
+         break;
+       case "error":
+         status = "error";
+         break;
+       default:
+         status = "queued";
+     }
+     if (response.status === "error") {
+       return {
+         success: false,
+         provider: this.name,
+         error: {
+           code: response.error_code?.toString() || "TRANSCRIPTION_ERROR",
+           message: "Transcription failed",
+           statusCode: response.error_code || void 0
+         },
+         raw: response
+       };
+     }
+     const result = response.result;
+     const transcription = result?.transcription;
+     return {
+       success: true,
+       provider: this.name,
+       data: {
+         id: response.id,
+         text: transcription?.full_transcript || "",
+         confidence: void 0,
+         // Gladia doesn't provide overall confidence
+         status,
+         language: transcription?.languages?.[0],
+         // Use first detected language
+         duration: void 0,
+         // Not directly available in Gladia response
+         speakers: this.extractSpeakers(transcription),
+         words: this.extractWords(transcription),
+         utterances: this.extractUtterances(transcription),
+         summary: result?.summarization?.results || void 0,
+         metadata: {
+           requestParams: response.request_params,
+           customMetadata: response.custom_metadata
+         },
+         createdAt: response.created_at,
+         completedAt: response.completed_at || void 0
+       },
+       raw: response
+     };
+   }
+   /**
+    * Extract speaker information from Gladia response
+    */
+   extractSpeakers(transcription) {
+     if (!transcription?.utterances) {
+       return void 0;
+     }
+     const speakerSet = /* @__PURE__ */ new Set();
+     transcription.utterances.forEach((utterance) => {
+       if (utterance.speaker !== void 0) {
+         speakerSet.add(utterance.speaker);
+       }
+     });
+     if (speakerSet.size === 0) {
+       return void 0;
+     }
+     return Array.from(speakerSet).map((speakerId) => ({
+       id: speakerId.toString(),
+       label: `Speaker ${speakerId}`
+     }));
+   }
+   /**
+    * Extract word timestamps from Gladia response
+    */
+   extractWords(transcription) {
+     if (!transcription?.utterances) {
+       return void 0;
+     }
+     const allWords = transcription.utterances.flatMap(
+       (utterance) => utterance.words.map((word) => ({
+         text: word.word,
+         start: word.start,
+         end: word.end,
+         confidence: word.confidence,
+         speaker: utterance.speaker?.toString()
+       }))
+     );
+     return allWords.length > 0 ? allWords : void 0;
+   }
+   /**
+    * Extract utterances from Gladia response
+    */
+   extractUtterances(transcription) {
+     if (!transcription?.utterances) {
+       return void 0;
+     }
+     return transcription.utterances.map((utterance) => ({
+       text: utterance.text,
+       start: utterance.start,
+       end: utterance.end,
+       speaker: utterance.speaker?.toString(),
+       confidence: utterance.confidence,
+       words: utterance.words.map((word) => ({
+         text: word.word,
+         start: word.start,
+         end: word.end,
+         confidence: word.confidence
+       }))
+     }));
+   }
+   /**
+    * Poll for transcription completion
+    */
+   async pollForCompletion(jobId, maxAttempts = 60, intervalMs = 2e3) {
+     for (let attempt = 0; attempt < maxAttempts; attempt++) {
+       const result = await this.getTranscript(jobId);
+       if (!result.success) {
+         return result;
+       }
+       const status = result.data?.status;
+       if (status === "completed") {
+         return result;
+       }
+       if (status === "error") {
+         return {
+           success: false,
+           provider: this.name,
+           error: {
+             code: "TRANSCRIPTION_ERROR",
+             message: "Transcription failed"
+           },
+           raw: result.raw
+         };
+       }
+       await new Promise((resolve) => setTimeout(resolve, intervalMs));
+     }
+     return {
+       success: false,
+       provider: this.name,
+       error: {
+         code: "POLLING_TIMEOUT",
+         message: `Transcription did not complete after ${maxAttempts} attempts`
+       }
+     };
+   }
+   /**
+    * Stream audio for real-time transcription
+    *
+    * Creates a WebSocket connection to Gladia for streaming transcription.
+    * First initializes a session via REST API, then connects to WebSocket.
+    *
+    * @param options - Streaming configuration options
+    * @param callbacks - Event callbacks for transcription results
+    * @returns Promise that resolves with a StreamingSession
+    *
+    * @example Real-time streaming
+    * ```typescript
+    * const session = await adapter.transcribeStream({
+    *   encoding: 'wav/pcm',
+    *   sampleRate: 16000,
+    *   channels: 1,
+    *   language: 'en',
+    *   interimResults: true
+    * }, {
+    *   onOpen: () => console.log('Connected'),
+    *   onTranscript: (event) => {
+    *     if (event.isFinal) {
+    *       console.log('Final:', event.text);
+    *     } else {
+    *       console.log('Interim:', event.text);
+    *     }
+    *   },
+    *   onError: (error) => console.error('Error:', error),
+    *   onClose: () => console.log('Disconnected')
+    * });
+    *
+    * // Send audio chunks
+    * const audioChunk = getAudioChunk(); // Your audio source
+    * await session.sendAudio({ data: audioChunk });
+    *
+    * // Close when done
+    * await session.close();
+    * ```
+    */
+   async transcribeStream(options, callbacks) {
+     this.validateConfig();
+     const streamingRequest = {
+       encoding: options?.encoding,
+       sample_rate: options?.sampleRate,
+       channels: options?.channels,
+       endpointing: options?.endpointing
+     };
+     if (options?.language) {
+       streamingRequest.language_config = {
+         languages: [options.language]
+       };
+     }
+     const initResponse = await this.client.post(
+       "/streaming/init",
+       streamingRequest
+     );
+     const { id, url: wsUrl } = initResponse.data;
+     const ws = new WebSocket(wsUrl);
+     let sessionStatus = "connecting";
+     ws.on("open", () => {
+       sessionStatus = "open";
+       callbacks?.onOpen?.();
+     });
+     ws.on("message", (data) => {
+       try {
+         const message = JSON.parse(data.toString());
+         if (message.type === "transcript") {
+           callbacks?.onTranscript?.({
+             type: "transcript",
+             text: message.text || "",
+             isFinal: message.is_final === true,
+             confidence: message.confidence,
+             words: message.words?.map((word) => ({
+               text: word.word || word.text,
+               start: word.start,
+               end: word.end,
+               confidence: word.confidence
+             })),
+             data: message
+           });
+         } else if (message.type === "utterance") {
+           const utterance = {
+             text: message.text || "",
+             start: message.start || 0,
+             end: message.end || 0,
+             speaker: message.speaker?.toString(),
+             confidence: message.confidence,
+             words: message.words?.map((word) => ({
+               text: word.word || word.text,
+               start: word.start,
+               end: word.end,
+               confidence: word.confidence
+             }))
+           };
+           callbacks?.onUtterance?.(utterance);
+         } else if (message.type === "metadata") {
+           callbacks?.onMetadata?.(message);
+         }
+       } catch (error) {
+         callbacks?.onError?.({
+           code: "PARSE_ERROR",
+           message: "Failed to parse WebSocket message",
+           details: error
+         });
+       }
+     });
+     ws.on("error", (error) => {
+       callbacks?.onError?.({
+         code: "WEBSOCKET_ERROR",
+         message: error.message,
+         details: error
+       });
+     });
+     ws.on("close", (code, reason) => {
+       sessionStatus = "closed";
+       callbacks?.onClose?.(code, reason.toString());
+     });
+     await new Promise((resolve, reject) => {
+       const timeout = setTimeout(() => {
+         reject(new Error("WebSocket connection timeout"));
+       }, 1e4);
+       ws.once("open", () => {
+         clearTimeout(timeout);
+         resolve();
+       });
+       ws.once("error", (error) => {
+         clearTimeout(timeout);
+         reject(error);
+       });
+     });
+     return {
+       id,
+       provider: this.name,
+       createdAt: /* @__PURE__ */ new Date(),
+       getStatus: () => sessionStatus,
+       sendAudio: async (chunk) => {
+         if (sessionStatus !== "open") {
+           throw new Error(`Cannot send audio: session is ${sessionStatus}`);
+         }
+         if (ws.readyState !== WebSocket.OPEN) {
+           throw new Error("WebSocket is not open");
+         }
+         ws.send(chunk.data);
+         if (chunk.isLast) {
+           ws.send(
+             JSON.stringify({
+               type: "stop_recording"
+             })
+           );
+         }
+       },
+       close: async () => {
+         if (sessionStatus === "closed" || sessionStatus === "closing") {
+           return;
+         }
+         sessionStatus = "closing";
+         if (ws.readyState === WebSocket.OPEN) {
+           ws.send(
+             JSON.stringify({
+               type: "stop_recording"
+             })
+           );
+         }
+         return new Promise((resolve) => {
+           const timeout = setTimeout(() => {
+             ws.terminate();
+             resolve();
+           }, 5e3);
+           ws.close();
+           ws.once("close", () => {
+             clearTimeout(timeout);
+             sessionStatus = "closed";
+             resolve();
+           });
+         });
+       }
+     };
+   }
+ };
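+ /**
+  * Convenience factory: creates and initializes a GladiaAdapter in one step.
+  *
+  * @example Sketch
+  * ```typescript
+  * const gladia = createGladiaAdapter({ apiKey: process.env.GLADIA_KEY });
+  * ```
+  */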
+ function createGladiaAdapter(config) {
+   const adapter = new GladiaAdapter();
+   adapter.initialize(config);
+   return adapter;
+ }
+
+ // src/adapters/assemblyai-adapter.ts
+ import axios2 from "axios";
+ import WebSocket2 from "ws";
+ var AssemblyAIAdapter = class extends BaseAdapter {
+   constructor() {
+     super(...arguments);
+     this.name = "assemblyai";
+     this.capabilities = {
+       streaming: true,
+       diarization: true,
+       wordTimestamps: true,
+       languageDetection: true,
+       customVocabulary: true,
+       summarization: true,
+       sentimentAnalysis: true,
+       entityDetection: true,
+       piiRedaction: true
+     };
+     this.baseUrl = "https://api.assemblyai.com/v2";
+     this.wsBaseUrl = "wss://api.assemblyai.com/v2/realtime/ws";
+   }
+   initialize(config) {
+     super.initialize(config);
+     this.client = axios2.create({
+       baseURL: config.baseUrl || this.baseUrl,
+       timeout: config.timeout || 6e4,
+       headers: {
+         authorization: config.apiKey,
+         "Content-Type": "application/json",
+         ...config.headers
+       }
+     });
+   }
+   /**
+    * Submit audio for transcription
+    *
+    * Sends audio to the AssemblyAI API for transcription. If a webhook URL is provided,
+    * returns immediately with the job ID. Otherwise, polls until completion.
+    *
+    * @param audio - Audio input (currently only URL type supported)
+    * @param options - Transcription options
+    * @param options.language - Language code (e.g., 'en', 'en_us', 'es', 'fr')
+    * @param options.languageDetection - Enable automatic language detection
+    * @param options.diarization - Enable speaker identification (speaker_labels)
+    * @param options.speakersExpected - Number of expected speakers
+    * @param options.summarization - Generate text summary
+    * @param options.sentimentAnalysis - Analyze sentiment of transcription
+    * @param options.entityDetection - Detect named entities (people, places, etc.)
+    * @param options.piiRedaction - Redact personally identifiable information
+    * @param options.customVocabulary - Words to boost in recognition
+    * @param options.webhookUrl - Callback URL for async results
+    * @returns Normalized transcription response
+    * @throws {Error} If audio type is not 'url' (file/stream not yet supported)
+    *
+    * @example Simple transcription
+    * ```typescript
+    * const result = await adapter.transcribe({
+    *   type: 'url',
+    *   url: 'https://example.com/meeting.mp3'
+    * });
+    * ```
+    *
+    * @example With advanced features
+    * ```typescript
+    * const result = await adapter.transcribe({
+    *   type: 'url',
+    *   url: 'https://example.com/meeting.mp3'
+    * }, {
+    *   language: 'en_us',
+    *   diarization: true,
+    *   speakersExpected: 3,
+    *   summarization: true,
+    *   sentimentAnalysis: true,
+    *   entityDetection: true,
+    *   customVocabulary: ['API', 'TypeScript', 'JavaScript']
+    * });
+    * ```
+    *
+    * @example With webhook (returns transcript ID immediately for polling)
+    * ```typescript
+    * // Submit transcription with webhook
+    * const result = await adapter.transcribe({
+    *   type: 'url',
+    *   url: 'https://example.com/meeting.mp3'
+    * }, {
+    *   webhookUrl: 'https://myapp.com/webhook/transcription',
+    *   language: 'en_us'
+    * });
+    *
+    * // Get transcript ID for polling
+    * const transcriptId = result.data?.id;
+    * console.log('Transcript ID:', transcriptId); // Use this to poll for status
+    *
+    * // Later: Poll for completion (if webhook fails or you want to check)
+    * const status = await adapter.getTranscript(transcriptId);
+    * if (status.data?.status === 'completed') {
+    *   console.log('Transcript:', status.data.text);
+    * }
+    * ```
+    */
+   async transcribe(audio, options) {
+     this.validateConfig();
+     try {
+       const payload = this.buildTranscriptionRequest(audio, options);
+       const response = await this.client.post("/transcript", payload);
+       const transcriptId = response.data.id;
+       if (options?.webhookUrl) {
+         return {
+           success: true,
+           provider: this.name,
+           data: {
+             id: transcriptId,
+             text: "",
+             status: "queued"
+           },
+           raw: response.data
+         };
+       }
+       return await this.pollForCompletion(transcriptId);
+     } catch (error) {
+       return this.createErrorResponse(error);
+     }
+   }
+   /**
+    * Get transcription result by ID
+    */
+   async getTranscript(transcriptId) {
+     this.validateConfig();
+     try {
+       const response = await this.client.get(`/transcript/${transcriptId}`);
+       return this.normalizeResponse(response.data);
+     } catch (error) {
+       return this.createErrorResponse(error);
+     }
+   }
+   /**
+    * Build AssemblyAI transcription request from unified options
+    */
+   buildTranscriptionRequest(audio, options) {
+     let audioUrl;
+     if (audio.type === "url") {
+       audioUrl = audio.url;
+     } else {
+       throw new Error(
+         "AssemblyAI adapter currently only supports URL-based audio input. Use audio.type='url'"
+       );
+     }
+     const request = {
+       audio_url: audioUrl
+     };
+     if (options) {
+       if (options.language) {
+         // Only English has a regional default; other codes pass through unchanged
+         const languageCode = options.language === "en" ? "en_us" : options.language;
+         request.language_code = languageCode;
+       }
+       if (options.languageDetection) {
+         request.language_detection = true;
+       }
+       if (options.diarization) {
+         request.speaker_labels = true;
+         if (options.speakersExpected) {
+           request.speakers_expected = options.speakersExpected;
+         }
+       }
+       if (options.customVocabulary && options.customVocabulary.length > 0) {
+         request.word_boost = options.customVocabulary;
+         request.boost_param = "high";
+       }
+       if (options.summarization) {
+         request.summarization = true;
+         request.summary_model = "informative";
+         request.summary_type = "bullets";
+       }
+       if (options.sentimentAnalysis) {
+         request.sentiment_analysis = true;
+       }
+       if (options.entityDetection) {
+         request.entity_detection = true;
+       }
+       if (options.piiRedaction) {
+         request.redact_pii = true;
+       }
+       if (options.webhookUrl) {
+         request.webhook_url = options.webhookUrl;
+       }
+       request.punctuate = true;
+       request.format_text = true;
+     }
+     return request;
+   }
+   /**
+    * Normalize AssemblyAI response to unified format
+    */
+   normalizeResponse(response) {
+     let status;
+     switch (response.status) {
+       case "queued":
+         status = "queued";
+         break;
+       case "processing":
+         status = "processing";
+         break;
+       case "completed":
+         status = "completed";
+         break;
+       case "error":
+         status = "error";
+         break;
+       default:
+         status = "queued";
+     }
+     if (response.status === "error") {
+       return {
+         success: false,
+         provider: this.name,
+         error: {
+           code: "TRANSCRIPTION_ERROR",
+           message: response.error || "Transcription failed"
+         },
+         raw: response
+       };
+     }
+     return {
+       success: true,
+       provider: this.name,
+       data: {
+         id: response.id,
+         text: response.text || "",
+         confidence: response.confidence !== null ? response.confidence : void 0,
+         status,
+         language: response.language_code,
+         duration: response.audio_duration ? response.audio_duration : void 0,
+         // audio_duration is reported by AssemblyAI in seconds already
+         speakers: this.extractSpeakers(response),
+         words: this.extractWords(response),
+         utterances: this.extractUtterances(response),
+         summary: response.summary || void 0,
+         metadata: {
+           audioUrl: response.audio_url,
+           entities: response.entities,
+           sentimentAnalysis: response.sentiment_analysis_results,
+           contentModeration: response.content_safety_labels
+         }
+       },
+       raw: response
+     };
+   }
+   /**
+    * Extract speaker information from AssemblyAI response
+    */
+   extractSpeakers(transcript) {
+     if (!transcript.utterances || transcript.utterances.length === 0) {
+       return void 0;
+     }
+     const speakerSet = /* @__PURE__ */ new Set();
+     transcript.utterances.forEach((utterance) => {
+       if (utterance.speaker) {
+         speakerSet.add(utterance.speaker);
+       }
+     });
+     if (speakerSet.size === 0) {
+       return void 0;
+     }
+     return Array.from(speakerSet).map((speakerId) => ({
+       id: speakerId,
+       label: speakerId
+       // AssemblyAI uses format like "A", "B", "C"
+     }));
+   }
+   /**
+    * Extract word timestamps from AssemblyAI response
+    */
+   extractWords(transcript) {
+     if (!transcript.words || transcript.words.length === 0) {
+       return void 0;
+     }
+     return transcript.words.map((word) => ({
+       text: word.text,
+       start: word.start / 1e3,
+       // Convert ms to seconds
+       end: word.end / 1e3,
+       // Convert ms to seconds
+       confidence: word.confidence,
+       speaker: word.speaker || void 0
+     }));
+   }
+   /**
+    * Extract utterances from AssemblyAI response
+    */
+   extractUtterances(transcript) {
+     if (!transcript.utterances || transcript.utterances.length === 0) {
+       return void 0;
+     }
+     return transcript.utterances.map((utterance) => ({
+       text: utterance.text,
+       start: utterance.start / 1e3,
+       // Convert ms to seconds
+       end: utterance.end / 1e3,
+       // Convert ms to seconds
+       speaker: utterance.speaker || void 0,
+       confidence: utterance.confidence,
+       words: utterance.words.map((word) => ({
+         text: word.text,
+         start: word.start / 1e3,
+         end: word.end / 1e3,
+         confidence: word.confidence
+       }))
+     }));
+   }
+   /**
+    * Poll for transcription completion
+    */
+   async pollForCompletion(transcriptId, maxAttempts = 60, intervalMs = 3e3) {
+     for (let attempt = 0; attempt < maxAttempts; attempt++) {
+       const result = await this.getTranscript(transcriptId);
+       if (!result.success) {
+         return result;
+       }
+       const status = result.data?.status;
+       if (status === "completed") {
+         return result;
+       }
+       if (status === "error") {
+         return {
+           success: false,
+           provider: this.name,
+           error: {
+             code: "TRANSCRIPTION_ERROR",
+             message: "Transcription failed"
+           },
+           raw: result.raw
+         };
+       }
+       await new Promise((resolve) => setTimeout(resolve, intervalMs));
+     }
+     return {
+       success: false,
+       provider: this.name,
+       error: {
+         code: "POLLING_TIMEOUT",
+         message: `Transcription did not complete after ${maxAttempts} attempts`
+       }
+     };
+   }
+   /**
+    * Stream audio for real-time transcription
+    *
+    * Creates a WebSocket connection to AssemblyAI for streaming transcription.
+    * First obtains a temporary token, then connects and streams audio chunks.
+    *
+    * @param options - Streaming configuration options
+    * @param callbacks - Event callbacks for transcription results
+    * @returns Promise that resolves with a StreamingSession
+    *
+    * @example Real-time streaming
+    * ```typescript
+    * const session = await adapter.transcribeStream({
+    *   encoding: 'pcm_s16le',
+    *   sampleRate: 16000,
+    *   language: 'en',
+    *   interimResults: true
+    * }, {
+    *   onOpen: () => console.log('Connected'),
+    *   onTranscript: (event) => {
+    *     if (event.isFinal) {
+    *       console.log('Final:', event.text);
+    *     } else {
+    *       console.log('Interim:', event.text);
+    *     }
+    *   },
+    *   onError: (error) => console.error('Error:', error),
+    *   onClose: () => console.log('Disconnected')
+    * });
+    *
+    * // Send audio chunks
+    * const audioChunk = getAudioChunk(); // Your audio source
+    * await session.sendAudio({ data: audioChunk });
+    *
+    * // Close when done
+    * await session.close();
+    * ```
+    */
+   async transcribeStream(options, callbacks) {
+     this.validateConfig();
+     const tokenResponse = await this.client.post("/realtime/token", {
+       expires_in: 3600
+       // Token expires in 1 hour
+     });
+     const token = tokenResponse.data.token;
+     const wsUrl = `${this.wsBaseUrl}?sample_rate=${options?.sampleRate || 16e3}&token=${token}`;
+     const ws = new WebSocket2(wsUrl);
+     let sessionStatus = "connecting";
+     const sessionId = `assemblyai-${Date.now()}-${Math.random().toString(36).substring(7)}`;
+     ws.on("open", () => {
+       sessionStatus = "open";
+       callbacks?.onOpen?.();
+     });
+     ws.on("message", (data) => {
+       try {
+         const message = JSON.parse(data.toString());
+         if (message.message_type === "SessionBegins") {
+           callbacks?.onMetadata?.({
+             sessionId: message.session_id,
+             expiresAt: message.expires_at
+           });
+         } else if (message.message_type === "PartialTranscript") {
+           callbacks?.onTranscript?.({
+             type: "transcript",
+             text: message.text || "",
+             isFinal: false,
+             confidence: message.confidence,
+             words: message.words?.map((word) => ({
+               text: word.text,
+               start: word.start / 1e3,
+               end: word.end / 1e3,
+               confidence: word.confidence
+             })),
+             data: message
+           });
+         } else if (message.message_type === "FinalTranscript") {
+           callbacks?.onTranscript?.({
+             type: "transcript",
+             text: message.text || "",
+             isFinal: true,
+             confidence: message.confidence,
+             words: message.words?.map((word) => ({
+               text: word.text,
+               start: word.start / 1e3,
+               end: word.end / 1e3,
+               confidence: word.confidence
+             })),
+             data: message
+           });
+         } else if (message.message_type === "SessionTerminated") {
+           callbacks?.onMetadata?.({ terminated: true });
+         }
+       } catch (error) {
+         callbacks?.onError?.({
+           code: "PARSE_ERROR",
+           message: "Failed to parse WebSocket message",
+           details: error
+         });
+       }
+     });
+     ws.on("error", (error) => {
+       callbacks?.onError?.({
+         code: "WEBSOCKET_ERROR",
+         message: error.message,
+         details: error
+       });
+     });
+     ws.on("close", (code, reason) => {
+       sessionStatus = "closed";
+       callbacks?.onClose?.(code, reason.toString());
+     });
+     await new Promise((resolve, reject) => {
+       const timeout = setTimeout(() => {
+         reject(new Error("WebSocket connection timeout"));
+       }, 1e4);
+       ws.once("open", () => {
+         clearTimeout(timeout);
+         resolve();
+       });
+       ws.once("error", (error) => {
+         clearTimeout(timeout);
+         reject(error);
+       });
+     });
+     return {
+       id: sessionId,
+       provider: this.name,
+       createdAt: /* @__PURE__ */ new Date(),
+       getStatus: () => sessionStatus,
+       sendAudio: async (chunk) => {
+         if (sessionStatus !== "open") {
+           throw new Error(`Cannot send audio: session is ${sessionStatus}`);
+         }
+         if (ws.readyState !== WebSocket2.OPEN) {
+           throw new Error("WebSocket is not open");
+         }
+         const base64Audio = chunk.data.toString("base64");
+         ws.send(
+           JSON.stringify({
+             audio_data: base64Audio
+           })
+         );
+         if (chunk.isLast) {
+           ws.send(
+             JSON.stringify({
+               terminate_session: true
+             })
+           );
+         }
+       },
+       close: async () => {
+         if (sessionStatus === "closed" || sessionStatus === "closing") {
+           return;
+         }
+         sessionStatus = "closing";
+         if (ws.readyState === WebSocket2.OPEN) {
+           ws.send(
+             JSON.stringify({
+               terminate_session: true
+             })
+           );
+         }
+         return new Promise((resolve) => {
+           const timeout = setTimeout(() => {
+             ws.terminate();
+             resolve();
+           }, 5e3);
+           ws.close();
+           ws.once("close", () => {
+             clearTimeout(timeout);
+             sessionStatus = "closed";
+             resolve();
+           });
+         });
+       }
+     };
+   }
+ };
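+ /**
+  * Convenience factory: creates and initializes an AssemblyAIAdapter in one step.
+  *
+  * @example Sketch (the env var name is illustrative)
+  * ```typescript
+  * const assemblyai = createAssemblyAIAdapter({ apiKey: process.env.ASSEMBLYAI_KEY });
+  * ```
+  */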
+ function createAssemblyAIAdapter(config) {
+   const adapter = new AssemblyAIAdapter();
+   adapter.initialize(config);
+   return adapter;
+ }
+
+ // src/adapters/deepgram-adapter.ts
+ import axios3 from "axios";
+ import WebSocket3 from "ws";
+ var DeepgramAdapter = class extends BaseAdapter {
+   constructor() {
+     super(...arguments);
+     this.name = "deepgram";
+     this.capabilities = {
+       streaming: true,
+       diarization: true,
+       wordTimestamps: true,
+       languageDetection: true,
+       customVocabulary: true,
+       summarization: true,
+       sentimentAnalysis: true,
+       entityDetection: true,
+       piiRedaction: true
+     };
+     this.baseUrl = "https://api.deepgram.com/v1";
+     this.wsBaseUrl = "wss://api.deepgram.com/v1/listen";
+   }
+   initialize(config) {
+     super.initialize(config);
+     this.client = axios3.create({
+       baseURL: config.baseUrl || this.baseUrl,
+       timeout: config.timeout || 6e4,
+       headers: {
+         Authorization: `Token ${config.apiKey}`,
+         "Content-Type": "application/json",
+         ...config.headers
+       }
+     });
+   }
+   /**
+    * Submit audio for transcription
+    *
+    * Sends audio to the Deepgram API for transcription. Deepgram processes
+    * synchronously and returns results immediately (no polling required).
+    *
+    * @param audio - Audio input (URL or file buffer)
+    * @param options - Transcription options
+    * @param options.language - Language code (e.g., 'en', 'es', 'fr')
+    * @param options.languageDetection - Enable automatic language detection
+    * @param options.diarization - Enable speaker identification (diarize)
+    * @param options.speakersExpected - Expected number of speakers
+    * @param options.summarization - Generate text summary
+    * @param options.sentimentAnalysis - Analyze sentiment
+    * @param options.entityDetection - Detect named entities
+    * @param options.piiRedaction - Redact personally identifiable information
+    * @param options.customVocabulary - Keywords to boost in recognition
+    * @param options.webhookUrl - Callback URL for async processing
+    * @returns Normalized transcription response
+    *
+    * @example Simple transcription
+    * ```typescript
+    * const result = await adapter.transcribe({
+    *   type: 'url',
+    *   url: 'https://example.com/meeting.mp3'
+    * });
+    * ```
+    *
+    * @example With advanced features
+    * ```typescript
+    * const result = await adapter.transcribe({
+    *   type: 'url',
+    *   url: 'https://example.com/meeting.mp3'
+    * }, {
+    *   language: 'en',
+    *   diarization: true,
+    *   summarization: true,
+    *   sentimentAnalysis: true,
+    *   entityDetection: true,
+    *   customVocabulary: ['API', 'TypeScript', 'JavaScript']
+    * });
+    * ```
+    */
+   async transcribe(audio, options) {
+     this.validateConfig();
+     try {
+       const params = this.buildTranscriptionParams(options);
+       let response;
+       if (audio.type === "url") {
+         response = await this.client.post(
+           "/listen",
+           { url: audio.url },
+           { params }
+         ).then((res) => res.data);
+       } else if (audio.type === "file") {
+         response = await this.client.post("/listen", audio.file, {
+           params,
+           headers: {
+             "Content-Type": "audio/*"
+           }
+         }).then((res) => res.data);
+       } else {
+         throw new Error(
+           "Deepgram adapter does not support stream type for pre-recorded transcription. Use transcribeStream() for real-time streaming."
+         );
+       }
+       return this.normalizeResponse(response);
+     } catch (error) {
+       return this.createErrorResponse(error);
+     }
+   }
+   /**
+    * Get transcription result by ID
+    *
+    * Note: Deepgram processes synchronously and this adapter does not cache
+    * results, so this method always returns a NOT_SUPPORTED error. Store the
+    * response from the initial transcribe() call instead.
+    *
+    * @param transcriptId - Request ID from Deepgram (unused)
+    * @returns Error response with code NOT_SUPPORTED
+    */
+   async getTranscript(transcriptId) {
+     this.validateConfig();
+     return {
+       success: false,
+       provider: this.name,
+       error: {
+         code: "NOT_SUPPORTED",
+         message: "Deepgram returns transcription results immediately. Store the response from transcribe() instead of using getTranscript()."
+       }
+     };
+   }
+   /**
+    * Build Deepgram transcription parameters from unified options
+    */
+   buildTranscriptionParams(options) {
+     const params = {};
+     if (!options) {
+       return params;
+     }
+     if (options.language) {
+       params.language = options.language;
+     }
+     if (options.languageDetection) {
+       params.detect_language = true;
+     }
+     if (options.diarization) {
+       params.diarize = true;
+     }
+     if (options.customVocabulary && options.customVocabulary.length > 0) {
+       params.keywords = options.customVocabulary;
+     }
+     if (options.summarization) {
+       params.summarize = true;
+     }
+     if (options.sentimentAnalysis) {
+       params.sentiment = true;
+     }
+     if (options.entityDetection) {
+       params.detect_entities = true;
+     }
+     if (options.piiRedaction) {
+       params.redact = true;
+     }
+     if (options.webhookUrl) {
+       params.callback = options.webhookUrl;
+     }
+     params.punctuate = true;
+     params.utterances = true;
+     params.smart_format = true;
+     return params;
+   }
+   /**
+    * Normalize Deepgram response to unified format
+    */
+   normalizeResponse(response) {
+     const channel = response.results.channels?.[0];
+     const alternative = channel?.alternatives?.[0];
+     if (!alternative) {
+       return {
+         success: false,
+         provider: this.name,
+         error: {
+           code: "NO_RESULTS",
+           message: "No transcription results returned by Deepgram"
+         },
+         raw: response
+       };
+     }
+     return {
+       success: true,
+       provider: this.name,
+       data: {
+         id: response.metadata?.request_id || "",
+         text: alternative.transcript || "",
+         confidence: alternative.confidence,
+         status: "completed",
+         // Deepgram returns completed results immediately
+         language: channel?.detected_language || void 0,
+         duration: response.metadata?.duration,
+         speakers: this.extractSpeakers(response),
+         words: this.extractWords(alternative),
+         utterances: this.extractUtterances(response),
+         summary: this.extractSummary(alternative),
+         metadata: {
+           modelInfo: response.metadata?.model_info,
+           channels: response.metadata?.channels,
+           sentiment: response.results.sentiments,
+           intents: response.results.intents,
+           topics: response.results.topics
+         }
+       },
+       raw: response
+     };
+   }
+   /**
+    * Extract speaker information from Deepgram response
+    */
+   extractSpeakers(response) {
+     const utterances = response.results.utterances;
+     if (!utterances || utterances.length === 0) {
+       return void 0;
+     }
+     const speakerSet = /* @__PURE__ */ new Set();
+     utterances.forEach((utterance) => {
+       if (utterance.speaker !== void 0) {
+         speakerSet.add(utterance.speaker);
+       }
+     });
+     if (speakerSet.size === 0) {
+       return void 0;
+     }
+     return Array.from(speakerSet).map((speakerId) => ({
+       id: speakerId.toString(),
+       label: `Speaker ${speakerId}`
+     }));
+   }
+   /**
+    * Extract word timestamps from Deepgram response
+    */
+   extractWords(alternative) {
+     if (!alternative.words || alternative.words.length === 0) {
+       return void 0;
+     }
+     return alternative.words.map(
+       (word) => ({
+         text: word.word || "",
+         start: word.start || 0,
+         end: word.end || 0,
+         confidence: word.confidence,
+         speaker: void 0
+         // Speaker info is at utterance level, not word level
+       })
+     );
+   }
+   /**
+    * Extract utterances from Deepgram response
+    */
+   extractUtterances(response) {
+     const utterances = response.results.utterances;
+     if (!utterances || utterances.length === 0) {
+       return void 0;
+     }
+     return utterances.map((utterance) => ({
+       text: utterance.transcript || "",
+       start: utterance.start || 0,
+       end: utterance.end || 0,
+       speaker: utterance.speaker?.toString(),
+       confidence: utterance.confidence,
+       words: utterance.words?.map((word) => ({
+         text: word.word || "",
+         start: word.start || 0,
+         end: word.end || 0,
+         confidence: word.confidence
+       }))
+     }));
+   }
+   /**
+    * Extract summary from Deepgram response
+    */
+   extractSummary(alternative) {
+     if (!alternative.summaries || alternative.summaries.length === 0) {
+       return void 0;
+     }
+     return alternative.summaries.map((summary) => summary.summary).filter(Boolean).join(" ");
+   }
+   /**
+    * Stream audio for real-time transcription
+    *
+    * Creates a WebSocket connection to Deepgram for streaming transcription.
+    * Send audio chunks via session.sendAudio() and receive results via callbacks.
+    *
+    * @param options - Streaming configuration options
+    * @param callbacks - Event callbacks for transcription results
+    * @returns Promise that resolves with a StreamingSession
+    *
+    * @example Real-time streaming
+    * ```typescript
+    * const session = await adapter.transcribeStream({
+    *   encoding: 'linear16',
+    *   sampleRate: 16000,
+    *   channels: 1,
+    *   language: 'en',
+    *   diarization: true,
+    *   interimResults: true
+    * }, {
+    *   onOpen: () => console.log('Connected'),
+    *   onTranscript: (event) => {
+    *     if (event.isFinal) {
+    *       console.log('Final:', event.text);
+    *     } else {
+    *       console.log('Interim:', event.text);
+    *     }
+    *   },
+    *   onError: (error) => console.error('Error:', error),
+    *   onClose: () => console.log('Disconnected')
+    * });
+    *
+    * // Send audio chunks
+    * const audioChunk = getAudioChunk(); // Your audio source
+    * await session.sendAudio({ data: audioChunk });
+    *
+    * // Close when done
+    * await session.close();
+    * ```
+    */
+   async transcribeStream(options, callbacks) {
+     this.validateConfig();
+     const params = new URLSearchParams();
+     if (options?.encoding) params.append("encoding", options.encoding);
+     if (options?.sampleRate) params.append("sample_rate", options.sampleRate.toString());
+     if (options?.channels) params.append("channels", options.channels.toString());
+     if (options?.language) params.append("language", options.language);
+     if (options?.languageDetection) params.append("detect_language", "true");
+     if (options?.diarization) params.append("diarize", "true");
+     if (options?.interimResults) params.append("interim_results", "true");
+     if (options?.summarization) params.append("summarize", "true");
+     if (options?.sentimentAnalysis) params.append("sentiment", "true");
+     if (options?.entityDetection) params.append("detect_entities", "true");
+     if (options?.piiRedaction) params.append("redact", "pii");
+     if (options?.customVocabulary && options.customVocabulary.length > 0) {
+       params.append("keywords", options.customVocabulary.join(","));
+     }
+     const wsUrl = `${this.wsBaseUrl}?${params.toString()}`;
+     const ws = new WebSocket3(wsUrl, {
+       headers: {
+         Authorization: `Token ${this.config.apiKey}`
+       }
+     });
+     let sessionStatus = "connecting";
+     const sessionId = `deepgram-${Date.now()}-${Math.random().toString(36).substring(7)}`;
+     ws.on("open", () => {
+       sessionStatus = "open";
+       callbacks?.onOpen?.();
+     });
+     ws.on("message", (data) => {
+       try {
+         const message = JSON.parse(data.toString());
+         if (message.type === "Results") {
+           const result = message;
+           const channel = result.channel?.alternatives?.[0];
+           if (channel) {
+             const transcript = channel.transcript || "";
+             const isFinal = message.is_final === true;
+             const words = channel.words?.map((word) => ({
+               text: word.word || "",
+               start: word.start || 0,
+               end: word.end || 0,
+               confidence: word.confidence
+             }));
+             callbacks?.onTranscript?.({
+               type: "transcript",
+               text: transcript,
+               isFinal,
+               words,
+               confidence: channel.confidence,
+               data: result
+             });
+           }
+         } else if (message.type === "UtteranceEnd") {
+           callbacks?.onMetadata?.(message);
+         } else if (message.type === "Metadata") {
+           callbacks?.onMetadata?.(message);
+         }
+       } catch (error) {
+         callbacks?.onError?.({
+           code: "PARSE_ERROR",
+           message: "Failed to parse WebSocket message",
+           details: error
+         });
+       }
+     });
+     ws.on("error", (error) => {
+       callbacks?.onError?.({
+         code: "WEBSOCKET_ERROR",
+         message: error.message,
+         details: error
+       });
+     });
+     ws.on("close", (code, reason) => {
+       sessionStatus = "closed";
+       callbacks?.onClose?.(code, reason.toString());
+     });
+     await new Promise((resolve, reject) => {
+       const timeout = setTimeout(() => {
+         reject(new Error("WebSocket connection timeout"));
+       }, 1e4);
+       ws.once("open", () => {
+         clearTimeout(timeout);
+         resolve();
+       });
+       ws.once("error", (error) => {
+         clearTimeout(timeout);
+         reject(error);
+       });
+     });
+     return {
+       id: sessionId,
+       provider: this.name,
+       createdAt: /* @__PURE__ */ new Date(),
+       getStatus: () => sessionStatus,
+       sendAudio: async (chunk) => {
+         if (sessionStatus !== "open") {
+           throw new Error(`Cannot send audio: session is ${sessionStatus}`);
+         }
+         if (ws.readyState !== WebSocket3.OPEN) {
+           throw new Error("WebSocket is not open");
+         }
+         ws.send(chunk.data);
+         if (chunk.isLast) {
+           ws.send(JSON.stringify({ type: "CloseStream" }));
+         }
+       },
+       close: async () => {
+         if (sessionStatus === "closed" || sessionStatus === "closing") {
+           return;
+         }
+         sessionStatus = "closing";
+         if (ws.readyState === WebSocket3.OPEN) {
+           ws.send(JSON.stringify({ type: "CloseStream" }));
+         }
+         return new Promise((resolve) => {
+           const timeout = setTimeout(() => {
+             ws.terminate();
+             resolve();
+           }, 5e3);
+           ws.close();
+           ws.once("close", () => {
+             clearTimeout(timeout);
+             sessionStatus = "closed";
+             resolve();
+           });
+         });
+       }
+     };
+   }
+ };
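+ /**
+  * Convenience factory: creates and initializes a DeepgramAdapter in one step.
+  *
+  * @example Sketch
+  * ```typescript
+  * const deepgram = createDeepgramAdapter({ apiKey: process.env.DEEPGRAM_KEY });
+  * ```
+  */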
1790
+ function createDeepgramAdapter(config) {
1791
+ const adapter = new DeepgramAdapter();
1792
+ adapter.initialize(config);
1793
+ return adapter;
1794
+ }
1795
+
1796
+ // src/adapters/azure-stt-adapter.ts
1797
+ import axios4 from "axios";
1798
+ var AzureSTTAdapter = class extends BaseAdapter {
1799
+ constructor() {
1800
+ super(...arguments);
1801
+ this.name = "azure-stt";
1802
+ this.capabilities = {
1803
+ streaming: false,
1804
+ // Batch transcription only
1805
+ diarization: true,
1806
+ wordTimestamps: true,
1807
+ languageDetection: false,
1808
+ customVocabulary: true,
1809
+ summarization: false,
1810
+ sentimentAnalysis: false,
1811
+ entityDetection: false,
1812
+ piiRedaction: false
1813
+ };
1814
+ }
1815
+ initialize(config) {
1816
+ super.initialize(config);
1817
+ this.region = config.region || "eastus";
1818
+ this.baseUrl = config.baseUrl || `https://${this.region}.api.cognitive.microsoft.com/speechtotext/v3.1`;
1819
+ this.client = axios4.create({
1820
+ baseURL: this.baseUrl,
1821
+ timeout: config.timeout || 6e4,
1822
+ headers: {
1823
+ "Ocp-Apim-Subscription-Key": config.apiKey,
1824
+ "Content-Type": "application/json",
1825
+ ...config.headers
1826
+ }
1827
+ });
1828
+ }
1829
+ /**
1830
+ * Submit audio for transcription
1831
+ *
1832
+ * Azure Speech-to-Text uses batch transcription, which processes audio asynchronously.
1833
+ * Poll getTranscript() to retrieve the completed transcription.
1834
+ *
1835
+ * @param audio - Audio input (URL only for batch transcription)
1836
+ * @param options - Transcription options
1837
+ * @returns Response with transcription ID for polling
1838
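+ *
+ * @example
+ * A minimal submit-then-poll sketch (the audio URL is a placeholder):
+ * ```typescript
+ * const submitted = await adapter.transcribe({
+ *   type: 'url',
+ *   url: 'https://example.com/audio.wav'
+ * });
+ * if (submitted.success) {
+ *   const result = await adapter.getTranscript(submitted.data.id);
+ * }
+ * ```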
+ */
1839
+ async transcribe(audio, options) {
1840
+ this.validateConfig();
1841
+ if (audio.type !== "url") {
1842
+ return {
1843
+ success: false,
1844
+ provider: this.name,
1845
+ error: {
1846
+ code: "INVALID_INPUT",
1847
+ message: "Azure Speech-to-Text batch transcription only supports URL input"
1848
+ }
1849
+ };
1850
+ }
1851
+ try {
1852
+ const transcriptionRequest = {
1853
+ displayName: options?.metadata?.displayName || "SDK Transcription",
1854
+ description: options?.metadata?.description || "",
1855
+ locale: options?.language || "en-US",
1856
+ contentUrls: [audio.url],
1857
+ properties: this.buildTranscriptionProperties(options)
1858
+ };
1859
+ const response = await this.client.post(
1860
+ "/transcriptions",
1861
+ transcriptionRequest
1862
+ );
1863
+ const transcription = response.data;
1864
+ return {
1865
+ success: true,
1866
+ provider: this.name,
1867
+ data: {
1868
+ id: transcription.self?.split("/").pop() || "",
1869
+ text: "",
1870
+ // Will be populated after polling
1871
+ status: this.normalizeStatus(transcription.status),
1872
+ language: transcription.locale,
1873
+ createdAt: transcription.createdDateTime
1874
+ },
1875
+ raw: transcription
1876
+ };
1877
+ } catch (error) {
1878
+ return this.createErrorResponse(error);
1879
+ }
1880
+ }
1881
+ /**
1882
+ * Get transcription result by ID
1883
+ *
1884
+ * Poll this method to check transcription status and retrieve results.
1885
+ *
1886
+ * @param transcriptId - Transcription ID from Azure
1887
+ * @returns Transcription response with status and results
1888
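+ *
+ * @example
+ * A simple polling loop (the 5-second interval is an arbitrary choice):
+ * ```typescript
+ * let result = await adapter.getTranscript(id);
+ * while (result.success && result.data.status !== 'completed' && result.data.status !== 'error') {
+ *   await new Promise((r) => setTimeout(r, 5000));
+ *   result = await adapter.getTranscript(id);
+ * }
+ * ```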
+ */
1889
+ async getTranscript(transcriptId) {
1890
+ this.validateConfig();
1891
+ try {
1892
+ const statusResponse = await this.client.get(
1893
+ `/transcriptions/${transcriptId}`
1894
+ );
1895
+ const transcription = statusResponse.data;
1896
+ const status = this.normalizeStatus(transcription.status);
1897
+ if (status !== "completed") {
1898
+ return {
1899
+ success: true,
1900
+ provider: this.name,
1901
+ data: {
1902
+ id: transcriptId,
1903
+ text: "",
1904
+ status,
1905
+ language: transcription.locale,
1906
+ createdAt: transcription.createdDateTime
1907
+ },
1908
+ raw: transcription
1909
+ };
1910
+ }
1911
+ if (!transcription.links?.files) {
1912
+ return {
1913
+ success: false,
1914
+ provider: this.name,
1915
+ error: {
1916
+ code: "NO_RESULTS",
1917
+ message: "Transcription completed but no result files available"
1918
+ },
1919
+ raw: transcription
1920
+ };
1921
+ }
1922
+ const filesResponse = await this.client.get(transcription.links.files);
1923
+ const files = filesResponse.data?.values || [];
1924
+ const resultFile = files.find((file) => file.kind === "Transcription");
1925
+ if (!resultFile?.links?.contentUrl) {
1926
+ return {
1927
+ success: false,
1928
+ provider: this.name,
1929
+ error: {
1930
+ code: "NO_RESULTS",
1931
+ message: "Transcription result file not found"
1932
+ },
1933
+ raw: transcription
1934
+ };
1935
+ }
1936
+ const contentResponse = await axios4.get(resultFile.links.contentUrl);
1937
+ const transcriptionData = contentResponse.data;
1938
+ return this.normalizeResponse(transcription, transcriptionData);
1939
+ } catch (error) {
1940
+ return this.createErrorResponse(error);
1941
+ }
1942
+ }
1943
+ /**
1944
+ * Build Azure-specific transcription properties
1945
+ */
1946
+ buildTranscriptionProperties(options) {
1947
+ const properties = {
1948
+ wordLevelTimestampsEnabled: options?.wordTimestamps ?? true,
1949
+ punctuationMode: "DictatedAndAutomatic",
1950
+ profanityFilterMode: "Masked"
1951
+ };
1952
+ if (options?.diarization) {
1953
+ properties.diarizationEnabled = true;
1954
+ if (options.speakersExpected) {
1955
+ properties.diarization = {
1956
+ speakers: {
1957
+ minCount: 1,
1958
+ maxCount: options.speakersExpected
1959
+ }
1960
+ };
1961
+ }
1962
+ }
1963
+ if (options?.customVocabulary && options.customVocabulary.length > 0) {
1964
+ properties.customProperties = {
1965
+ phrases: options.customVocabulary.join(",")
1966
+ };
1967
+ }
1968
+ return properties;
1969
+ }
1970
+ /**
1971
+ * Normalize Azure status to unified status
1972
+ */
1973
+ normalizeStatus(status) {
1974
+ const statusStr = status?.toString().toLowerCase() || "";
1975
+ if (statusStr.includes("succeeded")) return "completed";
1976
+ if (statusStr.includes("running")) return "processing";
1977
+ if (statusStr.includes("notstarted")) return "queued";
1978
+ if (statusStr.includes("failed")) return "error";
1979
+ return "queued";
1980
+ }
1981
+ /**
1982
+ * Normalize Azure transcription response to unified format
1983
+ */
1984
+ normalizeResponse(transcription, transcriptionData) {
1985
+ const combinedPhrases = transcriptionData.combinedRecognizedPhrases || [];
1986
+ const recognizedPhrases = transcriptionData.recognizedPhrases || [];
1987
+ const fullText = combinedPhrases.map((phrase) => phrase.display || phrase.lexical).join(" ") || "";
1988
+ const words = recognizedPhrases.flatMap(
1989
+ (phrase) => (phrase.nBest?.[0]?.words || []).map((word) => ({
1990
+ text: word.word,
1991
+ start: word.offsetInTicks / 1e7,
1992
+ // Convert ticks (100-nanosecond units) to seconds
1993
+ end: (word.offsetInTicks + word.durationInTicks) / 1e7,
1994
+ confidence: word.confidence,
1995
+ speaker: phrase.speaker !== void 0 ? phrase.speaker.toString() : void 0
1996
+ }))
1997
+ );
1998
+ const speakers = recognizedPhrases.length > 0 && recognizedPhrases[0].speaker !== void 0 ? Array.from(
1999
+ new Set(
2000
+ recognizedPhrases.map((p) => p.speaker).filter((s) => s !== void 0)
2001
+ )
2002
+ ).map((speakerId) => ({
2003
+ id: String(speakerId),
2004
+ label: `Speaker ${speakerId}`
2005
+ })) : void 0;
2006
+ return {
2007
+ success: true,
2008
+ provider: this.name,
2009
+ data: {
2010
+ id: transcription.self?.split("/").pop() || "",
2011
+ text: fullText,
2012
+ confidence: recognizedPhrases[0]?.nBest?.[0]?.confidence,
2013
+ status: "completed",
2014
+ language: transcription.locale,
2015
+ duration: transcriptionData.duration ? transcriptionData.duration / 1e7 : void 0,
2016
+ speakers,
2017
+ words: words.length > 0 ? words : void 0,
2018
+ createdAt: transcription.createdDateTime,
2019
+ completedAt: transcription.lastActionDateTime
2020
+ },
2021
+ raw: {
2022
+ transcription,
2023
+ transcriptionData
2024
+ }
2025
+ };
2026
+ }
2027
+ };
2028
+ function createAzureSTTAdapter(config) {
2029
+ const adapter = new AzureSTTAdapter();
2030
+ adapter.initialize(config);
2031
+ return adapter;
2032
+ }
2033
+
2034
+ // src/adapters/openai-whisper-adapter.ts
2035
+ import axios5 from "axios";
2036
+ var OpenAIWhisperAdapter = class extends BaseAdapter {
2037
+ constructor() {
2038
+ super(...arguments);
2039
+ this.name = "openai-whisper";
2040
+ this.capabilities = {
2041
+ streaming: false,
2042
+ // Synchronous only (no streaming API for transcription)
2043
+ diarization: true,
2044
+ // Available with gpt-4o-transcribe-diarize model
2045
+ wordTimestamps: true,
2046
+ languageDetection: false,
2047
+ // Language should be provided for best accuracy
2048
+ customVocabulary: false,
2049
+ // Uses prompt instead
2050
+ summarization: false,
2051
+ sentimentAnalysis: false,
2052
+ entityDetection: false,
2053
+ piiRedaction: false
2054
+ };
2055
+ this.baseUrl = "https://api.openai.com/v1";
2056
+ }
2057
+ initialize(config) {
2058
+ super.initialize(config);
2059
+ this.baseUrl = config.baseUrl || this.baseUrl;
2060
+ this.client = axios5.create({
2061
+ baseURL: this.baseUrl,
2062
+ timeout: config.timeout || 12e4,
2063
+ // 2 minutes default (audio processing can take time)
2064
+ headers: {
2065
+ Authorization: `Bearer ${config.apiKey}`,
2066
+ "Content-Type": "multipart/form-data",
2067
+ ...config.headers
2068
+ }
2069
+ });
2070
+ }
2071
+ /**
2072
+ * Submit audio for transcription
2073
+ *
2074
+ * OpenAI Whisper API processes audio synchronously and returns results immediately.
2075
+ * Supports multiple models with different capabilities:
2076
+ * - whisper-1: Original Whisper model (open-source large-v2)
2077
+ * - gpt-4o-transcribe: More accurate GPT-4o based transcription
2078
+ * - gpt-4o-mini-transcribe: Faster, cost-effective GPT-4o mini
2079
+ * - gpt-4o-transcribe-diarize: GPT-4o with speaker diarization
2080
+ *
2081
+ * @param audio - Audio input (URL or Buffer)
2082
+ * @param options - Transcription options
2083
+ * @returns Transcription response with full results
2084
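+ *
+ * @example
+ * A minimal sketch (`audioBuffer` is a Buffer you have already loaded):
+ * ```typescript
+ * const result = await adapter.transcribe(
+ *   { type: 'file', file: audioBuffer, filename: 'meeting.mp3' },
+ *   { language: 'en', wordTimestamps: true }
+ * );
+ * if (result.success) console.log(result.data.text);
+ * ```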
+ */
2085
+ async transcribe(audio, options) {
2086
+ this.validateConfig();
2087
+ try {
2088
+ let audioData;
2089
+ let fileName = "audio.mp3";
2090
+ if (audio.type === "url") {
2091
+ const response2 = await axios5.get(audio.url, {
2092
+ responseType: "arraybuffer"
2093
+ });
2094
+ audioData = Buffer.from(response2.data);
2095
+ const urlPath = new URL(audio.url).pathname;
2096
+ const extractedName = urlPath.split("/").pop();
2097
+ if (extractedName) {
2098
+ fileName = extractedName;
2099
+ }
2100
+ } else if (audio.type === "file") {
2101
+ audioData = audio.file;
2102
+ fileName = audio.filename || fileName;
2103
+ } else {
2104
+ return {
2105
+ success: false,
2106
+ provider: this.name,
2107
+ error: {
2108
+ code: "INVALID_INPUT",
2109
+ message: "OpenAI Whisper only supports URL and File audio input (not stream)"
2110
+ }
2111
+ };
2112
+ }
2113
+ const model = this.selectModel(options);
2114
+ const isDiarization = model === "gpt-4o-transcribe-diarize";
2115
+ const needsWords = options?.wordTimestamps === true;
2116
+ const requestBody = {
2117
+ file: audioData,
2118
+ model
2119
+ };
2120
+ if (options?.language) {
2121
+ requestBody.language = options.language;
2122
+ }
2123
+ if (options?.metadata?.prompt) {
2124
+ requestBody.prompt = options.metadata.prompt;
2125
+ }
2126
+ if (options?.metadata?.temperature !== void 0) {
2127
+ requestBody.temperature = options.metadata.temperature;
2128
+ }
2129
+ if (isDiarization) {
2130
+ requestBody.response_format = "diarized_json";
2131
+ if (options?.metadata?.knownSpeakerNames) {
2132
+ requestBody["known_speaker_names"] = options.metadata.knownSpeakerNames;
2133
+ }
2134
+ if (options?.metadata?.knownSpeakerReferences) {
2135
+ requestBody["known_speaker_references"] = options.metadata.knownSpeakerReferences;
2136
+ }
2137
+ } else if (needsWords || options?.diarization) {
2138
+ requestBody.response_format = "verbose_json";
2139
+ if (needsWords) {
2140
+ requestBody.timestamp_granularities = ["word", "segment"];
2141
+ }
2142
+ } else {
2143
+ requestBody.response_format = "json";
2144
+ }
2145
+ const response = await this.client.post("/audio/transcriptions", requestBody, {
2146
+ headers: {
2147
+ "Content-Type": "multipart/form-data"
2148
+ }
2149
+ });
2150
+ return this.normalizeResponse(response.data, model, isDiarization);
2151
+ } catch (error) {
2152
+ return this.createErrorResponse(error);
2153
+ }
2154
+ }
2155
+ /**
2156
+ * OpenAI Whisper returns results synchronously, so getTranscript is not needed.
2157
+ * It exists only for interface compatibility and always returns an error.
2158
+ */
2159
+ async getTranscript(transcriptId) {
2160
+ return {
2161
+ success: false,
2162
+ provider: this.name,
2163
+ error: {
2164
+ code: "NOT_SUPPORTED",
2165
+ message: "OpenAI Whisper processes transcriptions synchronously. Use transcribe() method directly."
2166
+ }
2167
+ };
2168
+ }
2169
+ /**
2170
+ * Select appropriate model based on transcription options
2171
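+ *
+ * @example
+ * ```typescript
+ * // Illustrative: force a specific model instead of the automatic choice
+ * await adapter.transcribe(audio, { metadata: { model: 'whisper-1' } });
+ * ```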
+ */
2172
+ selectModel(options) {
2173
+ if (options?.metadata?.model) {
2174
+ return options.metadata.model;
2175
+ }
2176
+ if (options?.diarization) {
2177
+ return "gpt-4o-transcribe-diarize";
2178
+ }
2179
+ return "gpt-4o-transcribe";
2180
+ }
2181
+ /**
2182
+ * Normalize OpenAI response to unified format
2183
+ */
2184
+ normalizeResponse(response, model, isDiarization) {
2185
+ if ("text" in response && Object.keys(response).length === 1) {
2186
+ return {
2187
+ success: true,
2188
+ provider: this.name,
2189
+ data: {
2190
+ id: `openai-${Date.now()}`,
2191
+ text: response.text,
2192
+ status: "completed",
2193
+ language: void 0,
2194
+ confidence: void 0
2195
+ },
2196
+ raw: response
2197
+ };
2198
+ }
2199
+ if (isDiarization && "segments" in response) {
2200
+ const diarizedResponse = response;
2201
+ const speakerSet = new Set(diarizedResponse.segments.map((seg) => seg.speaker));
2202
+ const speakers = Array.from(speakerSet).map((speaker) => ({
2203
+ id: speaker,
2204
+ label: speaker
2205
+ // Already labeled by OpenAI (A, B, C or custom names)
2206
+ }));
2207
+ const utterances = diarizedResponse.segments.map((segment) => ({
2208
+ speaker: segment.speaker,
2209
+ text: segment.text,
2210
+ start: segment.start,
2211
+ end: segment.end,
2212
+ confidence: void 0
2213
+ }));
2214
+ return {
2215
+ success: true,
2216
+ provider: this.name,
2217
+ data: {
2218
+ id: `openai-${Date.now()}`,
2219
+ text: diarizedResponse.text,
2220
+ status: "completed",
2221
+ language: void 0,
2222
+ duration: diarizedResponse.duration,
2223
+ speakers,
2224
+ utterances
2225
+ },
2226
+ raw: response
2227
+ };
2228
+ }
2229
+ if ("duration" in response && "language" in response) {
2230
+ const verboseResponse = response;
2231
+ const words = verboseResponse.words?.map((word) => ({
2232
+ text: word.word,
2233
+ start: word.start,
2234
+ end: word.end,
2235
+ confidence: void 0
2236
+ }));
2237
+ return {
2238
+ success: true,
2239
+ provider: this.name,
2240
+ data: {
2241
+ id: `openai-${Date.now()}`,
2242
+ text: verboseResponse.text,
2243
+ status: "completed",
2244
+ language: verboseResponse.language,
2245
+ duration: verboseResponse.duration,
2246
+ words
2247
+ },
2248
+ raw: response
2249
+ };
2250
+ }
2251
+ return {
2252
+ success: true,
2253
+ provider: this.name,
2254
+ data: {
2255
+ id: `openai-${Date.now()}`,
2256
+ text: "text" in response ? response.text : "",
2257
+ status: "completed"
2258
+ },
2259
+ raw: response
2260
+ };
2261
+ }
2262
+ };
2263
+ function createOpenAIWhisperAdapter(config) {
2264
+ const adapter = new OpenAIWhisperAdapter();
2265
+ adapter.initialize(config);
2266
+ return adapter;
2267
+ }
2268
+
2269
+ // src/adapters/speechmatics-adapter.ts
2270
+ import axios6 from "axios";
2271
+ var SpeechmaticsAdapter = class extends BaseAdapter {
2272
+ constructor() {
2273
+ super(...arguments);
2274
+ this.name = "speechmatics";
2275
+ this.capabilities = {
2276
+ streaming: false,
2277
+ // Batch only (streaming available via separate WebSocket API)
2278
+ diarization: true,
2279
+ wordTimestamps: true,
2280
+ languageDetection: false,
2281
+ customVocabulary: true,
2282
+ summarization: true,
2283
+ sentimentAnalysis: true,
2284
+ entityDetection: true,
2285
+ piiRedaction: false
2286
+ };
2287
+ this.baseUrl = "https://asr.api.speechmatics.com/v2";
2288
+ }
2289
+ initialize(config) {
2290
+ super.initialize(config);
2291
+ this.baseUrl = config.baseUrl || this.baseUrl;
2292
+ this.client = axios6.create({
2293
+ baseURL: this.baseUrl,
2294
+ timeout: config.timeout || 12e4,
2295
+ headers: {
2296
+ Authorization: `Bearer ${config.apiKey}`,
2297
+ ...config.headers
2298
+ }
2299
+ });
2300
+ }
2301
+ /**
2302
+ * Submit audio for transcription
2303
+ *
2304
+ * Speechmatics uses async batch processing. Returns a job ID immediately.
2305
+ * Poll getTranscript() to retrieve results.
2306
+ *
2307
+ * @param audio - Audio input (URL or file)
2308
+ * @param options - Transcription options
2309
+ * @returns Job submission response with ID for polling
2310
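+ *
+ * @example
+ * A minimal submit sketch (the audio URL is a placeholder):
+ * ```typescript
+ * const job = await adapter.transcribe(
+ *   { type: 'url', url: 'https://example.com/interview.wav' },
+ *   { diarization: true, speakersExpected: 2 }
+ * );
+ * // job.data.id is the Speechmatics job ID to poll via getTranscript()
+ * ```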
+ */
2311
+ async transcribe(audio, options) {
2312
+ this.validateConfig();
2313
+ try {
2314
+ const jobConfig = {
2315
+ type: "transcription",
2316
+ transcription_config: {
2317
+ language: options?.language || "en",
2318
+ operating_point: options?.metadata?.operating_point || "standard"
2319
+ }
2320
+ };
2321
+ if (options?.diarization) {
2322
+ jobConfig.transcription_config.diarization = "speaker";
2323
+ if (options.speakersExpected) {
2324
+ jobConfig.transcription_config.speaker_diarization_config = {
2325
+ max_speakers: options.speakersExpected
2326
+ };
2327
+ }
2328
+ }
2329
+ if (options?.sentimentAnalysis) {
2330
+ jobConfig.transcription_config.enable_sentiment_analysis = true;
2331
+ }
2332
+ if (options?.summarization && options?.metadata?.summary_type) {
2333
+ jobConfig.transcription_config.summarization_config = {
2334
+ type: options.metadata.summary_type,
2335
+ length: options.metadata.summary_length || "medium"
2336
+ };
2337
+ }
2338
+ if (options?.customVocabulary && options.customVocabulary.length > 0) {
2339
+ jobConfig.transcription_config.additional_vocab = options.customVocabulary;
2340
+ }
2341
+ let requestBody;
2342
+ let headers = {};
2343
+ if (audio.type === "url") {
2344
+ jobConfig.fetch_data = {
2345
+ url: audio.url
2346
+ };
2347
+ requestBody = { config: JSON.stringify(jobConfig) };
2348
+ headers = { "Content-Type": "application/json" };
2349
+ } else if (audio.type === "file") {
2350
+ requestBody = {
2351
+ config: JSON.stringify(jobConfig),
2352
+ data_file: audio.file
2353
+ };
2354
+ headers = { "Content-Type": "multipart/form-data" };
2355
+ } else {
2356
+ return {
2357
+ success: false,
2358
+ provider: this.name,
2359
+ error: {
2360
+ code: "INVALID_INPUT",
2361
+ message: "Speechmatics only supports URL and File audio input"
2362
+ }
2363
+ };
2364
+ }
2365
+ const response = await this.client.post("/jobs", requestBody, { headers });
2366
+ return {
2367
+ success: true,
2368
+ provider: this.name,
2369
+ data: {
2370
+ id: response.data.id,
2371
+ text: "",
2372
+ status: "queued",
2373
+ createdAt: response.data.created_at
2374
+ },
2375
+ raw: response.data
2376
+ };
2377
+ } catch (error) {
2378
+ return this.createErrorResponse(error);
2379
+ }
2380
+ }
2381
+ /**
2382
+ * Get transcription result by job ID
2383
+ *
2384
+ * Poll this method to check job status and retrieve completed transcription.
2385
+ *
2386
+ * @param transcriptId - Job ID from Speechmatics
2387
+ * @returns Transcription response with status and results
2388
+ */
2389
+ async getTranscript(transcriptId) {
2390
+ this.validateConfig();
2391
+ try {
2392
+ const statusResponse = await this.client.get(`/jobs/${transcriptId}`);
2393
+ const status = this.normalizeStatus(statusResponse.data.job.status);
2394
+ if (status !== "completed") {
2395
+ return {
2396
+ success: true,
2397
+ provider: this.name,
2398
+ data: {
2399
+ id: transcriptId,
2400
+ text: "",
2401
+ status,
2402
+ createdAt: statusResponse.data.job.created_at
2403
+ },
2404
+ raw: statusResponse.data
2405
+ };
2406
+ }
2407
+ const transcriptResponse = await this.client.get(
2408
+ `/jobs/${transcriptId}/transcript`
2409
+ );
2410
+ return this.normalizeResponse(transcriptResponse.data);
2411
+ } catch (error) {
2412
+ return this.createErrorResponse(error);
2413
+ }
2414
+ }
2415
+ /**
2416
+ * Normalize Speechmatics status to unified status
2417
+ */
2418
+ normalizeStatus(status) {
2419
+ switch (status) {
2420
+ case "running":
2421
+ return "processing";
2422
+ case "done":
2423
+ return "completed";
2424
+ case "rejected":
2425
+ case "expired":
2426
+ return "error";
2427
+ default:
2428
+ return "queued";
2429
+ }
2430
+ }
2431
+ /**
2432
+ * Normalize Speechmatics response to unified format
2433
+ */
2434
+ normalizeResponse(response) {
2435
+ const text = response.results.filter((r) => r.type === "word").map((r) => r.alternatives[0]?.content || "").join(" ");
2436
+ const words = response.results.filter((r) => r.type === "word").map((result) => ({
2437
+ text: result.alternatives[0]?.content || "",
2438
+ start: result.start_time,
2439
+ end: result.end_time,
2440
+ confidence: result.alternatives[0]?.confidence,
2441
+ speaker: result.alternatives[0]?.speaker
2442
+ }));
2443
+ const speakerSet = /* @__PURE__ */ new Set();
2444
+ response.results.forEach((r) => {
2445
+ const speaker = r.alternatives[0]?.speaker;
2446
+ if (speaker) speakerSet.add(speaker);
2447
+ });
2448
+ const speakers = speakerSet.size > 0 ? Array.from(speakerSet).map((id) => ({
2449
+ id,
2450
+ label: `Speaker ${id}`
2451
+ })) : void 0;
2452
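+ // Build speaker turns by grouping consecutive words that share a speaker label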
+ const utterances = [];
2453
+ if (speakers) {
2454
+ let currentSpeaker;
2455
+ let currentUtterance = [];
2456
+ let utteranceStart = 0;
2457
+ response.results.filter((r) => r.type === "word").forEach((result, idx) => {
2458
+ const speaker = result.alternatives[0]?.speaker;
2459
+ const word = result.alternatives[0]?.content || "";
2460
+ if (speaker !== currentSpeaker) {
2461
+ if (currentSpeaker && currentUtterance.length > 0) {
2462
+ const prevResult = response.results.filter((r) => r.type === "word")[idx - 1];
2463
+ utterances.push({
2464
+ speaker: currentSpeaker,
2465
+ text: currentUtterance.join(" "),
2466
+ start: utteranceStart,
2467
+ end: prevResult?.end_time || result.start_time
2468
+ });
2469
+ }
2470
+ currentSpeaker = speaker;
2471
+ currentUtterance = [word];
2472
+ utteranceStart = result.start_time;
2473
+ } else {
2474
+ currentUtterance.push(word);
2475
+ }
2476
+ });
2477
+ if (currentSpeaker && currentUtterance.length > 0) {
2478
+ const lastWord = response.results.filter((r) => r.type === "word").pop();
2479
+ utterances.push({
2480
+ speaker: currentSpeaker,
2481
+ text: currentUtterance.join(" "),
2482
+ start: utteranceStart,
2483
+ end: lastWord?.end_time || utteranceStart
2484
+ });
2485
+ }
2486
+ }
2487
+ return {
2488
+ success: true,
2489
+ provider: this.name,
2490
+ data: {
2491
+ id: response.job.id,
2492
+ text,
2493
+ status: "completed",
2494
+ language: response.metadata.transcription_config.language,
2495
+ duration: response.job.duration,
2496
+ speakers,
2497
+ words: words.length > 0 ? words : void 0,
2498
+ utterances: utterances.length > 0 ? utterances : void 0,
2499
+ summary: response.summary?.content,
2500
+ createdAt: response.job.created_at
2501
+ },
2502
+ raw: response
2503
+ };
2504
+ }
2505
+ };
2506
+ function createSpeechmaticsAdapter(config) {
2507
+ const adapter = new SpeechmaticsAdapter();
2508
+ adapter.initialize(config);
2509
+ return adapter;
2510
+ }
2511
+
2512
+ // src/webhooks/base-webhook.ts
2513
+ var BaseWebhookHandler = class {
2514
+ /**
2515
+ * Validate webhook payload structure
2516
+ *
2517
+ * Checks if payload has required fields and correct types
2518
+ *
2519
+ * @param payload - Raw webhook payload
2520
+ * @param options - Optional context (query params, headers, etc.)
2521
+ * @returns Validation result with details
2522
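+ *
+ * @example
+ * A sketch of rejecting a bad request early (Express-style `req`/`res`
+ * are illustrative assumptions):
+ * ```typescript
+ * const check = handler.validate(req.body, { queryParams: req.query });
+ * if (!check.valid) return res.status(400).send(check.error);
+ * ```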
+ */
2523
+ validate(payload, options) {
2524
+ try {
2525
+ if (!this.matches(payload, options)) {
2526
+ return {
2527
+ valid: false,
2528
+ error: `Payload does not match ${this.provider} webhook format`
2529
+ };
2530
+ }
2531
+ const event = this.parse(payload, options);
2532
+ if (!event.provider || !event.eventType) {
2533
+ return {
2534
+ valid: false,
2535
+ error: "Parsed event missing required fields"
2536
+ };
2537
+ }
2538
+ return {
2539
+ valid: true,
2540
+ provider: this.provider,
2541
+ details: {
2542
+ eventType: event.eventType,
2543
+ success: event.success
2544
+ }
2545
+ };
2546
+ } catch (error) {
2547
+ return {
2548
+ valid: false,
2549
+ error: error instanceof Error ? error.message : "Unknown error",
2550
+ details: { error }
2551
+ };
2552
+ }
2553
+ }
2554
+ /**
2555
+ * Helper method to create error response
2556
+ */
2557
+ createErrorEvent(payload, errorMessage) {
2558
+ return {
2559
+ success: false,
2560
+ provider: this.provider,
2561
+ eventType: "transcription.failed",
2562
+ data: {
2563
+ id: "",
2564
+ status: "error",
2565
+ error: errorMessage
2566
+ },
2567
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
2568
+ raw: payload
2569
+ };
2570
+ }
2571
+ };
2572
+
2573
+ // src/webhooks/gladia-webhook.ts
2574
+ var GladiaWebhookHandler = class extends BaseWebhookHandler {
2575
+ constructor() {
2576
+ super(...arguments);
2577
+ this.provider = "gladia";
2578
+ }
2579
+ /**
2580
+ * Check if payload matches Gladia webhook format
2581
+ */
2582
+ matches(payload, _options) {
2583
+ if (!payload || typeof payload !== "object") {
2584
+ return false;
2585
+ }
2586
+ const obj = payload;
2587
+ if (!("event" in obj) || !("payload" in obj)) {
2588
+ return false;
2589
+ }
2590
+ if (typeof obj.event !== "string") {
2591
+ return false;
2592
+ }
2593
+ if (!obj.event.startsWith("transcription.")) {
2594
+ return false;
2595
+ }
2596
+ if (!obj.payload || typeof obj.payload !== "object") {
2597
+ return false;
2598
+ }
2599
+ const payloadObj = obj.payload;
2600
+ return typeof payloadObj.id === "string";
2601
+ }
2602
+ /**
2603
+ * Parse Gladia webhook payload to unified format
2604
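+ *
+ * @example
+ * Gladia webhooks carry only the job ID, so fetch the full transcript
+ * separately (sketch; `gladiaAdapter` is assumed to be initialized):
+ * ```typescript
+ * const event = handler.parse(req.body);
+ * if (event.eventType === 'transcription.completed') {
+ *   const full = await gladiaAdapter.getTranscript(event.data.id);
+ * }
+ * ```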
+ */
2605
+ parse(payload, _options) {
2606
+ if (!this.matches(payload)) {
2607
+ return this.createErrorEvent(payload, "Invalid Gladia webhook payload");
2608
+ }
2609
+ const webhookPayload = payload;
2610
+ const jobId = webhookPayload.payload.id;
2611
+ const event = webhookPayload.event;
2612
+ if (event === "transcription.created") {
2613
+ return {
2614
+ success: true,
2615
+ provider: this.provider,
2616
+ eventType: "transcription.created",
2617
+ data: {
2618
+ id: jobId,
2619
+ status: "queued"
2620
+ },
2621
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
2622
+ raw: payload
2623
+ };
2624
+ }
2625
+ if (event === "transcription.success") {
2626
+ return {
2627
+ success: true,
2628
+ provider: this.provider,
2629
+ eventType: "transcription.completed",
2630
+ data: {
2631
+ id: jobId,
2632
+ status: "completed"
2633
+ // Note: Full transcript data needs to be fetched via API
2634
+ // using GladiaAdapter.getTranscript(jobId)
2635
+ },
2636
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
2637
+ raw: payload
2638
+ };
2639
+ }
2640
+ if (event === "transcription.error") {
2641
+ return {
2642
+ success: false,
2643
+ provider: this.provider,
2644
+ eventType: "transcription.failed",
2645
+ data: {
2646
+ id: jobId,
2647
+ status: "error",
2648
+ error: "Transcription failed"
2649
+ },
2650
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
2651
+ raw: payload
2652
+ };
2653
+ }
2654
+ return this.createErrorEvent(payload, `Unknown Gladia webhook event: ${event}`);
2655
+ }
2656
+ /**
2657
+ * Verify Gladia webhook signature
2658
+ *
2659
+ * Note: As of the current API version, Gladia does not provide
2660
+ * webhook signature verification. This method is a placeholder
2661
+ * for future implementation.
2662
+ *
2663
+ * @returns Always returns true (no verification available)
2666
+ */
2667
+ verify() {
2668
+ return true;
2669
+ }
2670
+ };
2671
+ function createGladiaWebhookHandler() {
2672
+ return new GladiaWebhookHandler();
2673
+ }
2674
+
2675
+ // src/webhooks/assemblyai-webhook.ts
2676
+ import crypto from "node:crypto";
2677
+ var AssemblyAIWebhookHandler = class extends BaseWebhookHandler {
2678
+ constructor() {
2679
+ super(...arguments);
2680
+ this.provider = "assemblyai";
2681
+ }
2682
+ /**
2683
+ * Check if payload matches AssemblyAI webhook format
2684
+ */
2685
+ matches(payload, _options) {
2686
+ if (!payload || typeof payload !== "object") {
2687
+ return false;
2688
+ }
2689
+ const obj = payload;
2690
+ if (!("transcript_id" in obj) || !("status" in obj)) {
2691
+ return false;
2692
+ }
2693
+ if (typeof obj.transcript_id !== "string") {
2694
+ return false;
2695
+ }
2696
+ if (obj.status !== "completed" && obj.status !== "error") {
2697
+ return false;
2698
+ }
2699
+ return true;
2700
+ }
2701
+ /**
2702
+ * Parse AssemblyAI webhook payload to unified format
2703
+ */
2704
+ parse(payload, _options) {
2705
+ if (!this.matches(payload)) {
2706
+ return this.createErrorEvent(payload, "Invalid AssemblyAI webhook payload");
2707
+ }
2708
+ const notification = payload;
2709
+ const transcriptId = notification.transcript_id;
2710
+ const status = notification.status;
2711
+ if (status === "completed") {
2712
+ return {
2713
+ success: true,
2714
+ provider: this.provider,
2715
+ eventType: "transcription.completed",
2716
+ data: {
2717
+ id: transcriptId,
2718
+ status: "completed"
2719
+ // Note: Full transcript data needs to be fetched via API
2720
+ // using AssemblyAIAdapter.getTranscript(transcriptId)
2721
+ },
2722
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
2723
+ raw: payload
2724
+ };
2725
+ }
2726
+ if (status === "error") {
2727
+ return {
2728
+ success: false,
2729
+ provider: this.provider,
2730
+ eventType: "transcription.failed",
2731
+ data: {
2732
+ id: transcriptId,
2733
+ status: "error",
2734
+ error: "Transcription failed"
2735
+ },
2736
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
2737
+ raw: payload
2738
+ };
2739
+ }
2740
+ return this.createErrorEvent(payload, `Unknown AssemblyAI status: ${status}`);
2741
+ }
2742
+ /**
2743
+ * Verify AssemblyAI webhook signature
2744
+ *
2745
+ * AssemblyAI uses HMAC-SHA256 for webhook signature verification.
2746
+ * The signature is sent in the X-AssemblyAI-Signature header.
2747
+ *
2748
+ * @param payload - Webhook payload
2749
+ * @param options - Verification options with signature and secret
2750
+ * @returns true if signature is valid
2751
+ *
2752
+ * @example
2753
+ * ```typescript
2754
+ * const isValid = handler.verify(req.body, {
2755
+ * signature: req.headers['x-assemblyai-signature'],
2756
+ * secret: process.env.ASSEMBLYAI_WEBHOOK_SECRET,
2757
+ * rawBody: req.rawBody // Raw request body as string or Buffer
2758
+ * });
2759
+ * ```
2760
+ */
2761
+ verify(payload, options) {
2762
+ if (!options.signature || !options.secret) {
2763
+ return false;
2764
+ }
2765
+ try {
2766
+ const body = options.rawBody || (typeof payload === "string" ? payload : JSON.stringify(payload));
2767
+ const hmac = crypto.createHmac("sha256", options.secret);
2768
+ const bodyBuffer = typeof body === "string" ? Buffer.from(body) : body;
2769
+ hmac.update(bodyBuffer);
2770
+ const computedSignature = hmac.digest("hex");
2771
+ return crypto.timingSafeEqual(Buffer.from(options.signature), Buffer.from(computedSignature));
2772
+ } catch (error) {
2773
+ return false;
2774
+ }
2775
+ }
2776
+ };
2777
+ function createAssemblyAIWebhookHandler() {
2778
+ return new AssemblyAIWebhookHandler();
2779
+ }
2780
+
2781
+ // src/webhooks/deepgram-webhook.ts
2782
+ var DeepgramWebhookHandler = class extends BaseWebhookHandler {
2783
+ constructor() {
2784
+ super(...arguments);
2785
+ this.provider = "deepgram";
2786
+ }
2787
+ /**
2788
+ * Check if payload matches Deepgram webhook format
2789
+ */
2790
+ matches(payload, _options) {
2791
+ if (!payload || typeof payload !== "object") {
2792
+ return false;
2793
+ }
2794
+ const obj = payload;
2795
+ if (!("metadata" in obj) || !("results" in obj)) {
2796
+ return false;
2797
+ }
2798
+ if (!obj.metadata || typeof obj.metadata !== "object") {
2799
+ return false;
2800
+ }
2801
+ const metadata = obj.metadata;
2802
+ if (!("request_id" in metadata)) {
2803
+ return false;
2804
+ }
2805
+ if (!obj.results || typeof obj.results !== "object") {
2806
+ return false;
2807
+ }
2808
+ const results = obj.results;
2809
+ return "channels" in results;
2810
+ }
2811
+ /**
2812
+ * Parse Deepgram webhook payload to unified format
2813
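+ *
+ * @example
+ * Deepgram posts the full transcription result to the callback URL, so the
+ * parsed event already contains the text (sketch; `req` is illustrative):
+ * ```typescript
+ * const event = handler.parse(req.body);
+ * if (event.success) console.log(event.data.text);
+ * ```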
+ */
2814
+ parse(payload, _options) {
2815
+ if (!this.matches(payload)) {
2816
+ return this.createErrorEvent(payload, "Invalid Deepgram webhook payload");
2817
+ }
2818
+ const response = payload;
2819
+ try {
2820
+ const requestId = response.metadata.request_id;
2821
+ const duration = response.metadata.duration;
2822
+ const channels = response.results.channels || [];
2823
+ if (channels.length === 0) {
2824
+ return {
2825
+ success: false,
2826
+ provider: this.provider,
2827
+ eventType: "transcription.failed",
2828
+ data: {
2829
+ id: requestId || "",
2830
+ status: "error",
2831
+ error: "No channels in response"
2832
+ },
2833
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
2834
+ raw: payload
2835
+ };
2836
+ }
2837
+ const channel = channels[0];
2838
+ const alternatives = channel.alternatives || [];
2839
+ if (alternatives.length === 0) {
2840
+ return {
2841
+ success: false,
2842
+ provider: this.provider,
2843
+ eventType: "transcription.failed",
2844
+ data: {
2845
+ id: requestId || "",
2846
+ status: "error",
2847
+ error: "No alternatives in response"
2848
+ },
2849
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
2850
+ raw: payload
2851
+ };
2852
+ }
2853
+ const alternative = alternatives[0];
2854
+ const transcript = alternative.transcript;
2855
+ if (!transcript) {
2856
+ return {
2857
+ success: false,
2858
+ provider: this.provider,
2859
+ eventType: "transcription.failed",
2860
+ data: {
2861
+ id: requestId || "",
2862
+ status: "error",
2863
+ error: "Empty transcript"
2864
+ },
2865
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
2866
+ raw: payload
2867
+ };
2868
+ }
2869
+ const words = alternative.words && alternative.words.length > 0 ? alternative.words.map((word) => ({
2870
+ text: word.word || "",
2871
+ start: word.start || 0,
2872
+ end: word.end || 0,
2873
+ confidence: word.confidence
2874
+ })) : void 0;
2875
+ const speakers = response.results.utterances && response.results.utterances.length > 0 ? Array.from(
2876
+ new Set(response.results.utterances.map((utterance) => utterance.speaker?.toString() || "unknown"))
2877
+ ).map((id) => ({
2878
+ id,
2879
+ label: `Speaker ${id}`
2880
+ })) : void 0;
2881
+ const utterances = response.results.utterances && response.results.utterances.length > 0 ? response.results.utterances.map((utterance) => ({
2882
+ text: utterance.transcript || "",
2883
+ start: utterance.start || 0,
2884
+ end: utterance.end || 0,
2885
+ speaker: utterance.speaker?.toString(),
2886
+ confidence: utterance.confidence,
2887
+ words: utterance.words && utterance.words.length > 0 ? utterance.words.map((word) => ({
2888
+ text: word.word || "",
2889
+ start: word.start || 0,
2890
+ end: word.end || 0,
2891
+ confidence: word.confidence
2892
+ })) : void 0
2893
+ })) : void 0;
2894
+ const summary = alternative.summaries?.[0]?.summary;
2895
+ return {
2896
+ success: true,
2897
+ provider: this.provider,
2898
+ eventType: "transcription.completed",
2899
+ data: {
2900
+ id: requestId || "",
2901
+ status: "completed",
2902
+ text: transcript,
2903
+ confidence: alternative.confidence,
2904
+ duration,
2905
+ language: response.metadata.models?.[0] || void 0,
2906
+ // Note: Deepgram batch metadata carries model IDs rather than a locale
+ speakers: speakers && speakers.length > 0 ? speakers : void 0,
2907
+ words: words && words.length > 0 ? words : void 0,
2908
+ utterances: utterances && utterances.length > 0 ? utterances : void 0,
2909
+ summary,
2910
+ metadata: {
2911
+ channels: response.metadata.channels,
2912
+ created: response.metadata.created,
2913
+ models: response.metadata.models
2914
+ }
2915
+ },
2916
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
2917
+ raw: payload
2918
+ };
2919
+ } catch (error) {
2920
+ return this.createErrorEvent(
2921
+ payload,
2922
+ `Failed to parse Deepgram webhook: ${error instanceof Error ? error.message : "Unknown error"}`
2923
+ );
2924
+ }
2925
+ }
2926
+ /**
2927
+ * Verify Deepgram webhook signature
2928
+ *
2929
+ * Note: Deepgram does not currently support webhook signature verification.
2930
+ * For security, use HTTPS and validate the request source (IP allowlist, etc.).
2931
+ *
2932
+ * @returns Always returns true (no verification available)
2933
+ */
2934
+ verify() {
2935
+ return true;
2936
+ }
2937
+ };
2938
+ function createDeepgramWebhookHandler() {
2939
+ return new DeepgramWebhookHandler();
2940
+ }
2941
+
2942
+ // src/webhooks/azure-webhook.ts
2943
+ import crypto2 from "node:crypto";
2944
+ var AzureWebhookHandler = class extends BaseWebhookHandler {
2945
+ constructor() {
2946
+ super(...arguments);
2947
+ this.provider = "azure-stt";
2948
+ }
2949
+ /**
2950
+ * Check if payload matches Azure webhook format
2951
+ */
2952
+ matches(payload, _options) {
2953
+ if (!payload || typeof payload !== "object") {
2954
+ return false;
2955
+ }
2956
+ const obj = payload;
2957
+ if (!("action" in obj) || !("timestamp" in obj)) {
2958
+ return false;
2959
+ }
2960
+ if (typeof obj.action !== "string") {
2961
+ return false;
2962
+ }
2963
+ if (!obj.action.startsWith("Transcription")) {
2964
+ return false;
2965
+ }
2966
+ return true;
2967
+ }
2968
+ /**
2969
+ * Parse Azure webhook payload to unified format
2970
+ */
2971
+ parse(payload, _options) {
2972
+ if (!this.matches(payload)) {
2973
+ return this.createErrorEvent(payload, "Invalid Azure webhook payload");
2974
+ }
2975
+ const webhookPayload = payload;
2976
+ const action = webhookPayload.action;
2977
+ const timestamp = webhookPayload.timestamp;
2978
+ let transcriptionId = "";
2979
+ if (webhookPayload.self) {
2980
+ const match = webhookPayload.self.match(/\/transcriptions\/([^/?]+)/);
2981
+ if (match) {
2982
+ transcriptionId = match[1];
2983
+ }
2984
+ }
2985
+ if (action === "TranscriptionCreated") {
2986
+ return {
2987
+ success: true,
2988
+ provider: this.provider,
2989
+ eventType: "transcription.created",
2990
+ data: {
2991
+ id: transcriptionId,
2992
+ status: "queued",
2993
+ createdAt: timestamp
2994
+ },
2995
+ timestamp,
2996
+ raw: payload
2997
+ };
2998
+ }
2999
+ if (action === "TranscriptionRunning") {
3000
+ return {
3001
+ success: true,
3002
+ provider: this.provider,
3003
+ eventType: "transcription.processing",
3004
+ data: {
3005
+ id: transcriptionId,
3006
+ status: "processing"
3007
+ },
3008
+ timestamp,
3009
+ raw: payload
3010
+ };
3011
+ }
3012
+ if (action === "TranscriptionSucceeded") {
3013
+ return {
3014
+ success: true,
3015
+ provider: this.provider,
3016
+ eventType: "transcription.completed",
3017
+ data: {
3018
+ id: transcriptionId,
3019
+ status: "completed",
3020
+ completedAt: timestamp
3021
+ // Note: Full transcript data needs to be fetched via API
3022
+ // using AzureAdapter.getTranscript(transcriptionId)
3023
+ },
3024
+ timestamp,
3025
+ raw: payload
3026
+ };
3027
+ }
3028
+ if (action === "TranscriptionFailed") {
3029
+ return {
3030
+ success: false,
3031
+ provider: this.provider,
3032
+ eventType: "transcription.failed",
3033
+ data: {
3034
+ id: transcriptionId,
3035
+ status: "error",
3036
+ error: webhookPayload.error?.message || "Transcription failed",
3037
+ metadata: {
3038
+ errorCode: webhookPayload.error?.code
3039
+ }
3040
+ },
3041
+ timestamp,
3042
+ raw: payload
3043
+ };
3044
+ }
3045
+ return this.createErrorEvent(payload, `Unknown Azure webhook action: ${action}`);
3046
+ }
3047
+ /**
3048
+ * Verify Azure webhook signature
3049
+ *
3050
+ * Azure can optionally sign webhooks using HMAC-SHA256.
3051
+ * The signature is sent in the X-Azure-Signature header.
3052
+ *
3053
+ * Note: Signature verification is optional in Azure and must be
3054
+ * configured when creating the webhook.
3055
+ *
3056
+ * @param payload - Webhook payload
3057
+ * @param options - Verification options with signature and secret
3058
+ * @returns true if signature is valid or no signature provided
3059
+ *
3060
+ * @example
3061
+ * ```typescript
3062
+ * const isValid = handler.verify(req.body, {
3063
+ * signature: req.headers['x-azure-signature'],
3064
+ * secret: process.env.AZURE_WEBHOOK_SECRET,
3065
+ * rawBody: req.rawBody
3066
+ * });
3067
+ * ```
3068
+ */
3069
+ verify(payload, options) {
3070
+ if (!options.signature) {
3071
+ return true;
3072
+ }
3073
+ if (!options.secret) {
3074
+ return false;
3075
+ }
3076
+ try {
3077
+ const body = options.rawBody || (typeof payload === "string" ? payload : JSON.stringify(payload));
3078
+ const hmac = crypto2.createHmac("sha256", options.secret);
3079
+ const bodyBuffer = typeof body === "string" ? Buffer.from(body) : body;
3080
+ hmac.update(bodyBuffer);
3081
+ const computedSignature = hmac.digest("hex");
3082
+ return crypto2.timingSafeEqual(Buffer.from(options.signature), Buffer.from(computedSignature));
3083
+ } catch (error) {
3084
+ return false;
3085
+ }
3086
+ }
3087
+ };
3088
+ function createAzureWebhookHandler() {
3089
+ return new AzureWebhookHandler();
3090
+ }
3091
+
3092
+ // src/webhooks/speechmatics-webhook.ts
3093
+ var SpeechmaticsWebhookHandler = class extends BaseWebhookHandler {
3094
+ constructor() {
3095
+ super(...arguments);
3096
+ this.provider = "speechmatics";
3097
+ }
3098
+ /**
3099
+ * Check if payload matches Speechmatics webhook format
3100
+ */
3101
+ matches(payload, options) {
3102
+ if (options?.userAgent) {
3103
+ if (!options.userAgent.includes("Speechmatics-API")) {
3104
+ return false;
3105
+ }
3106
+ }
3107
+ if (options?.queryParams) {
3108
+ const { id, status } = options.queryParams;
3109
+ if (!id || !status) {
3110
+ return false;
3111
+ }
3112
+ }
3113
+ if (payload && typeof payload === "object") {
3114
+ const obj = payload;
3115
+ if ("format" in obj && "job" in obj && "metadata" in obj) {
3116
+ return true;
3117
+ }
3118
+ if ("job" in obj || "id" in obj) {
3119
+ return true;
3120
+ }
3121
+ }
3122
+ return !!options?.queryParams?.id && !!options?.queryParams?.status;
3123
+ }
3124
+ /**
3125
+ * Validate webhook request
3126
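+ *
+ * @example
+ * Speechmatics signals the job ID and status via query parameters (sketch):
+ * ```typescript
+ * const check = handler.validate(req.body, {
+ *   queryParams: { id: String(req.query.id), status: String(req.query.status) },
+ *   userAgent: req.headers['user-agent']
+ * });
+ * ```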
+ */
3127
+ validate(payload, options) {
3128
+ if (!options?.queryParams?.id) {
3129
+ return {
3130
+ valid: false,
3131
+ error: "Missing required query parameter: id"
3132
+ };
3133
+ }
3134
+ if (!options?.queryParams?.status) {
3135
+ return {
3136
+ valid: false,
3137
+ error: "Missing required query parameter: status"
3138
+ };
3139
+ }
3140
+ const validStatuses = ["success", "error", "fetch_error", "trim_error"];
3141
+ if (!validStatuses.includes(options.queryParams.status)) {
3142
+ return {
3143
+ valid: false,
3144
+ error: `Invalid status value: ${options.queryParams.status}`
3145
+ };
3146
+ }
3147
+ if (options?.userAgent && !options.userAgent.includes("Speechmatics-API")) {
3148
+ return {
3149
+ valid: false,
3150
+ error: "Invalid user agent (expected Speechmatics-API/2.0)"
3151
+ };
3152
+ }
3153
+ return { valid: true };
3154
+ }
3155
+ /**
3156
+ * Parse webhook payload into unified event format
3157
+ */
3158
+ parse(payload, options) {
3159
+ const queryParams = options?.queryParams || {};
3160
+ const jobId = queryParams.id;
3161
+ const status = queryParams.status;
3162
+ let eventType;
3163
+ if (status === "success") {
3164
+ eventType = "transcription.completed";
3165
+ } else if (status === "error" || status === "fetch_error" || status === "trim_error") {
3166
+ eventType = "transcription.failed";
3167
+ } else {
3168
+ eventType = "transcription.created";
3169
+ }
3170
+ if (status === "success" && payload && typeof payload === "object") {
3171
+ const transcript = payload;
3172
+ if (transcript.results && transcript.job) {
3173
+ const text = transcript.results.filter((r) => r.type === "word").map((r) => r.alternatives[0]?.content || "").join(" ");
3174
+ const speakerSet = /* @__PURE__ */ new Set();
3175
+ transcript.results.forEach((r) => {
3176
+ const speaker = r.alternatives[0]?.speaker;
3177
+ if (speaker) speakerSet.add(speaker);
3178
+ });
3179
+ const speakers = speakerSet.size > 0 ? Array.from(speakerSet).map((id) => ({
3180
+ id,
3181
+ label: `Speaker ${id}`
3182
+ })) : void 0;
3183
+ return {
3184
+ success: true,
3185
+ provider: this.provider,
3186
+ eventType,
3187
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
3188
+ data: {
3189
+ id: jobId,
3190
+ text,
3191
+ status: "completed",
3192
+ language: transcript.metadata.transcription_config.language,
3193
+ duration: transcript.job.duration,
3194
+ speakers,
3195
+ createdAt: transcript.job.created_at
3196
+ },
3197
+ raw: payload
3198
+ };
3199
+ }
3200
+ }
3201
+ return {
3202
+ success: status === "success",
3203
+ provider: this.provider,
3204
+ eventType,
3205
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
3206
+ data: {
3207
+ id: jobId,
3208
+ text: "",
3209
+ status: status === "success" ? "completed" : "error"
3210
+ },
3211
+ raw: payload
3212
+ };
3213
+ }
3214
+ };
3215
+
3216
+ // src/webhooks/webhook-router.ts
3217
+ var WebhookRouter = class {
3218
+ constructor() {
3219
+ this.handlers = /* @__PURE__ */ new Map([
3220
+ ["gladia", new GladiaWebhookHandler()],
3221
+ ["assemblyai", new AssemblyAIWebhookHandler()],
3222
+ ["deepgram", new DeepgramWebhookHandler()],
3223
+ ["azure-stt", new AzureWebhookHandler()],
3224
+ ["speechmatics", new SpeechmaticsWebhookHandler()]
3225
+ ]);
3226
+ }
3227
+ /**
3228
+ * Route webhook payload to the correct handler
3229
+ *
3230
+ * @param payload - Raw webhook payload
3231
+ * @param options - Routing options (provider, verification, etc.)
3232
+ * @returns Routing result with parsed event
3233
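+ *
+ * @example
+ * A sketch of a single endpoint handling all providers (Express-style
+ * wiring is an illustrative assumption):
+ * ```typescript
+ * app.post('/webhooks/transcription', (req, res) => {
+ *   const result = router.route(req.body, {
+ *     queryParams: req.query,
+ *     userAgent: req.headers['user-agent']
+ *   });
+ *   if (!result.success) return res.status(400).send(result.error);
+ *   console.log(result.provider, result.event?.eventType);
+ *   res.sendStatus(200);
+ * });
+ * ```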
+ */
3234
+ route(payload, options) {
3235
+ if (options?.provider) {
3236
+ return this.routeToProvider(payload, options.provider, options);
3237
+ }
3238
+ const detectedProvider = this.detectProvider(payload, {
3239
+ queryParams: options?.queryParams,
3240
+ userAgent: options?.userAgent
3241
+ });
3242
+ if (!detectedProvider) {
3243
+ return {
3244
+ success: false,
3245
+ error: "Could not detect webhook provider from payload structure"
3246
+ };
3247
+ }
3248
+ return this.routeToProvider(payload, detectedProvider, options);
3249
+ }
3250
+ /**
3251
+ * Detect provider from webhook payload structure
3252
+ *
3253
+ * @param payload - Raw webhook payload
3254
+ * @param options - Detection options (query params, user agent, etc.)
3255
+ * @returns Detected provider or undefined
3256
+ */
3257
+ detectProvider(payload, options) {
3258
+ for (const [provider, handler] of this.handlers) {
3259
+ if (handler.matches(payload, options)) {
3260
+ return provider;
3261
+ }
3262
+ }
3263
+ return void 0;
3264
+ }
3265
+ /**
3266
+ * Validate webhook payload
3267
+ *
3268
+ * @param payload - Raw webhook payload
3269
+ * @param options - Routing options
3270
+ * @returns Validation result
3271
+ */
3272
+ validate(payload, options) {
3273
+ if (options?.provider) {
3274
+ const handler2 = this.handlers.get(options.provider);
3275
+ if (!handler2) {
3276
+ return {
3277
+ valid: false,
3278
+ error: `Unknown provider: ${options.provider}`
3279
+ };
3280
+ }
3281
+ return handler2.validate(payload, {
3282
+ queryParams: options.queryParams,
3283
+ userAgent: options.userAgent
3284
+ });
3285
+ }
3286
+ const detectedProvider = this.detectProvider(payload, {
3287
+ queryParams: options?.queryParams,
3288
+ userAgent: options?.userAgent
3289
+ });
3290
+ if (!detectedProvider) {
3291
+ return {
3292
+ valid: false,
3293
+ error: "Could not detect webhook provider from payload structure"
3294
+ };
3295
+ }
3296
+ const handler = this.handlers.get(detectedProvider);
3297
+ if (!handler) {
3298
+ return {
3299
+ valid: false,
3300
+ error: `Handler not found for provider: ${detectedProvider}`
3301
+ };
3302
+ }
3303
+ return handler.validate(payload, {
3304
+ queryParams: options?.queryParams,
3305
+ userAgent: options?.userAgent
3306
+ });
3307
+ }
3308
+ /**
3309
+ * Verify webhook signature
3310
+ *
3311
+ * @param payload - Raw webhook payload
3312
+ * @param provider - Provider name
3313
+ * @param options - Verification options
3314
+ * @returns true if signature is valid
3315
+ */
3316
+ verify(payload, provider, options) {
3317
+ const handler = this.handlers.get(provider);
3318
+ if (!handler || !handler.verify) {
3319
+ return true;
3320
+ }
3321
+ return handler.verify(payload, options);
3322
+ }
3323
+ /**
3324
+ * Route to a specific provider handler
3325
+ */
3326
+ routeToProvider(payload, provider, options) {
3327
+ const handler = this.handlers.get(provider);
3328
+ if (!handler) {
3329
+ return {
3330
+ success: false,
3331
+ error: `Handler not found for provider: ${provider}`
3332
+ };
3333
+ }
3334
+ let verified = true;
3335
+ if (options?.verifySignature !== false && options?.verification && handler.verify) {
3336
+ verified = handler.verify(payload, options.verification);
3337
+ if (!verified) {
3338
+ return {
3339
+ success: false,
3340
+ provider,
3341
+ error: "Webhook signature verification failed",
3342
+ verified: false
3343
+ };
3344
+ }
3345
+ }
3346
+ const validation = handler.validate(payload, {
3347
+ queryParams: options?.queryParams,
3348
+ userAgent: options?.userAgent
3349
+ });
3350
+ if (!validation.valid) {
3351
+ return {
3352
+ success: false,
3353
+ provider,
3354
+ error: validation.error,
3355
+ verified
3356
+ };
3357
+ }
3358
+ try {
3359
+ const event = handler.parse(payload, {
3360
+ queryParams: options?.queryParams
3361
+ });
3362
+ return {
3363
+ success: true,
3364
+ provider,
3365
+ event,
3366
+ verified
3367
+ };
3368
+ } catch (error) {
3369
+ return {
3370
+ success: false,
3371
+ provider,
3372
+ error: `Failed to parse webhook: ${error instanceof Error ? error.message : "Unknown error"}`,
3373
+ verified
3374
+ };
3375
+ }
3376
+ }
3377
+ /**
3378
+ * Get handler for a specific provider
3379
+ *
3380
+ * @param provider - Provider name
3381
+ * @returns Handler instance or undefined
3382
+ */
3383
+ getHandler(provider) {
3384
+ return this.handlers.get(provider);
3385
+ }
3386
+ /**
3387
+ * Get all registered providers
3388
+ *
3389
+ * @returns Array of provider names
3390
+ */
3391
+ getProviders() {
3392
+ return Array.from(this.handlers.keys());
3393
+ }
3394
+ };
3395
+ function createWebhookRouter() {
3396
+ return new WebhookRouter();
3397
+ }
3398
+
3399
+ // src/generated/gladia/schema/index.ts
3400
+ var schema_exports = {};
3401
+ __export(schema_exports, {
3402
+ AudioChunkAckMessageType: () => AudioChunkAckMessageType,
3403
+ AudioChunkActionType: () => AudioChunkActionType,
3404
+ AudioToTextControllerAudioTranscriptionBodyLanguage: () => AudioToTextControllerAudioTranscriptionBodyLanguage,
3405
+ AudioToTextControllerAudioTranscriptionBodyLanguageBehaviour: () => AudioToTextControllerAudioTranscriptionBodyLanguageBehaviour,
3406
+ AudioToTextControllerAudioTranscriptionBodyOutputFormat: () => AudioToTextControllerAudioTranscriptionBodyOutputFormat,
3407
+ AudioToTextControllerAudioTranscriptionBodyTargetTranslationLanguage: () => AudioToTextControllerAudioTranscriptionBodyTargetTranslationLanguage,
3408
+ CallbackLiveAudioChunkAckMessageEvent: () => CallbackLiveAudioChunkAckMessageEvent,
3409
+ CallbackLiveEndRecordingMessageEvent: () => CallbackLiveEndRecordingMessageEvent,
3410
+ CallbackLiveEndSessionMessageEvent: () => CallbackLiveEndSessionMessageEvent,
3411
+ CallbackLiveNamedEntityRecognitionMessageEvent: () => CallbackLiveNamedEntityRecognitionMessageEvent,
3412
+ CallbackLivePostChapterizationMessageEvent: () => CallbackLivePostChapterizationMessageEvent,
3413
+ CallbackLivePostFinalTranscriptMessageEvent: () => CallbackLivePostFinalTranscriptMessageEvent,
3414
+ CallbackLivePostSummarizationMessageEvent: () => CallbackLivePostSummarizationMessageEvent,
3415
+ CallbackLivePostTranscriptMessageEvent: () => CallbackLivePostTranscriptMessageEvent,
3416
+ CallbackLiveSentimentAnalysisMessageEvent: () => CallbackLiveSentimentAnalysisMessageEvent,
3417
+ CallbackLiveSpeechEndMessageEvent: () => CallbackLiveSpeechEndMessageEvent,
3418
+ CallbackLiveSpeechStartMessageEvent: () => CallbackLiveSpeechStartMessageEvent,
3419
+ CallbackLiveStartRecordingMessageEvent: () => CallbackLiveStartRecordingMessageEvent,
3420
+ CallbackLiveStartSessionMessageEvent: () => CallbackLiveStartSessionMessageEvent,
3421
+ CallbackLiveStopRecordingAckMessageEvent: () => CallbackLiveStopRecordingAckMessageEvent,
3422
+ CallbackLiveTranscriptMessageEvent: () => CallbackLiveTranscriptMessageEvent,
3423
+ CallbackLiveTranslationMessageEvent: () => CallbackLiveTranslationMessageEvent,
3424
+ CallbackMethodEnum: () => CallbackMethodEnum,
3425
+ CallbackTranscriptionErrorPayloadEvent: () => CallbackTranscriptionErrorPayloadEvent,
3426
+ CallbackTranscriptionSuccessPayloadEvent: () => CallbackTranscriptionSuccessPayloadEvent,
3427
+ EndRecordingMessageType: () => EndRecordingMessageType,
3428
+ EndSessionMessageType: () => EndSessionMessageType,
3429
+ HistoryControllerGetListV1KindItem: () => HistoryControllerGetListV1KindItem,
3430
+ HistoryControllerGetListV1StatusItem: () => HistoryControllerGetListV1StatusItem,
3431
+ NamedEntityRecognitionMessageType: () => NamedEntityRecognitionMessageType,
3432
+ PostChapterizationMessageType: () => PostChapterizationMessageType,
3433
+ PostFinalTranscriptMessageType: () => PostFinalTranscriptMessageType,
3434
+ PostSummarizationMessageType: () => PostSummarizationMessageType,
3435
+ PostTranscriptMessageType: () => PostTranscriptMessageType,
3436
+ PreRecordedControllerGetPreRecordedJobsV2StatusItem: () => PreRecordedControllerGetPreRecordedJobsV2StatusItem,
3437
+ PreRecordedResponseKind: () => PreRecordedResponseKind,
3438
+ PreRecordedResponseStatus: () => PreRecordedResponseStatus,
3439
+ SentimentAnalysisMessageType: () => SentimentAnalysisMessageType,
3440
+ SpeechEndMessageType: () => SpeechEndMessageType,
3441
+ SpeechStartMessageType: () => SpeechStartMessageType,
3442
+ StartRecordingMessageType: () => StartRecordingMessageType,
3443
+ StartSessionMessageType: () => StartSessionMessageType,
3444
+ StopRecordingAckMessageType: () => StopRecordingAckMessageType,
3445
+ StopRecordingActionType: () => StopRecordingActionType,
3446
+ StreamingControllerGetStreamingJobsV2StatusItem: () => StreamingControllerGetStreamingJobsV2StatusItem,
3447
+ StreamingResponseKind: () => StreamingResponseKind,
3448
+ StreamingResponseStatus: () => StreamingResponseStatus,
3449
+ StreamingSupportedBitDepthEnum: () => StreamingSupportedBitDepthEnum,
3450
+ StreamingSupportedEncodingEnum: () => StreamingSupportedEncodingEnum,
3451
+ StreamingSupportedModels: () => StreamingSupportedModels,
3452
+ StreamingSupportedRegions: () => StreamingSupportedRegions,
3453
+ StreamingSupportedSampleRateEnum: () => StreamingSupportedSampleRateEnum,
3454
+ SubtitlesFormatEnum: () => SubtitlesFormatEnum,
3455
+ SubtitlesStyleEnum: () => SubtitlesStyleEnum,
3456
+ SummaryTypesEnum: () => SummaryTypesEnum,
3457
+ TranscriptMessageType: () => TranscriptMessageType,
3458
+ TranscriptionControllerListV2KindItem: () => TranscriptionControllerListV2KindItem,
3459
+ TranscriptionControllerListV2StatusItem: () => TranscriptionControllerListV2StatusItem,
3460
+ TranscriptionLanguageCodeEnum: () => TranscriptionLanguageCodeEnum,
3461
+ TranslationLanguageCodeEnum: () => TranslationLanguageCodeEnum,
3462
+ TranslationMessageType: () => TranslationMessageType,
3463
+ TranslationModelEnum: () => TranslationModelEnum,
3464
+ VideoToTextControllerVideoTranscriptionBodyLanguage: () => VideoToTextControllerVideoTranscriptionBodyLanguage,
3465
+ VideoToTextControllerVideoTranscriptionBodyLanguageBehaviour: () => VideoToTextControllerVideoTranscriptionBodyLanguageBehaviour,
3466
+ VideoToTextControllerVideoTranscriptionBodyOutputFormat: () => VideoToTextControllerVideoTranscriptionBodyOutputFormat,
3467
+ VideoToTextControllerVideoTranscriptionBodyTargetTranslationLanguage: () => VideoToTextControllerVideoTranscriptionBodyTargetTranslationLanguage,
3468
+ WebhookLiveEndRecordingPayloadEvent: () => WebhookLiveEndRecordingPayloadEvent,
3469
+ WebhookLiveEndSessionPayloadEvent: () => WebhookLiveEndSessionPayloadEvent,
3470
+ WebhookLiveStartRecordingPayloadEvent: () => WebhookLiveStartRecordingPayloadEvent,
3471
+ WebhookLiveStartSessionPayloadEvent: () => WebhookLiveStartSessionPayloadEvent,
3472
+ WebhookTranscriptionCreatedPayloadEvent: () => WebhookTranscriptionCreatedPayloadEvent,
3473
+ WebhookTranscriptionErrorPayloadEvent: () => WebhookTranscriptionErrorPayloadEvent,
3474
+ WebhookTranscriptionSuccessPayloadEvent: () => WebhookTranscriptionSuccessPayloadEvent
3475
+ });
3476
+
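This closes the `__export` map for the Gladia schema namespace (`schema_exports`), which the bundle re-exports as `GladiaTypes` in the final `export` block; `__export` installs an enumerable getter per name, so each namespace property reads through to the module-level constant defined below. A minimal consumption sketch, assuming only that the package is imported under its published name (the export names are confirmed by this file; the import specifier is not):

```typescript
import { GladiaTypes } from "voice-router-dev";

// Each namespace entry is a plain const object used as a string enum.
const summaryType = GladiaTypes.SummaryTypesEnum.general; // "general"
console.log(summaryType);
```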
3477
+ // src/generated/gladia/schema/audioChunkAckMessageType.ts
3478
+ var AudioChunkAckMessageType = {
3479
+ audio_chunk: "audio_chunk"
3480
+ };
3481
+
3482
+ // src/generated/gladia/schema/audioChunkActionType.ts
3483
+ var AudioChunkActionType = {
3484
+ audio_chunk: "audio_chunk"
3485
+ };
3486
+
3487
+ // src/generated/gladia/schema/audioToTextControllerAudioTranscriptionBodyLanguage.ts
3488
+ var AudioToTextControllerAudioTranscriptionBodyLanguage = {
3489
+ afrikaans: "afrikaans",
3490
+ albanian: "albanian",
3491
+ amharic: "amharic",
3492
+ arabic: "arabic",
3493
+ armenian: "armenian",
3494
+ assamese: "assamese",
3495
+ azerbaijani: "azerbaijani",
3496
+ bashkir: "bashkir",
3497
+ basque: "basque",
3498
+ belarusian: "belarusian",
3499
+ bengali: "bengali",
3500
+ bosnian: "bosnian",
3501
+ breton: "breton",
3502
+ bulgarian: "bulgarian",
3503
+ catalan: "catalan",
3504
+ chinese: "chinese",
3505
+ croatian: "croatian",
3506
+ czech: "czech",
3507
+ danish: "danish",
3508
+ dutch: "dutch",
3509
+ english: "english",
3510
+ estonian: "estonian",
3511
+ faroese: "faroese",
3512
+ finnish: "finnish",
3513
+ french: "french",
3514
+ galician: "galician",
3515
+ georgian: "georgian",
3516
+ german: "german",
3517
+ greek: "greek",
3518
+ gujarati: "gujarati",
3519
+ haitian_creole: "haitian creole",
3520
+ hausa: "hausa",
3521
+ hawaiian: "hawaiian",
3522
+ hebrew: "hebrew",
3523
+ hindi: "hindi",
3524
+ hungarian: "hungarian",
3525
+ icelandic: "icelandic",
3526
+ indonesian: "indonesian",
3527
+ italian: "italian",
3528
+ japanese: "japanese",
3529
+ javanese: "javanese",
3530
+ kannada: "kannada",
3531
+ kazakh: "kazakh",
3532
+ khmer: "khmer",
3533
+ korean: "korean",
3534
+ lao: "lao",
3535
+ latin: "latin",
3536
+ latvian: "latvian",
3537
+ lingala: "lingala",
3538
+ lithuanian: "lithuanian",
3539
+ luxembourgish: "luxembourgish",
3540
+ macedonian: "macedonian",
3541
+ malagasy: "malagasy",
3542
+ malay: "malay",
3543
+ malayalam: "malayalam",
3544
+ maltese: "maltese",
3545
+ maori: "maori",
3546
+ marathi: "marathi",
3547
+ mongolian: "mongolian",
3548
+ myanmar: "myanmar",
3549
+ nepali: "nepali",
3550
+ norwegian: "norwegian",
3551
+ nynorsk: "nynorsk",
3552
+ occitan: "occitan",
3553
+ pashto: "pashto",
3554
+ persian: "persian",
3555
+ polish: "polish",
3556
+ portuguese: "portuguese",
3557
+ punjabi: "punjabi",
3558
+ romanian: "romanian",
3559
+ russian: "russian",
3560
+ sanskrit: "sanskrit",
3561
+ serbian: "serbian",
3562
+ shona: "shona",
3563
+ sindhi: "sindhi",
3564
+ sinhala: "sinhala",
3565
+ slovak: "slovak",
3566
+ slovenian: "slovenian",
3567
+ somali: "somali",
3568
+ spanish: "spanish",
3569
+ sundanese: "sundanese",
3570
+ swahili: "swahili",
3571
+ swedish: "swedish",
3572
+ tagalog: "tagalog",
3573
+ tajik: "tajik",
3574
+ tamil: "tamil",
3575
+ tatar: "tatar",
3576
+ telugu: "telugu",
3577
+ thai: "thai",
3578
+ tibetan: "tibetan",
3579
+ turkish: "turkish",
3580
+ turkmen: "turkmen",
3581
+ ukrainian: "ukrainian",
3582
+ urdu: "urdu",
3583
+ uzbek: "uzbek",
3584
+ vietnamese: "vietnamese",
3585
+ welsh: "welsh",
3586
+ yiddish: "yiddish",
3587
+ yoruba: "yoruba"
3588
+ };
3589
+
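`AudioToTextControllerAudioTranscriptionBodyLanguage` follows the const-object-as-enum pattern used throughout these generated schemas: keys are safe identifiers, values are the wire strings (note `haitian_creole` mapping to the spaced string "haitian creole"). A hypothetical typing sketch, using a truncated local stand-in, of how the TypeScript sources would typically derive a union type from such an object:

```typescript
// Truncated stand-in for the generated object above.
const BodyLanguage = {
  english: "english",
  french: "french",
  haitian_creole: "haitian creole",
  // ... the generated object lists ~95 more languages
} as const;

// Union of the string values: "english" | "french" | "haitian creole"
type BodyLanguage = (typeof BodyLanguage)[keyof typeof BodyLanguage];

const ok: BodyLanguage = "french";
// const bad: BodyLanguage = "klingon"; // rejected at compile time
```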
3590
+ // src/generated/gladia/schema/audioToTextControllerAudioTranscriptionBodyLanguageBehaviour.ts
3591
+ var AudioToTextControllerAudioTranscriptionBodyLanguageBehaviour = {
3592
+ automatic_single_language: "automatic single language",
3593
+ automatic_multiple_languages: "automatic multiple languages",
3594
+ manual: "manual"
3595
+ };
3596
+
3597
+ // src/generated/gladia/schema/audioToTextControllerAudioTranscriptionBodyOutputFormat.ts
3598
+ var AudioToTextControllerAudioTranscriptionBodyOutputFormat = {
3599
+ json: "json",
3600
+ srt: "srt",
3601
+ vtt: "vtt",
3602
+ plain: "plain",
3603
+ txt: "txt"
3604
+ };
3605
+
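Taken together, the three `audioToTextController…` objects above parameterize an audio transcription request: source language, language-detection behaviour, and output format. A hedged request-fragment sketch; the property names are guesses inferred from the enum names, not a confirmed request schema, while the values come from the generated constants:

```typescript
// Property names below are illustrative assumptions.
const transcriptionBody = {
  // ...BodyLanguageBehaviour.automatic_single_language
  language_behaviour: "automatic single language",
  // ...BodyOutputFormat.srt
  output_format: "srt",
};
console.log(JSON.stringify(transcriptionBody));
```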
3606
+ // src/generated/gladia/schema/audioToTextControllerAudioTranscriptionBodyTargetTranslationLanguage.ts
3607
+ var AudioToTextControllerAudioTranscriptionBodyTargetTranslationLanguage = {
3608
+ afrikaans: "afrikaans",
3609
+ albanian: "albanian",
3610
+ amharic: "amharic",
3611
+ arabic: "arabic",
3612
+ armenian: "armenian",
3613
+ assamese: "assamese",
3614
+ azerbaijani: "azerbaijani",
3615
+ bashkir: "bashkir",
3616
+ basque: "basque",
3617
+ belarusian: "belarusian",
3618
+ bengali: "bengali",
3619
+ bosnian: "bosnian",
3620
+ breton: "breton",
3621
+ bulgarian: "bulgarian",
3622
+ catalan: "catalan",
3623
+ chinese: "chinese",
3624
+ croatian: "croatian",
3625
+ czech: "czech",
3626
+ danish: "danish",
3627
+ dutch: "dutch",
3628
+ english: "english",
3629
+ estonian: "estonian",
3630
+ faroese: "faroese",
3631
+ finnish: "finnish",
3632
+ french: "french",
3633
+ galician: "galician",
3634
+ georgian: "georgian",
3635
+ german: "german",
3636
+ greek: "greek",
3637
+ gujarati: "gujarati",
3638
+ haitian_creole: "haitian creole",
3639
+ hausa: "hausa",
3640
+ hawaiian: "hawaiian",
3641
+ hebrew: "hebrew",
3642
+ hindi: "hindi",
3643
+ hungarian: "hungarian",
3644
+ icelandic: "icelandic",
3645
+ indonesian: "indonesian",
3646
+ italian: "italian",
3647
+ japanese: "japanese",
3648
+ javanese: "javanese",
3649
+ kannada: "kannada",
3650
+ kazakh: "kazakh",
3651
+ khmer: "khmer",
3652
+ korean: "korean",
3653
+ lao: "lao",
3654
+ latin: "latin",
3655
+ latvian: "latvian",
3656
+ lingala: "lingala",
3657
+ lithuanian: "lithuanian",
3658
+ luxembourgish: "luxembourgish",
3659
+ macedonian: "macedonian",
3660
+ malagasy: "malagasy",
3661
+ malay: "malay",
3662
+ malayalam: "malayalam",
3663
+ maltese: "maltese",
3664
+ maori: "maori",
3665
+ marathi: "marathi",
3666
+ mongolian: "mongolian",
3667
+ myanmar: "myanmar",
3668
+ nepali: "nepali",
3669
+ norwegian: "norwegian",
3670
+ nynorsk: "nynorsk",
3671
+ occitan: "occitan",
3672
+ pashto: "pashto",
3673
+ persian: "persian",
3674
+ polish: "polish",
3675
+ portuguese: "portuguese",
3676
+ punjabi: "punjabi",
3677
+ romanian: "romanian",
3678
+ russian: "russian",
3679
+ sanskrit: "sanskrit",
3680
+ serbian: "serbian",
3681
+ shona: "shona",
3682
+ sindhi: "sindhi",
3683
+ sinhala: "sinhala",
3684
+ slovak: "slovak",
3685
+ slovenian: "slovenian",
3686
+ somali: "somali",
3687
+ spanish: "spanish",
3688
+ sundanese: "sundanese",
3689
+ swahili: "swahili",
3690
+ swedish: "swedish",
3691
+ tagalog: "tagalog",
3692
+ tajik: "tajik",
3693
+ tamil: "tamil",
3694
+ tatar: "tatar",
3695
+ telugu: "telugu",
3696
+ thai: "thai",
3697
+ tibetan: "tibetan",
3698
+ turkish: "turkish",
3699
+ turkmen: "turkmen",
3700
+ ukrainian: "ukrainian",
3701
+ urdu: "urdu",
3702
+ uzbek: "uzbek",
3703
+ vietnamese: "vietnamese",
3704
+ welsh: "welsh",
3705
+ wolof: "wolof",
3706
+ yiddish: "yiddish",
3707
+ yoruba: "yoruba"
3708
+ };
3709
+
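Worth noting: this target-translation list is a strict superset of the transcription list above, adding `wolof` (the two-letter `TranslationLanguageCodeEnum` later in this file likewise adds `wo`). A small guard sketch, using a truncated local stand-in for the transcription object, for rejecting translation-only languages before submitting a transcription job:

```typescript
// Truncated stand-in for AudioToTextControllerAudioTranscriptionBodyLanguage.
const transcribable = {
  welsh: "welsh",
  yiddish: "yiddish",
  yoruba: "yoruba",
} as const;

const transcribableValues: string[] = Object.values(transcribable);

const canTranscribe = (lang: string): boolean =>
  transcribableValues.includes(lang);

console.log(canTranscribe("yoruba")); // true
console.log(canTranscribe("wolof")); // false: translation target only
```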
3710
+ // src/generated/gladia/schema/callbackLiveAudioChunkAckMessageEvent.ts
3711
+ var CallbackLiveAudioChunkAckMessageEvent = {
3712
+ liveaudio_chunk: "live.audio_chunk"
3713
+ };
3714
+
3715
+ // src/generated/gladia/schema/callbackLiveEndRecordingMessageEvent.ts
3716
+ var CallbackLiveEndRecordingMessageEvent = {
3717
+ liveend_recording: "live.end_recording"
3718
+ };
3719
+
3720
+ // src/generated/gladia/schema/callbackLiveEndSessionMessageEvent.ts
3721
+ var CallbackLiveEndSessionMessageEvent = {
3722
+ liveend_session: "live.end_session"
3723
+ };
3724
+
3725
+ // src/generated/gladia/schema/callbackLiveNamedEntityRecognitionMessageEvent.ts
3726
+ var CallbackLiveNamedEntityRecognitionMessageEvent = {
3727
+ livenamed_entity_recognition: "live.named_entity_recognition"
3728
+ };
3729
+
3730
+ // src/generated/gladia/schema/callbackLivePostChapterizationMessageEvent.ts
3731
+ var CallbackLivePostChapterizationMessageEvent = {
3732
+ livepost_chapterization: "live.post_chapterization"
3733
+ };
3734
+
3735
+ // src/generated/gladia/schema/callbackLivePostFinalTranscriptMessageEvent.ts
3736
+ var CallbackLivePostFinalTranscriptMessageEvent = {
3737
+ livepost_final_transcript: "live.post_final_transcript"
3738
+ };
3739
+
3740
+ // src/generated/gladia/schema/callbackLivePostSummarizationMessageEvent.ts
3741
+ var CallbackLivePostSummarizationMessageEvent = {
3742
+ livepost_summarization: "live.post_summarization"
3743
+ };
3744
+
3745
+ // src/generated/gladia/schema/callbackLivePostTranscriptMessageEvent.ts
3746
+ var CallbackLivePostTranscriptMessageEvent = {
3747
+ livepost_transcript: "live.post_transcript"
3748
+ };
3749
+
3750
+ // src/generated/gladia/schema/callbackLiveSentimentAnalysisMessageEvent.ts
3751
+ var CallbackLiveSentimentAnalysisMessageEvent = {
3752
+ livesentiment_analysis: "live.sentiment_analysis"
3753
+ };
3754
+
3755
+ // src/generated/gladia/schema/callbackLiveSpeechEndMessageEvent.ts
3756
+ var CallbackLiveSpeechEndMessageEvent = {
3757
+ livespeech_end: "live.speech_end"
3758
+ };
3759
+
3760
+ // src/generated/gladia/schema/callbackLiveSpeechStartMessageEvent.ts
3761
+ var CallbackLiveSpeechStartMessageEvent = {
3762
+ livespeech_start: "live.speech_start"
3763
+ };
3764
+
3765
+ // src/generated/gladia/schema/callbackLiveStartRecordingMessageEvent.ts
3766
+ var CallbackLiveStartRecordingMessageEvent = {
3767
+ livestart_recording: "live.start_recording"
3768
+ };
3769
+
3770
+ // src/generated/gladia/schema/callbackLiveStartSessionMessageEvent.ts
3771
+ var CallbackLiveStartSessionMessageEvent = {
3772
+ livestart_session: "live.start_session"
3773
+ };
3774
+
3775
+ // src/generated/gladia/schema/callbackLiveStopRecordingAckMessageEvent.ts
3776
+ var CallbackLiveStopRecordingAckMessageEvent = {
3777
+ livestop_recording: "live.stop_recording"
3778
+ };
3779
+
3780
+ // src/generated/gladia/schema/callbackLiveTranscriptMessageEvent.ts
3781
+ var CallbackLiveTranscriptMessageEvent = {
3782
+ livetranscript: "live.transcript"
3783
+ };
3784
+
3785
+ // src/generated/gladia/schema/callbackLiveTranslationMessageEvent.ts
3786
+ var CallbackLiveTranslationMessageEvent = {
3787
+ livetranslation: "live.translation"
3788
+ };
3789
+
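Each `CallbackLive…MessageEvent` object above pins a single dotted event string (the generated keys collapse the dot, e.g. `livetranscript` for "live.transcript"). A dispatch sketch over those strings; the message shape is hypothetical, and only the event values are taken from the constants:

```typescript
interface LiveCallbackMessage {
  event: string; // e.g. CallbackLiveTranscriptMessageEvent.livetranscript
  payload?: unknown;
}

function handleLiveCallback(msg: LiveCallbackMessage): void {
  switch (msg.event) {
    case "live.transcript": // CallbackLiveTranscriptMessageEvent
      console.log("transcript chunk received");
      break;
    case "live.end_session": // CallbackLiveEndSessionMessageEvent
      console.log("session finished");
      break;
    default:
      console.log(`unhandled event: ${msg.event}`);
  }
}

handleLiveCallback({ event: "live.transcript" });
```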
3790
+ // src/generated/gladia/schema/callbackMethodEnum.ts
3791
+ var CallbackMethodEnum = {
3792
+ POST: "POST",
3793
+ PUT: "PUT"
3794
+ };
3795
+
3796
+ // src/generated/gladia/schema/callbackTranscriptionErrorPayloadEvent.ts
3797
+ var CallbackTranscriptionErrorPayloadEvent = {
3798
+ transcriptionerror: "transcription.error"
3799
+ };
3800
+
3801
+ // src/generated/gladia/schema/callbackTranscriptionSuccessPayloadEvent.ts
3802
+ var CallbackTranscriptionSuccessPayloadEvent = {
3803
+ transcriptionsuccess: "transcription.success"
3804
+ };
3805
+
3806
+ // src/generated/gladia/schema/endRecordingMessageType.ts
3807
+ var EndRecordingMessageType = {
3808
+ end_recording: "end_recording"
3809
+ };
3810
+
3811
+ // src/generated/gladia/schema/endSessionMessageType.ts
3812
+ var EndSessionMessageType = {
3813
+ end_session: "end_session"
3814
+ };
3815
+
3816
+ // src/generated/gladia/schema/historyControllerGetListV1KindItem.ts
3817
+ var HistoryControllerGetListV1KindItem = {
3818
+ "pre-recorded": "pre-recorded",
3819
+ live: "live"
3820
+ };
3821
+
3822
+ // src/generated/gladia/schema/historyControllerGetListV1StatusItem.ts
3823
+ var HistoryControllerGetListV1StatusItem = {
3824
+ queued: "queued",
3825
+ processing: "processing",
3826
+ done: "done",
3827
+ error: "error"
3828
+ };
3829
+
3830
+ // src/generated/gladia/schema/namedEntityRecognitionMessageType.ts
3831
+ var NamedEntityRecognitionMessageType = {
3832
+ named_entity_recognition: "named_entity_recognition"
3833
+ };
3834
+
3835
+ // src/generated/gladia/schema/postChapterizationMessageType.ts
3836
+ var PostChapterizationMessageType = {
3837
+ post_chapterization: "post_chapterization"
3838
+ };
3839
+
3840
+ // src/generated/gladia/schema/postFinalTranscriptMessageType.ts
3841
+ var PostFinalTranscriptMessageType = {
3842
+ post_final_transcript: "post_final_transcript"
3843
+ };
3844
+
3845
+ // src/generated/gladia/schema/postSummarizationMessageType.ts
3846
+ var PostSummarizationMessageType = {
3847
+ post_summarization: "post_summarization"
3848
+ };
3849
+
3850
+ // src/generated/gladia/schema/postTranscriptMessageType.ts
3851
+ var PostTranscriptMessageType = {
3852
+ post_transcript: "post_transcript"
3853
+ };
3854
+
3855
+ // src/generated/gladia/schema/preRecordedControllerGetPreRecordedJobsV2StatusItem.ts
3856
+ var PreRecordedControllerGetPreRecordedJobsV2StatusItem = {
3857
+ queued: "queued",
3858
+ processing: "processing",
3859
+ done: "done",
3860
+ error: "error"
3861
+ };
3862
+
3863
+ // src/generated/gladia/schema/preRecordedResponseKind.ts
3864
+ var PreRecordedResponseKind = {
3865
+ "pre-recorded": "pre-recorded"
3866
+ };
3867
+
3868
+ // src/generated/gladia/schema/preRecordedResponseStatus.ts
3869
+ var PreRecordedResponseStatus = {
3870
+ queued: "queued",
3871
+ processing: "processing",
3872
+ done: "done",
3873
+ error: "error"
3874
+ };
3875
+
3876
+ // src/generated/gladia/schema/sentimentAnalysisMessageType.ts
3877
+ var SentimentAnalysisMessageType = {
3878
+ sentiment_analysis: "sentiment_analysis"
3879
+ };
3880
+
3881
+ // src/generated/gladia/schema/speechEndMessageType.ts
3882
+ var SpeechEndMessageType = {
3883
+ speech_end: "speech_end"
3884
+ };
3885
+
3886
+ // src/generated/gladia/schema/speechStartMessageType.ts
3887
+ var SpeechStartMessageType = {
3888
+ speech_start: "speech_start"
3889
+ };
3890
+
3891
+ // src/generated/gladia/schema/startRecordingMessageType.ts
3892
+ var StartRecordingMessageType = {
3893
+ start_recording: "start_recording"
3894
+ };
3895
+
3896
+ // src/generated/gladia/schema/startSessionMessageType.ts
3897
+ var StartSessionMessageType = {
3898
+ start_session: "start_session"
3899
+ };
3900
+
3901
+ // src/generated/gladia/schema/stopRecordingAckMessageType.ts
3902
+ var StopRecordingAckMessageType = {
3903
+ stop_recording: "stop_recording"
3904
+ };
3905
+
3906
+ // src/generated/gladia/schema/stopRecordingActionType.ts
3907
+ var StopRecordingActionType = {
3908
+ stop_recording: "stop_recording"
3909
+ };
3910
+
3911
+ // src/generated/gladia/schema/streamingControllerGetStreamingJobsV2StatusItem.ts
3912
+ var StreamingControllerGetStreamingJobsV2StatusItem = {
3913
+ queued: "queued",
3914
+ processing: "processing",
3915
+ done: "done",
3916
+ error: "error"
3917
+ };
3918
+
3919
+ // src/generated/gladia/schema/streamingResponseKind.ts
3920
+ var StreamingResponseKind = {
3921
+ live: "live"
3922
+ };
3923
+
3924
+ // src/generated/gladia/schema/streamingResponseStatus.ts
3925
+ var StreamingResponseStatus = {
3926
+ queued: "queued",
3927
+ processing: "processing",
3928
+ done: "done",
3929
+ error: "error"
3930
+ };
3931
+
3932
+ // src/generated/gladia/schema/streamingSupportedBitDepthEnum.ts
3933
+ var StreamingSupportedBitDepthEnum = {
3934
+ NUMBER_8: 8,
3935
+ NUMBER_16: 16,
3936
+ NUMBER_24: 24,
3937
+ NUMBER_32: 32
3938
+ };
3939
+
3940
+ // src/generated/gladia/schema/streamingSupportedEncodingEnum.ts
3941
+ var StreamingSupportedEncodingEnum = {
3942
+ "wav/pcm": "wav/pcm",
3943
+ "wav/alaw": "wav/alaw",
3944
+ "wav/ulaw": "wav/ulaw"
3945
+ };
3946
+
3947
+ // src/generated/gladia/schema/streamingSupportedModels.ts
3948
+ var StreamingSupportedModels = {
3949
+ "solaria-1": "solaria-1"
3950
+ };
3951
+
3952
+ // src/generated/gladia/schema/streamingSupportedRegions.ts
3953
+ var StreamingSupportedRegions = {
3954
+ "us-west": "us-west",
3955
+ "eu-west": "eu-west"
3956
+ };
3957
+
3958
+ // src/generated/gladia/schema/streamingSupportedSampleRateEnum.ts
3959
+ var StreamingSupportedSampleRateEnum = {
3960
+ NUMBER_8000: 8e3,
3961
+ NUMBER_16000: 16e3,
3962
+ NUMBER_32000: 32e3,
3963
+ NUMBER_44100: 44100,
3964
+ NUMBER_48000: 48e3
3965
+ };
3966
+
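The five `StreamingSupported…` objects above define the audio envelope a live session accepts; note the minifier has rewritten round sample rates in exponent form (`16e3` is 16000). A hedged configuration sketch, with illustrative property names and values drawn from the constants:

```typescript
// Property names are assumptions; values come from the Streaming* enums.
const liveSessionConfig = {
  encoding: "wav/pcm", // StreamingSupportedEncodingEnum["wav/pcm"]
  sample_rate: 16000,  // StreamingSupportedSampleRateEnum.NUMBER_16000 (16e3)
  bit_depth: 16,       // StreamingSupportedBitDepthEnum.NUMBER_16
  model: "solaria-1",  // StreamingSupportedModels["solaria-1"]
  region: "eu-west",   // StreamingSupportedRegions["eu-west"]
};
console.log(liveSessionConfig);
```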
3967
+ // src/generated/gladia/schema/subtitlesFormatEnum.ts
3968
+ var SubtitlesFormatEnum = {
3969
+ srt: "srt",
3970
+ vtt: "vtt"
3971
+ };
3972
+
3973
+ // src/generated/gladia/schema/subtitlesStyleEnum.ts
3974
+ var SubtitlesStyleEnum = {
3975
+ default: "default",
3976
+ compliance: "compliance"
3977
+ };
3978
+
3979
+ // src/generated/gladia/schema/summaryTypesEnum.ts
3980
+ var SummaryTypesEnum = {
3981
+ general: "general",
3982
+ bullet_points: "bullet_points",
3983
+ concise: "concise"
3984
+ };
3985
+
3986
+ // src/generated/gladia/schema/transcriptMessageType.ts
3987
+ var TranscriptMessageType = {
3988
+ transcript: "transcript"
3989
+ };
3990
+
3991
+ // src/generated/gladia/schema/transcriptionControllerListV2KindItem.ts
3992
+ var TranscriptionControllerListV2KindItem = {
3993
+ "pre-recorded": "pre-recorded",
3994
+ live: "live"
3995
+ };
3996
+
3997
+ // src/generated/gladia/schema/transcriptionControllerListV2StatusItem.ts
3998
+ var TranscriptionControllerListV2StatusItem = {
3999
+ queued: "queued",
4000
+ processing: "processing",
4001
+ done: "done",
4002
+ error: "error"
4003
+ };
4004
+
4005
+ // src/generated/gladia/schema/transcriptionLanguageCodeEnum.ts
4006
+ var TranscriptionLanguageCodeEnum = {
4007
+ af: "af",
4008
+ am: "am",
4009
+ ar: "ar",
4010
+ as: "as",
4011
+ az: "az",
4012
+ ba: "ba",
4013
+ be: "be",
4014
+ bg: "bg",
4015
+ bn: "bn",
4016
+ bo: "bo",
4017
+ br: "br",
4018
+ bs: "bs",
4019
+ ca: "ca",
4020
+ cs: "cs",
4021
+ cy: "cy",
4022
+ da: "da",
4023
+ de: "de",
4024
+ el: "el",
4025
+ en: "en",
4026
+ es: "es",
4027
+ et: "et",
4028
+ eu: "eu",
4029
+ fa: "fa",
4030
+ fi: "fi",
4031
+ fo: "fo",
4032
+ fr: "fr",
4033
+ gl: "gl",
4034
+ gu: "gu",
4035
+ ha: "ha",
4036
+ haw: "haw",
4037
+ he: "he",
4038
+ hi: "hi",
4039
+ hr: "hr",
4040
+ ht: "ht",
4041
+ hu: "hu",
4042
+ hy: "hy",
4043
+ id: "id",
4044
+ is: "is",
4045
+ it: "it",
4046
+ ja: "ja",
4047
+ jw: "jw",
4048
+ ka: "ka",
4049
+ kk: "kk",
4050
+ km: "km",
4051
+ kn: "kn",
4052
+ ko: "ko",
4053
+ la: "la",
4054
+ lb: "lb",
4055
+ ln: "ln",
4056
+ lo: "lo",
4057
+ lt: "lt",
4058
+ lv: "lv",
4059
+ mg: "mg",
4060
+ mi: "mi",
4061
+ mk: "mk",
4062
+ ml: "ml",
4063
+ mn: "mn",
4064
+ mr: "mr",
4065
+ ms: "ms",
4066
+ mt: "mt",
4067
+ my: "my",
4068
+ ne: "ne",
4069
+ nl: "nl",
4070
+ nn: "nn",
4071
+ no: "no",
4072
+ oc: "oc",
4073
+ pa: "pa",
4074
+ pl: "pl",
4075
+ ps: "ps",
4076
+ pt: "pt",
4077
+ ro: "ro",
4078
+ ru: "ru",
4079
+ sa: "sa",
4080
+ sd: "sd",
4081
+ si: "si",
4082
+ sk: "sk",
4083
+ sl: "sl",
4084
+ sn: "sn",
4085
+ so: "so",
4086
+ sq: "sq",
4087
+ sr: "sr",
4088
+ su: "su",
4089
+ sv: "sv",
4090
+ sw: "sw",
4091
+ ta: "ta",
4092
+ te: "te",
4093
+ tg: "tg",
4094
+ th: "th",
4095
+ tk: "tk",
4096
+ tl: "tl",
4097
+ tr: "tr",
4098
+ tt: "tt",
4099
+ uk: "uk",
4100
+ ur: "ur",
4101
+ uz: "uz",
4102
+ vi: "vi",
4103
+ yi: "yi",
4104
+ yo: "yo",
4105
+ zh: "zh"
4106
+ };
4107
+
4108
+ // src/generated/gladia/schema/translationLanguageCodeEnum.ts
4109
+ var TranslationLanguageCodeEnum = {
4110
+ af: "af",
4111
+ am: "am",
4112
+ ar: "ar",
4113
+ as: "as",
4114
+ az: "az",
4115
+ ba: "ba",
4116
+ be: "be",
4117
+ bg: "bg",
4118
+ bn: "bn",
4119
+ bo: "bo",
4120
+ br: "br",
4121
+ bs: "bs",
4122
+ ca: "ca",
4123
+ cs: "cs",
4124
+ cy: "cy",
4125
+ da: "da",
4126
+ de: "de",
4127
+ el: "el",
4128
+ en: "en",
4129
+ es: "es",
4130
+ et: "et",
4131
+ eu: "eu",
4132
+ fa: "fa",
4133
+ fi: "fi",
4134
+ fo: "fo",
4135
+ fr: "fr",
4136
+ gl: "gl",
4137
+ gu: "gu",
4138
+ ha: "ha",
4139
+ haw: "haw",
4140
+ he: "he",
4141
+ hi: "hi",
4142
+ hr: "hr",
4143
+ ht: "ht",
4144
+ hu: "hu",
4145
+ hy: "hy",
4146
+ id: "id",
4147
+ is: "is",
4148
+ it: "it",
4149
+ ja: "ja",
4150
+ jw: "jw",
4151
+ ka: "ka",
4152
+ kk: "kk",
4153
+ km: "km",
4154
+ kn: "kn",
4155
+ ko: "ko",
4156
+ la: "la",
4157
+ lb: "lb",
4158
+ ln: "ln",
4159
+ lo: "lo",
4160
+ lt: "lt",
4161
+ lv: "lv",
4162
+ mg: "mg",
4163
+ mi: "mi",
4164
+ mk: "mk",
4165
+ ml: "ml",
4166
+ mn: "mn",
4167
+ mr: "mr",
4168
+ ms: "ms",
4169
+ mt: "mt",
4170
+ my: "my",
4171
+ ne: "ne",
4172
+ nl: "nl",
4173
+ nn: "nn",
4174
+ no: "no",
4175
+ oc: "oc",
4176
+ pa: "pa",
4177
+ pl: "pl",
4178
+ ps: "ps",
4179
+ pt: "pt",
4180
+ ro: "ro",
4181
+ ru: "ru",
4182
+ sa: "sa",
4183
+ sd: "sd",
4184
+ si: "si",
4185
+ sk: "sk",
4186
+ sl: "sl",
4187
+ sn: "sn",
4188
+ so: "so",
4189
+ sq: "sq",
4190
+ sr: "sr",
4191
+ su: "su",
4192
+ sv: "sv",
4193
+ sw: "sw",
4194
+ ta: "ta",
4195
+ te: "te",
4196
+ tg: "tg",
4197
+ th: "th",
4198
+ tk: "tk",
4199
+ tl: "tl",
4200
+ tr: "tr",
4201
+ tt: "tt",
4202
+ uk: "uk",
4203
+ ur: "ur",
4204
+ uz: "uz",
4205
+ vi: "vi",
4206
+ wo: "wo",
4207
+ yi: "yi",
4208
+ yo: "yo",
4209
+ zh: "zh"
4210
+ };
4211
+
4212
+ // src/generated/gladia/schema/translationMessageType.ts
4213
+ var TranslationMessageType = {
4214
+ translation: "translation"
4215
+ };
4216
+
4217
+ // src/generated/gladia/schema/translationModelEnum.ts
4218
+ var TranslationModelEnum = {
4219
+ base: "base",
4220
+ enhanced: "enhanced"
4221
+ };
4222
+
4223
+ // src/generated/gladia/schema/videoToTextControllerVideoTranscriptionBodyLanguage.ts
4224
+ var VideoToTextControllerVideoTranscriptionBodyLanguage = {
4225
+ afrikaans: "afrikaans",
4226
+ albanian: "albanian",
4227
+ amharic: "amharic",
4228
+ arabic: "arabic",
4229
+ armenian: "armenian",
4230
+ assamese: "assamese",
4231
+ azerbaijani: "azerbaijani",
4232
+ bashkir: "bashkir",
4233
+ basque: "basque",
4234
+ belarusian: "belarusian",
4235
+ bengali: "bengali",
4236
+ bosnian: "bosnian",
4237
+ breton: "breton",
4238
+ bulgarian: "bulgarian",
4239
+ catalan: "catalan",
4240
+ chinese: "chinese",
4241
+ croatian: "croatian",
4242
+ czech: "czech",
4243
+ danish: "danish",
4244
+ dutch: "dutch",
4245
+ english: "english",
4246
+ estonian: "estonian",
4247
+ faroese: "faroese",
4248
+ finnish: "finnish",
4249
+ french: "french",
4250
+ galician: "galician",
4251
+ georgian: "georgian",
4252
+ german: "german",
4253
+ greek: "greek",
4254
+ gujarati: "gujarati",
4255
+ haitian_creole: "haitian creole",
4256
+ hausa: "hausa",
4257
+ hawaiian: "hawaiian",
4258
+ hebrew: "hebrew",
4259
+ hindi: "hindi",
4260
+ hungarian: "hungarian",
4261
+ icelandic: "icelandic",
4262
+ indonesian: "indonesian",
4263
+ italian: "italian",
4264
+ japanese: "japanese",
4265
+ javanese: "javanese",
4266
+ kannada: "kannada",
4267
+ kazakh: "kazakh",
4268
+ khmer: "khmer",
4269
+ korean: "korean",
4270
+ lao: "lao",
4271
+ latin: "latin",
4272
+ latvian: "latvian",
4273
+ lingala: "lingala",
4274
+ lithuanian: "lithuanian",
4275
+ luxembourgish: "luxembourgish",
4276
+ macedonian: "macedonian",
4277
+ malagasy: "malagasy",
4278
+ malay: "malay",
4279
+ malayalam: "malayalam",
4280
+ maltese: "maltese",
4281
+ maori: "maori",
4282
+ marathi: "marathi",
4283
+ mongolian: "mongolian",
4284
+ myanmar: "myanmar",
4285
+ nepali: "nepali",
4286
+ norwegian: "norwegian",
4287
+ nynorsk: "nynorsk",
4288
+ occitan: "occitan",
4289
+ pashto: "pashto",
4290
+ persian: "persian",
4291
+ polish: "polish",
4292
+ portuguese: "portuguese",
4293
+ punjabi: "punjabi",
4294
+ romanian: "romanian",
4295
+ russian: "russian",
4296
+ sanskrit: "sanskrit",
4297
+ serbian: "serbian",
4298
+ shona: "shona",
4299
+ sindhi: "sindhi",
4300
+ sinhala: "sinhala",
4301
+ slovak: "slovak",
4302
+ slovenian: "slovenian",
4303
+ somali: "somali",
4304
+ spanish: "spanish",
4305
+ sundanese: "sundanese",
4306
+ swahili: "swahili",
4307
+ swedish: "swedish",
4308
+ tagalog: "tagalog",
4309
+ tajik: "tajik",
4310
+ tamil: "tamil",
4311
+ tatar: "tatar",
4312
+ telugu: "telugu",
4313
+ thai: "thai",
4314
+ tibetan: "tibetan",
4315
+ turkish: "turkish",
4316
+ turkmen: "turkmen",
4317
+ ukrainian: "ukrainian",
4318
+ urdu: "urdu",
4319
+ uzbek: "uzbek",
4320
+ vietnamese: "vietnamese",
4321
+ welsh: "welsh",
4322
+ yiddish: "yiddish",
4323
+ yoruba: "yoruba"
4324
+ };
4325
+
4326
+ // src/generated/gladia/schema/videoToTextControllerVideoTranscriptionBodyLanguageBehaviour.ts
4327
+ var VideoToTextControllerVideoTranscriptionBodyLanguageBehaviour = {
4328
+ automatic_single_language: "automatic single language",
4329
+ automatic_multiple_languages: "automatic multiple languages",
4330
+ manual: "manual"
4331
+ };
4332
+
4333
+ // src/generated/gladia/schema/videoToTextControllerVideoTranscriptionBodyOutputFormat.ts
4334
+ var VideoToTextControllerVideoTranscriptionBodyOutputFormat = {
4335
+ json: "json",
4336
+ srt: "srt",
4337
+ vtt: "vtt",
4338
+ plain: "plain",
4339
+ txt: "txt"
4340
+ };
4341
+
4342
+ // src/generated/gladia/schema/videoToTextControllerVideoTranscriptionBodyTargetTranslationLanguage.ts
4343
+ var VideoToTextControllerVideoTranscriptionBodyTargetTranslationLanguage = {
4344
+ afrikaans: "afrikaans",
4345
+ albanian: "albanian",
4346
+ amharic: "amharic",
4347
+ arabic: "arabic",
4348
+ armenian: "armenian",
4349
+ assamese: "assamese",
4350
+ azerbaijani: "azerbaijani",
4351
+ bashkir: "bashkir",
4352
+ basque: "basque",
4353
+ belarusian: "belarusian",
4354
+ bengali: "bengali",
4355
+ bosnian: "bosnian",
4356
+ breton: "breton",
4357
+ bulgarian: "bulgarian",
4358
+ catalan: "catalan",
4359
+ chinese: "chinese",
4360
+ croatian: "croatian",
4361
+ czech: "czech",
4362
+ danish: "danish",
4363
+ dutch: "dutch",
4364
+ english: "english",
4365
+ estonian: "estonian",
4366
+ faroese: "faroese",
4367
+ finnish: "finnish",
4368
+ french: "french",
4369
+ galician: "galician",
4370
+ georgian: "georgian",
4371
+ german: "german",
4372
+ greek: "greek",
4373
+ gujarati: "gujarati",
4374
+ haitian_creole: "haitian creole",
4375
+ hausa: "hausa",
4376
+ hawaiian: "hawaiian",
4377
+ hebrew: "hebrew",
4378
+ hindi: "hindi",
4379
+ hungarian: "hungarian",
4380
+ icelandic: "icelandic",
4381
+ indonesian: "indonesian",
4382
+ italian: "italian",
4383
+ japanese: "japanese",
4384
+ javanese: "javanese",
4385
+ kannada: "kannada",
4386
+ kazakh: "kazakh",
4387
+ khmer: "khmer",
4388
+ korean: "korean",
4389
+ lao: "lao",
4390
+ latin: "latin",
4391
+ latvian: "latvian",
4392
+ lingala: "lingala",
4393
+ lithuanian: "lithuanian",
4394
+ luxembourgish: "luxembourgish",
4395
+ macedonian: "macedonian",
4396
+ malagasy: "malagasy",
4397
+ malay: "malay",
4398
+ malayalam: "malayalam",
4399
+ maltese: "maltese",
4400
+ maori: "maori",
4401
+ marathi: "marathi",
4402
+ mongolian: "mongolian",
4403
+ myanmar: "myanmar",
4404
+ nepali: "nepali",
4405
+ norwegian: "norwegian",
4406
+ nynorsk: "nynorsk",
4407
+ occitan: "occitan",
4408
+ pashto: "pashto",
4409
+ persian: "persian",
4410
+ polish: "polish",
4411
+ portuguese: "portuguese",
4412
+ punjabi: "punjabi",
4413
+ romanian: "romanian",
4414
+ russian: "russian",
4415
+ sanskrit: "sanskrit",
4416
+ serbian: "serbian",
4417
+ shona: "shona",
4418
+ sindhi: "sindhi",
4419
+ sinhala: "sinhala",
4420
+ slovak: "slovak",
4421
+ slovenian: "slovenian",
4422
+ somali: "somali",
4423
+ spanish: "spanish",
4424
+ sundanese: "sundanese",
4425
+ swahili: "swahili",
4426
+ swedish: "swedish",
4427
+ tagalog: "tagalog",
4428
+ tajik: "tajik",
4429
+ tamil: "tamil",
4430
+ tatar: "tatar",
4431
+ telugu: "telugu",
4432
+ thai: "thai",
4433
+ tibetan: "tibetan",
4434
+ turkish: "turkish",
4435
+ turkmen: "turkmen",
4436
+ ukrainian: "ukrainian",
4437
+ urdu: "urdu",
4438
+ uzbek: "uzbek",
4439
+ vietnamese: "vietnamese",
4440
+ welsh: "welsh",
4441
+ wolof: "wolof",
4442
+ yiddish: "yiddish",
4443
+ yoruba: "yoruba"
4444
+ };
4445
+
4446
+ // src/generated/gladia/schema/webhookLiveEndRecordingPayloadEvent.ts
4447
+ var WebhookLiveEndRecordingPayloadEvent = {
4448
+ liveend_recording: "live.end_recording"
4449
+ };
4450
+
4451
+ // src/generated/gladia/schema/webhookLiveEndSessionPayloadEvent.ts
4452
+ var WebhookLiveEndSessionPayloadEvent = {
4453
+ liveend_session: "live.end_session"
4454
+ };
4455
+
4456
+ // src/generated/gladia/schema/webhookLiveStartRecordingPayloadEvent.ts
4457
+ var WebhookLiveStartRecordingPayloadEvent = {
4458
+ livestart_recording: "live.start_recording"
4459
+ };
4460
+
4461
+ // src/generated/gladia/schema/webhookLiveStartSessionPayloadEvent.ts
4462
+ var WebhookLiveStartSessionPayloadEvent = {
4463
+ livestart_session: "live.start_session"
4464
+ };
4465
+
4466
+ // src/generated/gladia/schema/webhookTranscriptionCreatedPayloadEvent.ts
4467
+ var WebhookTranscriptionCreatedPayloadEvent = {
4468
+ transcriptioncreated: "transcription.created"
4469
+ };
4470
+
4471
+ // src/generated/gladia/schema/webhookTranscriptionErrorPayloadEvent.ts
4472
+ var WebhookTranscriptionErrorPayloadEvent = {
4473
+ transcriptionerror: "transcription.error"
4474
+ };
4475
+
4476
+ // src/generated/gladia/schema/webhookTranscriptionSuccessPayloadEvent.ts
4477
+ var WebhookTranscriptionSuccessPayloadEvent = {
4478
+ transcriptionsuccess: "transcription.success"
4479
+ };
4480
+
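The `Webhook…PayloadEvent` objects close out the Gladia section: three transcription lifecycle events plus the `live.*` session events mirrored from the callback constants above. A routing sketch over the lifecycle strings; the bundle also exports a `GladiaWebhookHandler`, whose API is not visible in this excerpt, so this is a hand-rolled stand-in:

```typescript
type WebhookOutcome = "created" | "success" | "error" | "ignored";

function routeGladiaWebhook(event: string): WebhookOutcome {
  switch (event) {
    case "transcription.created": // WebhookTranscriptionCreatedPayloadEvent
      return "created";
    case "transcription.success": // WebhookTranscriptionSuccessPayloadEvent
      return "success";
    case "transcription.error": // WebhookTranscriptionErrorPayloadEvent
      return "error";
    default:
      return "ignored";
  }
}

console.log(routeGladiaWebhook("transcription.success")); // "success"
```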
4481
+ // src/generated/assemblyai/schema/index.ts
4482
+ var schema_exports2 = {};
4483
+ __export(schema_exports2, {
4484
+ AudioIntelligenceModelStatus: () => AudioIntelligenceModelStatus,
4485
+ EntityType: () => EntityType,
4486
+ LemurModel: () => LemurModel,
4487
+ PiiPolicy: () => PiiPolicy,
4488
+ RedactPiiAudioQuality: () => RedactPiiAudioQuality,
4489
+ RedactedAudioStatus: () => RedactedAudioStatus,
4490
+ Sentiment: () => Sentiment,
4491
+ SpeechModel: () => SpeechModel,
4492
+ SubstitutionPolicy: () => SubstitutionPolicy,
4493
+ SubtitleFormat: () => SubtitleFormat,
4494
+ SummaryModel: () => SummaryModel,
4495
+ SummaryType: () => SummaryType,
4496
+ TranscriptBoostParam: () => TranscriptBoostParam,
4497
+ TranscriptLanguageCode: () => TranscriptLanguageCode,
4498
+ TranscriptReadyStatus: () => TranscriptReadyStatus,
4499
+ TranscriptStatus: () => TranscriptStatus
4500
+ });
4501
+
4502
+ // src/generated/assemblyai/schema/audioIntelligenceModelStatus.ts
4503
+ var AudioIntelligenceModelStatus = {
4504
+ success: "success",
4505
+ unavailable: "unavailable"
4506
+ };
4507
+
4508
+ // src/generated/assemblyai/schema/entityType.ts
4509
+ var EntityType = {
4510
+ account_number: "account_number",
4511
+ banking_information: "banking_information",
4512
+ blood_type: "blood_type",
4513
+ credit_card_cvv: "credit_card_cvv",
4514
+ credit_card_expiration: "credit_card_expiration",
4515
+ credit_card_number: "credit_card_number",
4516
+ date: "date",
4517
+ date_interval: "date_interval",
4518
+ date_of_birth: "date_of_birth",
4519
+ drivers_license: "drivers_license",
4520
+ drug: "drug",
4521
+ duration: "duration",
4522
+ email_address: "email_address",
4523
+ event: "event",
4524
+ filename: "filename",
4525
+ gender_sexuality: "gender_sexuality",
4526
+ healthcare_number: "healthcare_number",
4527
+ injury: "injury",
4528
+ ip_address: "ip_address",
4529
+ language: "language",
4530
+ location: "location",
4531
+ marital_status: "marital_status",
4532
+ medical_condition: "medical_condition",
4533
+ medical_process: "medical_process",
4534
+ money_amount: "money_amount",
4535
+ nationality: "nationality",
4536
+ number_sequence: "number_sequence",
4537
+ occupation: "occupation",
4538
+ organization: "organization",
4539
+ passport_number: "passport_number",
4540
+ password: "password",
4541
+ person_age: "person_age",
4542
+ person_name: "person_name",
4543
+ phone_number: "phone_number",
4544
+ physical_attribute: "physical_attribute",
4545
+ political_affiliation: "political_affiliation",
4546
+ religion: "religion",
4547
+ statistics: "statistics",
4548
+ time: "time",
4549
+ url: "url",
4550
+ us_social_security_number: "us_social_security_number",
4551
+ username: "username",
4552
+ vehicle_id: "vehicle_id",
4553
+ zodiac_sign: "zodiac_sign"
4554
+ };
4555
+
4556
+ // src/generated/assemblyai/schema/lemurModel.ts
4557
+ var LemurModel = {
4558
+ "anthropic/claude-3-5-sonnet": "anthropic/claude-3-5-sonnet",
4559
+ "anthropic/claude-3-opus": "anthropic/claude-3-opus",
4560
+ "anthropic/claude-3-haiku": "anthropic/claude-3-haiku"
4561
+ };
4562
+
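`LemurModel` enumerates the Claude model identifiers accepted by AssemblyAI's LeMUR feature. A minimal request-fragment sketch; the `final_model` and `prompt` property names are assumptions, and only the model string comes from the constant:

```typescript
const lemurRequest = {
  final_model: "anthropic/claude-3-haiku", // LemurModel["anthropic/claude-3-haiku"]
  prompt: "Summarize the call in two sentences.",
};
console.log(lemurRequest.final_model);
```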
4563
+ // src/generated/assemblyai/schema/piiPolicy.ts
4564
+ var PiiPolicy = {
4565
+ account_number: "account_number",
4566
+ banking_information: "banking_information",
4567
+ blood_type: "blood_type",
4568
+ credit_card_cvv: "credit_card_cvv",
4569
+ credit_card_expiration: "credit_card_expiration",
4570
+ credit_card_number: "credit_card_number",
4571
+ date: "date",
4572
+ date_interval: "date_interval",
4573
+ date_of_birth: "date_of_birth",
4574
+ drivers_license: "drivers_license",
4575
+ drug: "drug",
4576
+ duration: "duration",
4577
+ email_address: "email_address",
4578
+ event: "event",
4579
+ filename: "filename",
4580
+ gender_sexuality: "gender_sexuality",
4581
+ healthcare_number: "healthcare_number",
4582
+ injury: "injury",
4583
+ ip_address: "ip_address",
4584
+ language: "language",
4585
+ location: "location",
4586
+ marital_status: "marital_status",
4587
+ medical_condition: "medical_condition",
4588
+ medical_process: "medical_process",
4589
+ money_amount: "money_amount",
4590
+ nationality: "nationality",
4591
+ number_sequence: "number_sequence",
4592
+ occupation: "occupation",
4593
+ organization: "organization",
4594
+ passport_number: "passport_number",
4595
+ password: "password",
4596
+ person_age: "person_age",
4597
+ person_name: "person_name",
4598
+ phone_number: "phone_number",
4599
+ physical_attribute: "physical_attribute",
4600
+ political_affiliation: "political_affiliation",
4601
+ religion: "religion",
4602
+ statistics: "statistics",
4603
+ time: "time",
4604
+ url: "url",
4605
+ us_social_security_number: "us_social_security_number",
4606
+ username: "username",
4607
+ vehicle_id: "vehicle_id",
4608
+ zodiac_sign: "zodiac_sign"
4609
+ };
4610
+
4611
+ // src/generated/assemblyai/schema/redactPiiAudioQuality.ts
4612
+ var RedactPiiAudioQuality = {
4613
+ mp3: "mp3",
4614
+ wav: "wav"
4615
+ };
4616
+
4617
+ // src/generated/assemblyai/schema/redactedAudioStatus.ts
4618
+ var RedactedAudioStatus = {
4619
+ redacted_audio_ready: "redacted_audio_ready"
4620
+ };
4621
+
4622
+ // src/generated/assemblyai/schema/sentiment.ts
4623
+ var Sentiment = {
4624
+ POSITIVE: "POSITIVE",
4625
+ NEUTRAL: "NEUTRAL",
4626
+ NEGATIVE: "NEGATIVE"
4627
+ };
4628
+
4629
+ // src/generated/assemblyai/schema/speechModel.ts
4630
+ var SpeechModel = {
4631
+ best: "best",
4632
+ "slam-1": "slam-1",
4633
+ universal: "universal"
4634
+ };
4635
+
4636
+ // src/generated/assemblyai/schema/substitutionPolicy.ts
4637
+ var SubstitutionPolicy = {
4638
+ entity_name: "entity_name",
4639
+ hash: "hash"
4640
+ };
4641
+
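`PiiPolicy` (which repeats the `EntityType` list above), `RedactPiiAudioQuality`, and `SubstitutionPolicy` together describe a PII-redaction setup: what to detect, how to rewrite it, and what format the redacted audio takes. A hedged configuration sketch with illustrative property names:

```typescript
// Property names are assumptions; values come from the constants above.
const redactionConfig = {
  redact_pii_policies: [
    "person_name",               // PiiPolicy.person_name
    "credit_card_number",        // PiiPolicy.credit_card_number
    "us_social_security_number", // PiiPolicy.us_social_security_number
  ],
  redact_pii_sub: "hash",          // SubstitutionPolicy.hash
  redact_pii_audio_quality: "mp3", // RedactPiiAudioQuality.mp3
};
console.log(redactionConfig.redact_pii_policies.length); // 3
```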
4642
+ // src/generated/assemblyai/schema/subtitleFormat.ts
4643
+ var SubtitleFormat = {
4644
+ srt: "srt",
4645
+ vtt: "vtt"
4646
+ };
4647
+
4648
+ // src/generated/assemblyai/schema/summaryModel.ts
4649
+ var SummaryModel = {
4650
+ informative: "informative",
4651
+ conversational: "conversational",
4652
+ catchy: "catchy"
4653
+ };
4654
+
4655
+ // src/generated/assemblyai/schema/summaryType.ts
4656
+ var SummaryType = {
4657
+ bullets: "bullets",
4658
+ bullets_verbose: "bullets_verbose",
4659
+ gist: "gist",
4660
+ headline: "headline",
4661
+ paragraph: "paragraph"
4662
+ };
4663
+
4664
+ // src/generated/assemblyai/schema/transcriptBoostParam.ts
4665
+ var TranscriptBoostParam = {
4666
+ low: "low",
4667
+ default: "default",
4668
+ high: "high"
4669
+ };
4670
+
4671
+ // src/generated/assemblyai/schema/transcriptLanguageCode.ts
4672
+ var TranscriptLanguageCode = {
4673
+ en: "en",
4674
+ en_au: "en_au",
4675
+ en_uk: "en_uk",
4676
+ en_us: "en_us",
4677
+ es: "es",
4678
+ fr: "fr",
4679
+ de: "de",
4680
+ it: "it",
4681
+ pt: "pt",
4682
+ nl: "nl",
4683
+ af: "af",
4684
+ sq: "sq",
4685
+ am: "am",
4686
+ ar: "ar",
4687
+ hy: "hy",
4688
+ as: "as",
4689
+ az: "az",
4690
+ ba: "ba",
4691
+ eu: "eu",
4692
+ be: "be",
4693
+ bn: "bn",
4694
+ bs: "bs",
4695
+ br: "br",
4696
+ bg: "bg",
4697
+ my: "my",
4698
+ ca: "ca",
4699
+ zh: "zh",
4700
+ hr: "hr",
4701
+ cs: "cs",
4702
+ da: "da",
4703
+ et: "et",
4704
+ fo: "fo",
4705
+ fi: "fi",
4706
+ gl: "gl",
4707
+ ka: "ka",
4708
+ el: "el",
4709
+ gu: "gu",
4710
+ ht: "ht",
4711
+ ha: "ha",
4712
+ haw: "haw",
4713
+ he: "he",
4714
+ hi: "hi",
4715
+ hu: "hu",
4716
+ is: "is",
4717
+ id: "id",
4718
+ ja: "ja",
4719
+ jw: "jw",
4720
+ kn: "kn",
4721
+ kk: "kk",
4722
+ km: "km",
4723
+ ko: "ko",
4724
+ lo: "lo",
4725
+ la: "la",
4726
+ lv: "lv",
4727
+ ln: "ln",
4728
+ lt: "lt",
4729
+ lb: "lb",
4730
+ mk: "mk",
4731
+ mg: "mg",
4732
+ ms: "ms",
4733
+ ml: "ml",
4734
+ mt: "mt",
4735
+ mi: "mi",
4736
+ mr: "mr",
4737
+ mn: "mn",
4738
+ ne: "ne",
4739
+ no: "no",
4740
+ nn: "nn",
4741
+ oc: "oc",
4742
+ pa: "pa",
4743
+ ps: "ps",
4744
+ fa: "fa",
4745
+ pl: "pl",
4746
+ ro: "ro",
4747
+ ru: "ru",
4748
+ sa: "sa",
4749
+ sr: "sr",
4750
+ sn: "sn",
4751
+ sd: "sd",
4752
+ si: "si",
4753
+ sk: "sk",
4754
+ sl: "sl",
4755
+ so: "so",
4756
+ su: "su",
4757
+ sw: "sw",
4758
+ sv: "sv",
4759
+ tl: "tl",
4760
+ tg: "tg",
4761
+ ta: "ta",
4762
+ tt: "tt",
4763
+ te: "te",
4764
+ th: "th",
4765
+ bo: "bo",
4766
+ tr: "tr",
4767
+ tk: "tk",
4768
+ uk: "uk",
4769
+ ur: "ur",
4770
+ uz: "uz",
4771
+ vi: "vi",
4772
+ cy: "cy",
4773
+ yi: "yi",
4774
+ yo: "yo"
4775
+ };
4776
+
4777
+ // src/generated/assemblyai/schema/transcriptReadyStatus.ts
4778
+ var TranscriptReadyStatus = {
4779
+ completed: "completed",
4780
+ error: "error"
4781
+ };
4782
+
4783
+ // src/generated/assemblyai/schema/transcriptStatus.ts
4784
+ var TranscriptStatus = {
4785
+ queued: "queued",
4786
+ processing: "processing",
4787
+ completed: "completed",
4788
+ error: "error"
4789
+ };
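`TranscriptStatus` adds the two in-flight states to `TranscriptReadyStatus`'s terminal pair, giving the full AssemblyAI job lifecycle. A self-contained polling sketch over those four strings; `fetchStatus` is a hypothetical caller-supplied function, not a package API:

```typescript
type TranscriptStatusValue = "queued" | "processing" | "completed" | "error";

async function waitUntilDone(
  fetchStatus: () => Promise<TranscriptStatusValue>,
  intervalMs = 3000,
): Promise<TranscriptStatusValue> {
  for (;;) {
    const status = await fetchStatus();
    // TranscriptReadyStatus holds exactly these two terminal values.
    if (status === "completed" || status === "error") return status;
    await new Promise<void>((resolve) => setTimeout(resolve, intervalMs));
  }
}
```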
4790
+ export {
4791
+ AssemblyAIAdapter,
4792
+ schema_exports2 as AssemblyAITypes,
4793
+ AssemblyAIWebhookHandler,
4794
+ AzureSTTAdapter,
4795
+ AzureWebhookHandler,
4796
+ BaseAdapter,
4797
+ BaseWebhookHandler,
4798
+ DeepgramAdapter,
4799
+ DeepgramWebhookHandler,
4800
+ GladiaAdapter,
4801
+ schema_exports as GladiaTypes,
4802
+ GladiaWebhookHandler,
4803
+ OpenAIWhisperAdapter,
4804
+ SpeechmaticsAdapter,
4805
+ SpeechmaticsWebhookHandler,
4806
+ VoiceRouter,
4807
+ WebhookRouter,
4808
+ createAssemblyAIAdapter,
4809
+ createAssemblyAIWebhookHandler,
4810
+ createAzureSTTAdapter,
4811
+ createAzureWebhookHandler,
4812
+ createDeepgramAdapter,
4813
+ createDeepgramWebhookHandler,
4814
+ createGladiaAdapter,
4815
+ createGladiaWebhookHandler,
4816
+ createOpenAIWhisperAdapter,
4817
+ createSpeechmaticsAdapter,
4818
+ createVoiceRouter,
4819
+ createWebhookRouter
4820
+ };
4821
+ //# sourceMappingURL=index.mjs.map
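The `export` block above is the package's public surface: the router, six provider adapters with matching `create…` factories, webhook handlers, and the two generated schema namespaces (`GladiaTypes`, `AssemblyAITypes`). A closing wiring sketch under stated assumptions: the factory signatures are guesses (this file only proves the names exist), and the call assumes the factories mirror the underlying classes:

```typescript
import { createVoiceRouter, createGladiaAdapter } from "voice-router-dev";

// Assumption: factories mirror the underlying class constructors.
const router = createVoiceRouter({
  providers: {
    gladia: { apiKey: "<gladia-api-key>" },
  },
  selectionStrategy: "round-robin", // one of the router's strategies
});

router.registerAdapter(createGladiaAdapter());
```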