voice-router-dev 0.2.6 → 0.2.8

This diff shows the content of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in the public registry.
package/dist/index.d.ts CHANGED
@@ -5648,6 +5648,247 @@ interface CreateTranscriptionRequest {
5648
5648
  known_speaker_references?: string[];
5649
5649
  }
5650
5650
 
5651
+ /**
5652
+ * Generated by orval v7.9.0 🍺
5653
+ * Do not edit manually.
5654
+ * Gladia Control API
5655
+ * OpenAPI spec version: 1.0
5656
+ */
5657
+ interface CallbackConfig {
5658
+ /** URL on which we will do a `POST` request with configured messages */
5659
+ url?: string;
5660
+ /** If true, partial transcript will be sent to the defined callback. */
5661
+ receive_partial_transcripts?: boolean;
5662
+ /** If true, final transcript will be sent to the defined callback. */
5663
+ receive_final_transcripts?: boolean;
5664
+ /** If true, begin and end speech events will be sent to the defined callback. */
5665
+ receive_speech_events?: boolean;
5666
+ /** If true, pre-processing events will be sent to the defined callback. */
5667
+ receive_pre_processing_events?: boolean;
5668
+ /** If true, realtime processing events will be sent to the defined callback. */
5669
+ receive_realtime_processing_events?: boolean;
5670
+ /** If true, post-processing events will be sent to the defined callback. */
5671
+ receive_post_processing_events?: boolean;
5672
+ /** If true, acknowledgments will be sent to the defined callback. */
5673
+ receive_acknowledgments?: boolean;
5674
+ /** If true, errors will be sent to the defined callback. */
5675
+ receive_errors?: boolean;
5676
+ /** If true, lifecycle events will be sent to the defined callback. */
5677
+ receive_lifecycle_events?: boolean;
5678
+ }
5679
+
5680
+ /**
5681
+ * Generated by orval v7.9.0 🍺
5682
+ * Do not edit manually.
5683
+ * Gladia Control API
5684
+ * OpenAPI spec version: 1.0
5685
+ */
5686
+ interface MessagesConfig {
5687
+ /** If true, partial transcript will be sent to websocket. */
5688
+ receive_partial_transcripts?: boolean;
5689
+ /** If true, final transcript will be sent to websocket. */
5690
+ receive_final_transcripts?: boolean;
5691
+ /** If true, begin and end speech events will be sent to websocket. */
5692
+ receive_speech_events?: boolean;
5693
+ /** If true, pre-processing events will be sent to websocket. */
5694
+ receive_pre_processing_events?: boolean;
5695
+ /** If true, realtime processing events will be sent to websocket. */
5696
+ receive_realtime_processing_events?: boolean;
5697
+ /** If true, post-processing events will be sent to websocket. */
5698
+ receive_post_processing_events?: boolean;
5699
+ /** If true, acknowledgments will be sent to websocket. */
5700
+ receive_acknowledgments?: boolean;
5701
+ /** If true, errors will be sent to websocket. */
5702
+ receive_errors?: boolean;
5703
+ /** If true, lifecycle events will be sent to websocket. */
5704
+ receive_lifecycle_events?: boolean;
5705
+ }
5706
+
5707
+ /**
5708
+ * Generated by orval v7.9.0 🍺
5709
+ * Do not edit manually.
5710
+ * Gladia Control API
5711
+ * OpenAPI spec version: 1.0
5712
+ */
5713
+
5714
+ interface PostProcessingConfig {
5715
+ /** If true, generates summarization for the whole transcription. */
5716
+ summarization?: boolean;
5717
+ /** Summarization configuration, if `summarization` is enabled */
5718
+ summarization_config?: SummarizationConfigDTO;
5719
+ /** If true, generates chapters for the whole transcription. */
5720
+ chapterization?: boolean;
5721
+ }
5722
+
5723
+ /**
5724
+ * Generated by orval v7.9.0 🍺
5725
+ * Do not edit manually.
5726
+ * Gladia Control API
5727
+ * OpenAPI spec version: 1.0
5728
+ */
5729
+ interface PreProcessingConfig {
5730
+ /** If true, apply pre-processing to the audio stream to enhance the quality. */
5731
+ audio_enhancer?: boolean;
5732
+ /**
5733
+ * Sensitivity configuration for Speech Threshold. A value close to 1 will apply stricter thresholds, making it less likely to detect background sounds as speech.
5734
+ * @minimum 0
5735
+ * @maximum 1
5736
+ */
5737
+ speech_threshold?: number;
5738
+ }
5739
+
5740
+ /**
5741
+ * Generated by orval v7.9.0 🍺
5742
+ * Do not edit manually.
5743
+ * Gladia Control API
5744
+ * OpenAPI spec version: 1.0
5745
+ */
5746
+
5747
+ interface RealtimeProcessingConfig {
5748
+ /** If true, enable custom vocabulary for the transcription. */
5749
+ custom_vocabulary?: boolean;
5750
+ /** Custom vocabulary configuration, if `custom_vocabulary` is enabled */
5751
+ custom_vocabulary_config?: CustomVocabularyConfigDTO;
5752
+ /** If true, enable custom spelling for the transcription. */
5753
+ custom_spelling?: boolean;
5754
+ /** Custom spelling configuration, if `custom_spelling` is enabled */
5755
+ custom_spelling_config?: CustomSpellingConfigDTO;
5756
+ /** If true, enable translation for the transcription */
5757
+ translation?: boolean;
5758
+ /** Translation configuration, if `translation` is enabled */
5759
+ translation_config?: TranslationConfigDTO;
5760
+ /** If true, enable named entity recognition for the transcription. */
5761
+ named_entity_recognition?: boolean;
5762
+ /** If true, enable sentiment analysis for the transcription. */
5763
+ sentiment_analysis?: boolean;
5764
+ }
5765
+
5766
+ /**
5767
+ * Generated by orval v7.9.0 🍺
5768
+ * Do not edit manually.
5769
+ * Gladia Control API
5770
+ * OpenAPI spec version: 1.0
5771
+ */
5772
+ /**
5773
+ * Custom metadata you can attach to this live transcription
5774
+ */
5775
+ type StreamingRequestCustomMetadata = {
5776
+ [key: string]: unknown;
5777
+ };
5778
+
5779
+ /**
5780
+ * Generated by orval v7.9.0 🍺
5781
+ * Do not edit manually.
5782
+ * Gladia Control API
5783
+ * OpenAPI spec version: 1.0
5784
+ */
5785
+ /**
5786
+ * The bit depth of the audio stream
5787
+ */
5788
+ type StreamingSupportedBitDepthEnum = (typeof StreamingSupportedBitDepthEnum)[keyof typeof StreamingSupportedBitDepthEnum];
5789
+ declare const StreamingSupportedBitDepthEnum: {
5790
+ readonly NUMBER_8: 8;
5791
+ readonly NUMBER_16: 16;
5792
+ readonly NUMBER_24: 24;
5793
+ readonly NUMBER_32: 32;
5794
+ };
5795
+
5796
+ /**
5797
+ * Generated by orval v7.9.0 🍺
5798
+ * Do not edit manually.
5799
+ * Gladia Control API
5800
+ * OpenAPI spec version: 1.0
5801
+ */
5802
+ /**
5803
+ * The encoding format of the audio stream. Supported formats:
5804
+ - PCM: 8, 16, 24, and 32 bits
5805
+ - A-law: 8 bits
5806
+ - μ-law: 8 bits
5807
+
5808
+ Note: No need to add WAV headers to raw audio as the API supports both formats.
5809
+ */
5810
+ type StreamingSupportedEncodingEnum = (typeof StreamingSupportedEncodingEnum)[keyof typeof StreamingSupportedEncodingEnum];
5811
+ declare const StreamingSupportedEncodingEnum: {
5812
+ readonly "wav/pcm": "wav/pcm";
5813
+ readonly "wav/alaw": "wav/alaw";
5814
+ readonly "wav/ulaw": "wav/ulaw";
5815
+ };
5816
+
5817
+ /**
5818
+ * Generated by orval v7.9.0 🍺
5819
+ * Do not edit manually.
5820
+ * Gladia Control API
5821
+ * OpenAPI spec version: 1.0
5822
+ */
5823
+ /**
5824
+ * The sample rate of the audio stream
5825
+ */
5826
+ type StreamingSupportedSampleRateEnum = (typeof StreamingSupportedSampleRateEnum)[keyof typeof StreamingSupportedSampleRateEnum];
5827
+ declare const StreamingSupportedSampleRateEnum: {
5828
+ readonly NUMBER_8000: 8000;
5829
+ readonly NUMBER_16000: 16000;
5830
+ readonly NUMBER_32000: 32000;
5831
+ readonly NUMBER_44100: 44100;
5832
+ readonly NUMBER_48000: 48000;
5833
+ };
5834
+
5835
+ /**
5836
+ * Generated by orval v7.9.0 🍺
5837
+ * Do not edit manually.
5838
+ * Gladia Control API
5839
+ * OpenAPI spec version: 1.0
5840
+ */
5841
+
5842
+ interface StreamingRequest {
5843
+ /** The encoding format of the audio stream. Supported formats:
5844
+ - PCM: 8, 16, 24, and 32 bits
5845
+ - A-law: 8 bits
5846
+ - μ-law: 8 bits
5847
+
5848
+ Note: No need to add WAV headers to raw audio as the API supports both formats. */
5849
+ encoding?: StreamingSupportedEncodingEnum;
5850
+ /** The bit depth of the audio stream */
5851
+ bit_depth?: StreamingSupportedBitDepthEnum;
5852
+ /** The sample rate of the audio stream */
5853
+ sample_rate?: StreamingSupportedSampleRateEnum;
5854
+ /**
5855
+ * The number of channels of the audio stream
5856
+ * @minimum 1
5857
+ * @maximum 8
5858
+ */
5859
+ channels?: number;
5860
+ /** Custom metadata you can attach to this live transcription */
5861
+ custom_metadata?: StreamingRequestCustomMetadata;
5862
+ /** The model used to process the audio. "solaria-1" is used by default. */
5863
+ model?: StreamingSupportedModels;
5864
+ /**
5865
+ * The endpointing duration in seconds. Endpointing is the duration of silence which will cause an utterance to be considered as finished
5866
+ * @minimum 0.01
5867
+ * @maximum 10
5868
+ */
5869
+ endpointing?: number;
5870
+ /**
5871
+ * The maximum duration in seconds without endpointing. If endpointing is not detected after this duration, current utterance will be considered as finished
5872
+ * @minimum 5
5873
+ * @maximum 60
5874
+ */
5875
+ maximum_duration_without_endpointing?: number;
5876
+ /** Specify the language configuration */
5877
+ language_config?: LanguageConfig;
5878
+ /** Specify the pre-processing configuration */
5879
+ pre_processing?: PreProcessingConfig;
5880
+ /** Specify the realtime processing configuration */
5881
+ realtime_processing?: RealtimeProcessingConfig;
5882
+ /** Specify the post-processing configuration */
5883
+ post_processing?: PostProcessingConfig;
5884
+ /** Specify the websocket messages configuration */
5885
+ messages_config?: MessagesConfig;
5886
+ /** If true, messages will be sent to configured url. */
5887
+ callback?: boolean;
5888
+ /** Specify the callback configuration */
5889
+ callback_config?: CallbackConfig;
5890
+ }
5891
+
5651
5892
  /**
5652
5893
  * Unified types for the Voice Router SDK
5653
5894
  * These types provide a provider-agnostic interface for transcription services
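The hunk above declares Gladia's live-session configuration types (CallbackConfig, MessagesConfig, PreProcessingConfig, RealtimeProcessingConfig, PostProcessingConfig, the audio enums, and StreamingRequest) at module scope. As a hedged illustration of the shape they describe, the sketch below builds a config object using only field names and enum literals declared in this hunk; it is left as a plain object because StreamingRequest itself does not appear in the package's export list, and the URL is a placeholder.

```typescript
// Sketch of a Gladia live-session config matching the StreamingRequest shape above.
const gladiaSessionConfig = {
  encoding: "wav/pcm",      // StreamingSupportedEncodingEnum literal
  bit_depth: 16,            // StreamingSupportedBitDepthEnum literal
  sample_rate: 16_000,      // StreamingSupportedSampleRateEnum literal
  channels: 1,              // 1–8
  endpointing: 0.3,         // seconds of silence that closes an utterance (0.01–10)
  maximum_duration_without_endpointing: 30, // 5–60 seconds
  pre_processing: { audio_enhancer: true, speech_threshold: 0.6 },
  realtime_processing: { named_entity_recognition: true, sentiment_analysis: true },
  post_processing: { summarization: true, chapterization: true },
  messages_config: { receive_partial_transcripts: true, receive_final_transcripts: true },
  callback: true,
  callback_config: { url: "https://example.com/gladia-live", receive_errors: true },
};
```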
@@ -5678,6 +5919,79 @@ type TranscriptionModel = ListenV1ModelParameter | StreamingSupportedModels | Sp
5678
5919
  */
5679
5920
  type TranscriptionLanguage = TranscriptLanguageCode | TranscriptionLanguageCodeEnum | string;
5680
5921
 
5922
+ /**
5923
+ * Extended data from AssemblyAI transcription
5924
+ * Includes chapters, entities, sentiment, content safety, and more
5925
+ */
5926
+ interface AssemblyAIExtendedData {
5927
+ /** Auto-generated chapters with summaries */
5928
+ chapters?: Chapter[];
5929
+ /** Detected named entities (people, organizations, locations) */
5930
+ entities?: Entity[];
5931
+ /** Per-utterance sentiment analysis results */
5932
+ sentimentResults?: SentimentAnalysisResult$1[];
5933
+ /** Key phrases and highlights */
5934
+ highlights?: AutoHighlightsResult;
5935
+ /** Content safety/moderation labels */
5936
+ contentSafety?: ContentSafetyLabelsResult;
5937
+ /** IAB topic categories */
5938
+ topics?: TopicDetectionModelResult;
5939
+ /** Language detection confidence (0-1) */
5940
+ languageConfidence?: number;
5941
+ /** Whether the request was throttled */
5942
+ throttled?: boolean;
5943
+ }
5944
+ /**
5945
+ * Extended data from Gladia transcription
5946
+ * Includes translation, moderation, entities, LLM outputs, and more
5947
+ */
5948
+ interface GladiaExtendedData {
5949
+ /** Translation results (if translation enabled) */
5950
+ translation?: TranslationDTO;
5951
+ /** Content moderation results */
5952
+ moderation?: ModerationDTO;
5953
+ /** Named entity recognition results */
5954
+ entities?: NamedEntityRecognitionDTO;
5955
+ /** Sentiment analysis results */
5956
+ sentiment?: SentimentAnalysisDTO;
5957
+ /** Audio-to-LLM custom prompt results */
5958
+ audioToLlm?: AudioToLlmListDTO;
5959
+ /** Auto-generated chapters */
5960
+ chapters?: ChapterizationDTO;
5961
+ /** AI speaker reidentification results */
5962
+ speakerReidentification?: SpeakerReidentificationDTO;
5963
+ /** Structured data extraction results */
5964
+ structuredData?: StructuredDataExtractionDTO;
5965
+ /** Custom metadata echoed back */
5966
+ customMetadata?: Record<string, unknown>;
5967
+ }
5968
+ /**
5969
+ * Extended data from Deepgram transcription
5970
+ * Includes detailed metadata, model info, and feature-specific data
5971
+ */
5972
+ interface DeepgramExtendedData {
5973
+ /** Full response metadata */
5974
+ metadata?: ListenV1ResponseMetadata;
5975
+ /** Request ID for debugging/tracking */
5976
+ requestId?: string;
5977
+ /** SHA256 hash of the audio */
5978
+ sha256?: string;
5979
+ /** Model versions used */
5980
+ modelInfo?: Record<string, unknown>;
5981
+ /** Tags echoed back from request */
5982
+ tags?: string[];
5983
+ }
5984
+ /**
5985
+ * Map of provider names to their extended data types
5986
+ */
5987
+ type ProviderExtendedDataMap = {
5988
+ assemblyai: AssemblyAIExtendedData;
5989
+ gladia: GladiaExtendedData;
5990
+ deepgram: DeepgramExtendedData;
5991
+ "openai-whisper": Record<string, never>;
5992
+ "azure-stt": Record<string, never>;
5993
+ speechmatics: Record<string, never>;
5994
+ };
5681
5995
  /**
5682
5996
  * Supported transcription providers
5683
5997
  */
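The `*ExtendedData` interfaces and `ProviderExtendedDataMap` added above drive the typing of the new `extended` field on `UnifiedTranscriptResponse` (introduced later in this diff). A minimal sketch, assuming the package name from the title is the import specifier and that a Gladia adapter produced the result:

```typescript
import type { UnifiedTranscriptResponse } from "voice-router-dev";

// With the provider fixed to "gladia", `extended` narrows to GladiaExtendedData.
function summarizeGladiaExtras(result: UnifiedTranscriptResponse<"gladia">): void {
  const extended = result.extended;
  if (!extended) return;

  if (extended.translation) console.log("translation results attached");
  if (extended.sentiment) console.log("sentiment analysis attached");
  if (extended.chapters) console.log("auto-generated chapters attached");
  // Custom metadata is echoed back as an untyped record.
  console.log("metadata keys:", Object.keys(extended.customMetadata ?? {}));
}
```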
@@ -5832,7 +6146,7 @@ interface Speaker {
5832
6146
  */
5833
6147
  interface Word {
5834
6148
  /** The transcribed word */
5835
- text: string;
6149
+ word: string;
5836
6150
  /** Start time in seconds */
5837
6151
  start: number;
5838
6152
  /** End time in seconds */
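This hunk renames the token field on the unified `Word` interface from `text` to `word`, which is a breaking change for consumers that read word-level output from 0.2.6. A minimal migration sketch using a structural type, so the same helper compiles against either side of this diff:

```typescript
// 0.2.6 exposed the token as `text`; from this release it is `word`.
type AnyWord = { word?: string; text?: string; start: number; end: number };

// Prefer the new field and fall back to the old one.
function tokenOf(w: AnyWord): string {
  return w.word ?? w.text ?? "";
}

function joinWords(words: AnyWord[]): string {
  return words.map(tokenOf).join(" ");
}
```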
@@ -5878,22 +6192,32 @@ type ProviderRawResponseMap = {
5878
6192
  /**
5879
6193
  * Unified transcription response with provider-specific type safety
5880
6194
  *
5881
- * When a specific provider is known at compile time, the `raw` field
5882
- * will be typed with that provider's actual response type.
6195
+ * When a specific provider is known at compile time, both `raw` and `extended`
6196
+ * fields will be typed with that provider's actual types.
5883
6197
  *
5884
6198
  * @template P - The transcription provider (defaults to all providers)
5885
6199
  *
5886
6200
  * @example Type narrowing with specific provider
5887
6201
  * ```typescript
5888
- * const result: UnifiedTranscriptResponse<'deepgram'> = await adapter.transcribe(audio);
5889
- * // result.raw is typed as ListenV1Response
5890
- * const deepgramMetadata = result.raw?.metadata;
6202
+ * const result: UnifiedTranscriptResponse<'assemblyai'> = await adapter.transcribe(audio);
6203
+ * // result.raw is typed as AssemblyAITranscript
6204
+ * // result.extended is typed as AssemblyAIExtendedData
6205
+ * const chapters = result.extended?.chapters; // AssemblyAIChapter[] | undefined
6206
+ * const entities = result.extended?.entities; // AssemblyAIEntity[] | undefined
6207
+ * ```
6208
+ *
6209
+ * @example Accessing Gladia extended data
6210
+ * ```typescript
6211
+ * const result: UnifiedTranscriptResponse<'gladia'> = await gladiaAdapter.transcribe(audio);
6212
+ * const translation = result.extended?.translation; // GladiaTranslation | undefined
6213
+ * const llmResults = result.extended?.audioToLlm; // GladiaAudioToLlmResult | undefined
5891
6214
  * ```
5892
6215
  *
5893
6216
  * @example Generic usage (all providers)
5894
6217
  * ```typescript
5895
6218
  * const result: UnifiedTranscriptResponse = await router.transcribe(audio);
5896
6219
  * // result.raw is typed as unknown (could be any provider)
6220
+ * // result.extended is typed as union of all extended types
5897
6221
  * ```
5898
6222
  */
5899
6223
  interface UnifiedTranscriptResponse<P extends TranscriptionProvider = TranscriptionProvider> {
@@ -5930,6 +6254,34 @@ interface UnifiedTranscriptResponse<P extends TranscriptionProvider = Transcript
5930
6254
  /** Completion timestamp */
5931
6255
  completedAt?: string;
5932
6256
  };
6257
+ /**
6258
+ * Extended provider-specific data (fully typed from OpenAPI specs)
6259
+ *
6260
+ * Contains rich data beyond basic transcription:
6261
+ * - AssemblyAI: chapters, entities, sentiment, content safety, topics
6262
+ * - Gladia: translation, moderation, entities, audio-to-llm, chapters
6263
+ * - Deepgram: detailed metadata, request tracking, model info
6264
+ *
6265
+ * @example Access AssemblyAI chapters
6266
+ * ```typescript
6267
+ * const result = await assemblyaiAdapter.transcribe(audio, { summarization: true });
6268
+ * result.extended?.chapters?.forEach(chapter => {
6269
+ * console.log(`${chapter.headline}: ${chapter.summary}`);
6270
+ * });
6271
+ * ```
6272
+ */
6273
+ extended?: P extends keyof ProviderExtendedDataMap ? ProviderExtendedDataMap[P] : unknown;
6274
+ /**
6275
+ * Request tracking information for debugging
6276
+ */
6277
+ tracking?: {
6278
+ /** Provider's request/job ID */
6279
+ requestId?: string;
6280
+ /** Audio fingerprint (SHA256) if available */
6281
+ audioHash?: string;
6282
+ /** Processing duration in milliseconds */
6283
+ processingTimeMs?: number;
6284
+ };
5933
6285
  /** Error information (only present on failure) */
5934
6286
  error?: {
5935
6287
  /** Error code (provider-specific or normalized) */
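Beyond `extended`, this hunk also adds an optional `tracking` block (request ID, audio hash, processing time). A hedged sketch of reading it defensively, since the block and every field inside it are optional and the import specifier is assumed from the package title:

```typescript
import type { UnifiedTranscriptResponse } from "voice-router-dev";

// Works for any provider: `tracking` is independent of the generic parameter.
function logTracking(result: UnifiedTranscriptResponse): void {
  const { requestId, audioHash, processingTimeMs } = result.tracking ?? {};
  console.log(
    `request=${requestId ?? "n/a"}`,
    `sha256=${audioHash ?? "n/a"}`,
    `took=${processingTimeMs ?? "?"}ms`,
  );
}
```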
@@ -6049,6 +6401,29 @@ interface StreamingOptions extends Omit<TranscribeOptions, "webhookUrl"> {
6049
6401
  * { model: 'nova-2', language: 'fr' }
6050
6402
  */
6051
6403
  model?: TranscriptionModel;
6404
+ /**
6405
+ * Gladia-specific streaming options (passed directly to API)
6406
+ *
6407
+ * Includes pre_processing, realtime_processing, post_processing,
6408
+ * messages_config, and callback configuration.
6409
+ *
6410
+ * @see https://docs.gladia.io/api-reference/v2/live
6411
+ *
6412
+ * @example
6413
+ * ```typescript
6414
+ * await adapter.transcribeStream({
6415
+ * gladiaStreaming: {
6416
+ * realtime_processing: {
6417
+ * words_accurate_timestamps: true
6418
+ * },
6419
+ * messages_config: {
6420
+ * receive_partial_transcripts: true
6421
+ * }
6422
+ * }
6423
+ * });
6424
+ * ```
6425
+ */
6426
+ gladiaStreaming?: Partial<Omit<StreamingRequest, "encoding" | "sample_rate" | "bit_depth" | "channels">>;
6052
6427
  }
6053
6428
  /**
6054
6429
  * Callback functions for streaming events
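The new `gladiaStreaming` option forwards the Gladia `StreamingRequest` fields minus the audio-format ones (`encoding`, `sample_rate`, `bit_depth`, `channels`), which the adapter manages separately. A sketch typed through an indexed access on the exported `StreamingOptions`, using only fields declared earlier in this diff:

```typescript
import type { StreamingOptions } from "voice-router-dev";

const gladiaStreaming: StreamingOptions["gladiaStreaming"] = {
  realtime_processing: { translation: true, sentiment_analysis: true },
  post_processing: { summarization: true },
  messages_config: { receive_partial_transcripts: false, receive_final_transcripts: true },
  callback: true,
  callback_config: { url: "https://example.com/live-callback" }, // placeholder URL
};
```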
@@ -6273,74 +6648,18 @@ declare abstract class BaseAdapter implements TranscriptionAdapter {
6273
6648
  };
6274
6649
  /**
6275
6650
  * Generic polling helper for async transcription jobs
6276
- *
6277
- * Polls getTranscript() until job completes or times out.
6278
- *
6279
- * @param transcriptId - Job/transcript ID to poll
6280
- * @param options - Polling configuration
6281
- * @returns Final transcription result
6282
- */
6283
- protected pollForCompletion(transcriptId: string, options?: {
6284
- maxAttempts?: number;
6285
- intervalMs?: number;
6286
- }): Promise<UnifiedTranscriptResponse>;
6287
- }
6288
-
6289
- /**
6290
- * Generated by orval v7.9.0 🍺
6291
- * Do not edit manually.
6292
- * Gladia Control API
6293
- * OpenAPI spec version: 1.0
6294
- */
6295
- /**
6296
- * The encoding format of the audio stream. Supported formats:
6297
- - PCM: 8, 16, 24, and 32 bits
6298
- - A-law: 8 bits
6299
- - μ-law: 8 bits
6300
-
6301
- Note: No need to add WAV headers to raw audio as the API supports both formats.
6302
- */
6303
- type StreamingSupportedEncodingEnum = (typeof StreamingSupportedEncodingEnum)[keyof typeof StreamingSupportedEncodingEnum];
6304
- declare const StreamingSupportedEncodingEnum: {
6305
- readonly "wav/pcm": "wav/pcm";
6306
- readonly "wav/alaw": "wav/alaw";
6307
- readonly "wav/ulaw": "wav/ulaw";
6308
- };
6309
-
6310
- /**
6311
- * Generated by orval v7.9.0 🍺
6312
- * Do not edit manually.
6313
- * Gladia Control API
6314
- * OpenAPI spec version: 1.0
6315
- */
6316
- /**
6317
- * The sample rate of the audio stream
6318
- */
6319
- type StreamingSupportedSampleRateEnum = (typeof StreamingSupportedSampleRateEnum)[keyof typeof StreamingSupportedSampleRateEnum];
6320
- declare const StreamingSupportedSampleRateEnum: {
6321
- readonly NUMBER_8000: 8000;
6322
- readonly NUMBER_16000: 16000;
6323
- readonly NUMBER_32000: 32000;
6324
- readonly NUMBER_44100: 44100;
6325
- readonly NUMBER_48000: 48000;
6326
- };
6327
-
6328
- /**
6329
- * Generated by orval v7.9.0 🍺
6330
- * Do not edit manually.
6331
- * Gladia Control API
6332
- * OpenAPI spec version: 1.0
6333
- */
6334
- /**
6335
- * The bit depth of the audio stream
6336
- */
6337
- type StreamingSupportedBitDepthEnum = (typeof StreamingSupportedBitDepthEnum)[keyof typeof StreamingSupportedBitDepthEnum];
6338
- declare const StreamingSupportedBitDepthEnum: {
6339
- readonly NUMBER_8: 8;
6340
- readonly NUMBER_16: 16;
6341
- readonly NUMBER_24: 24;
6342
- readonly NUMBER_32: 32;
6343
- };
6651
+ *
6652
+ * Polls getTranscript() until job completes or times out.
6653
+ *
6654
+ * @param transcriptId - Job/transcript ID to poll
6655
+ * @param options - Polling configuration
6656
+ * @returns Final transcription result
6657
+ */
6658
+ protected pollForCompletion(transcriptId: string, options?: {
6659
+ maxAttempts?: number;
6660
+ intervalMs?: number;
6661
+ }): Promise<UnifiedTranscriptResponse>;
6662
+ }
6344
6663
 
6345
6664
  /**
6346
6665
  * Provider-specific streaming option types using OpenAPI-generated schemas
@@ -7866,92 +8185,321 @@ declare function createOpenAIWhisperAdapter(config: ProviderConfig): OpenAIWhisp
7866
8185
  * await poll();
7867
8186
  * ```
7868
8187
  */
7869
- declare class SpeechmaticsAdapter extends BaseAdapter {
7870
- readonly name: "speechmatics";
7871
- readonly capabilities: ProviderCapabilities;
7872
- private client?;
7873
- protected baseUrl: string;
7874
- initialize(config: ProviderConfig): void;
7875
- /**
7876
- * Submit audio for transcription
7877
- *
7878
- * Speechmatics uses async batch processing. Returns a job ID immediately.
7879
- * Poll getTranscript() to retrieve results.
7880
- *
7881
- * @param audio - Audio input (URL or file)
7882
- * @param options - Transcription options
7883
- * @returns Job submission response with ID for polling
7884
- */
7885
- transcribe(audio: AudioInput, options?: TranscribeOptions): Promise<UnifiedTranscriptResponse>;
7886
- /**
7887
- * Get transcription result by job ID
7888
- *
7889
- * Poll this method to check job status and retrieve completed transcription.
7890
- *
7891
- * @param transcriptId - Job ID from Speechmatics
7892
- * @returns Transcription response with status and results
7893
- */
7894
- getTranscript(transcriptId: string): Promise<UnifiedTranscriptResponse>;
7895
- /**
7896
- * Delete a transcription job and its associated data
7897
- *
7898
- * Removes the job and all associated resources from Speechmatics' servers.
7899
- * This action is irreversible.
7900
- *
7901
- * @param transcriptId - The job ID to delete
7902
- * @param force - Force delete even if job is still running (default: false)
7903
- * @returns Promise with success status
7904
- *
7905
- * @example Delete a completed job
7906
- * ```typescript
7907
- * const result = await adapter.deleteTranscript('job-abc123');
7908
- * if (result.success) {
7909
- * console.log('Job deleted successfully');
7910
- * }
7911
- * ```
7912
- *
7913
- * @example Force delete a running job
7914
- * ```typescript
7915
- * const result = await adapter.deleteTranscript('job-abc123', true);
7916
- * ```
7917
- *
7918
- * @see https://docs.speechmatics.com/
7919
- */
7920
- deleteTranscript(transcriptId: string, force?: boolean): Promise<{
7921
- success: boolean;
7922
- }>;
7923
- /**
7924
- * Normalize Speechmatics status to unified status
7925
- */
7926
- private normalizeStatus;
7927
- /**
7928
- * Normalize Speechmatics response to unified format
7929
- */
7930
- private normalizeResponse;
8188
+ declare class SpeechmaticsAdapter extends BaseAdapter {
8189
+ readonly name: "speechmatics";
8190
+ readonly capabilities: ProviderCapabilities;
8191
+ private client?;
8192
+ protected baseUrl: string;
8193
+ initialize(config: ProviderConfig): void;
8194
+ /**
8195
+ * Submit audio for transcription
8196
+ *
8197
+ * Speechmatics uses async batch processing. Returns a job ID immediately.
8198
+ * Poll getTranscript() to retrieve results.
8199
+ *
8200
+ * @param audio - Audio input (URL or file)
8201
+ * @param options - Transcription options
8202
+ * @returns Job submission response with ID for polling
8203
+ */
8204
+ transcribe(audio: AudioInput, options?: TranscribeOptions): Promise<UnifiedTranscriptResponse>;
8205
+ /**
8206
+ * Get transcription result by job ID
8207
+ *
8208
+ * Poll this method to check job status and retrieve completed transcription.
8209
+ *
8210
+ * @param transcriptId - Job ID from Speechmatics
8211
+ * @returns Transcription response with status and results
8212
+ */
8213
+ getTranscript(transcriptId: string): Promise<UnifiedTranscriptResponse>;
8214
+ /**
8215
+ * Delete a transcription job and its associated data
8216
+ *
8217
+ * Removes the job and all associated resources from Speechmatics' servers.
8218
+ * This action is irreversible.
8219
+ *
8220
+ * @param transcriptId - The job ID to delete
8221
+ * @param force - Force delete even if job is still running (default: false)
8222
+ * @returns Promise with success status
8223
+ *
8224
+ * @example Delete a completed job
8225
+ * ```typescript
8226
+ * const result = await adapter.deleteTranscript('job-abc123');
8227
+ * if (result.success) {
8228
+ * console.log('Job deleted successfully');
8229
+ * }
8230
+ * ```
8231
+ *
8232
+ * @example Force delete a running job
8233
+ * ```typescript
8234
+ * const result = await adapter.deleteTranscript('job-abc123', true);
8235
+ * ```
8236
+ *
8237
+ * @see https://docs.speechmatics.com/
8238
+ */
8239
+ deleteTranscript(transcriptId: string, force?: boolean): Promise<{
8240
+ success: boolean;
8241
+ }>;
8242
+ /**
8243
+ * Normalize Speechmatics status to unified status
8244
+ */
8245
+ private normalizeStatus;
8246
+ /**
8247
+ * Normalize Speechmatics response to unified format
8248
+ */
8249
+ private normalizeResponse;
8250
+ }
8251
+ /**
8252
+ * Factory function to create a Speechmatics adapter
8253
+ */
8254
+ declare function createSpeechmaticsAdapter(config: ProviderConfig): SpeechmaticsAdapter;
8255
+
8256
+ /**
8257
+ * Generated by orval v7.9.0 🍺
8258
+ * Do not edit manually.
8259
+ * Gladia Control API
8260
+ * OpenAPI spec version: 1.0
8261
+ */
8262
+ /**
8263
+ * Custom metadata given in the initial request
8264
+ * @nullable
8265
+ */
8266
+ type CallbackTranscriptionSuccessPayloadCustomMetadata = {
8267
+ [key: string]: unknown;
8268
+ } | null;
8269
+
8270
+ /**
8271
+ * Generated by orval v7.9.0 🍺
8272
+ * Do not edit manually.
8273
+ * Gladia Control API
8274
+ * OpenAPI spec version: 1.0
8275
+ */
8276
+ /**
8277
+ * Type of event
8278
+ */
8279
+ type CallbackTranscriptionSuccessPayloadEvent = (typeof CallbackTranscriptionSuccessPayloadEvent)[keyof typeof CallbackTranscriptionSuccessPayloadEvent];
8280
+ declare const CallbackTranscriptionSuccessPayloadEvent: {
8281
+ readonly transcriptionsuccess: "transcription.success";
8282
+ };
8283
+
8284
+ /**
8285
+ * Generated by orval v7.9.0 🍺
8286
+ * Do not edit manually.
8287
+ * Gladia Control API
8288
+ * OpenAPI spec version: 1.0
8289
+ */
8290
+
8291
+ interface CallbackTranscriptionSuccessPayload {
8292
+ /** Id of the job */
8293
+ id: string;
8294
+ /** Type of event */
8295
+ event: CallbackTranscriptionSuccessPayloadEvent;
8296
+ /** Result of the transcription */
8297
+ payload: TranscriptionResultDTO;
8298
+ /**
8299
+ * Custom metadata given in the initial request
8300
+ * @nullable
8301
+ */
8302
+ custom_metadata?: CallbackTranscriptionSuccessPayloadCustomMetadata;
8303
+ }
8304
+
8305
+ /**
8306
+ * Generated by orval v7.9.0 🍺
8307
+ * Do not edit manually.
8308
+ * Gladia Control API
8309
+ * OpenAPI spec version: 1.0
8310
+ */
8311
+ /**
8312
+ * Custom metadata given in the initial request
8313
+ * @nullable
8314
+ */
8315
+ type CallbackTranscriptionErrorPayloadCustomMetadata = {
8316
+ [key: string]: unknown;
8317
+ } | null;
8318
+
8319
+ /**
8320
+ * Generated by orval v7.9.0 🍺
8321
+ * Do not edit manually.
8322
+ * Gladia Control API
8323
+ * OpenAPI spec version: 1.0
8324
+ */
8325
+ /**
8326
+ * Type of event
8327
+ */
8328
+ type CallbackTranscriptionErrorPayloadEvent = (typeof CallbackTranscriptionErrorPayloadEvent)[keyof typeof CallbackTranscriptionErrorPayloadEvent];
8329
+ declare const CallbackTranscriptionErrorPayloadEvent: {
8330
+ readonly transcriptionerror: "transcription.error";
8331
+ };
8332
+
8333
+ /**
8334
+ * Generated by orval v7.9.0 🍺
8335
+ * Do not edit manually.
8336
+ * Gladia Control API
8337
+ * OpenAPI spec version: 1.0
8338
+ */
8339
+ interface ErrorDTO {
8340
+ /** Error code */
8341
+ code: number;
8342
+ /** Error message */
8343
+ message: string;
8344
+ }
8345
+
8346
+ /**
8347
+ * Generated by orval v7.9.0 🍺
8348
+ * Do not edit manually.
8349
+ * Gladia Control API
8350
+ * OpenAPI spec version: 1.0
8351
+ */
8352
+
8353
+ interface CallbackTranscriptionErrorPayload {
8354
+ /** Id of the job */
8355
+ id: string;
8356
+ /** Type of event */
8357
+ event: CallbackTranscriptionErrorPayloadEvent;
8358
+ /** The error that occurred during the transcription */
8359
+ error: ErrorDTO;
8360
+ /**
8361
+ * Custom metadata given in the initial request
8362
+ * @nullable
8363
+ */
8364
+ custom_metadata?: CallbackTranscriptionErrorPayloadCustomMetadata;
8365
+ }
8366
+
8367
+ /**
8368
+ * Generated by orval v7.9.0 🍺
8369
+ * Do not edit manually.
8370
+ * AssemblyAI API
8371
+ * AssemblyAI API
8372
+ * OpenAPI spec version: 1.3.4
8373
+ */
8374
+ /**
8375
+ * The status of the redacted audio
8376
+ */
8377
+ type RedactedAudioStatus = (typeof RedactedAudioStatus)[keyof typeof RedactedAudioStatus];
8378
+ declare const RedactedAudioStatus: {
8379
+ readonly redacted_audio_ready: "redacted_audio_ready";
8380
+ };
8381
+
8382
+ /**
8383
+ * Generated by orval v7.9.0 🍺
8384
+ * Do not edit manually.
8385
+ * AssemblyAI API
8386
+ * AssemblyAI API
8387
+ * OpenAPI spec version: 1.3.4
8388
+ */
8389
+
8390
+ interface RedactedAudioResponse {
8391
+ /** The status of the redacted audio */
8392
+ status: RedactedAudioStatus;
8393
+ /** The URL of the redacted audio file */
8394
+ redacted_audio_url: string;
8395
+ }
8396
+
8397
+ /**
8398
+ * Generated by orval v7.9.0 🍺
8399
+ * Do not edit manually.
8400
+ * AssemblyAI API
8401
+ * AssemblyAI API
8402
+ * OpenAPI spec version: 1.3.4
8403
+ */
8404
+
8405
+ /**
8406
+ * The notification when the redacted audio is ready.
8407
+ */
8408
+ type RedactedAudioNotification = RedactedAudioResponse;
8409
+
8410
+ /**
8411
+ * Generated by orval v7.9.0 🍺
8412
+ * Do not edit manually.
8413
+ * AssemblyAI API
8414
+ * AssemblyAI API
8415
+ * OpenAPI spec version: 1.3.4
8416
+ */
8417
+ /**
8418
+ * The status of the transcript. Either completed or error.
8419
+ */
8420
+ type TranscriptReadyStatus = (typeof TranscriptReadyStatus)[keyof typeof TranscriptReadyStatus];
8421
+ declare const TranscriptReadyStatus: {
8422
+ readonly completed: "completed";
8423
+ readonly error: "error";
8424
+ };
8425
+
8426
+ /**
8427
+ * Generated by orval v7.9.0 🍺
8428
+ * Do not edit manually.
8429
+ * AssemblyAI API
8430
+ * AssemblyAI API
8431
+ * OpenAPI spec version: 1.3.4
8432
+ */
8433
+
8434
+ /**
8435
+ * The notification when the transcript status is completed or error.
8436
+ */
8437
+ interface TranscriptReadyNotification {
8438
+ /** The ID of the transcript */
8439
+ transcript_id: string;
8440
+ /** The status of the transcript. Either completed or error. */
8441
+ status: TranscriptReadyStatus;
7931
8442
  }
8443
+
7932
8444
  /**
7933
- * Factory function to create a Speechmatics adapter
8445
+ * Generated by orval v7.9.0 🍺
8446
+ * Do not edit manually.
8447
+ * AssemblyAI API
8448
+ * AssemblyAI API
8449
+ * OpenAPI spec version: 1.3.4
7934
8450
  */
7935
- declare function createSpeechmaticsAdapter(config: ProviderConfig): SpeechmaticsAdapter;
8451
+
8452
+ /**
8453
+ * The notifications sent to the webhook URL.
8454
+ */
8455
+ type TranscriptWebhookNotification = TranscriptReadyNotification | RedactedAudioNotification;
7936
8456
 
7937
8457
  /**
7938
8458
  * Unified webhook types for transcription providers
7939
8459
  * Normalizes webhook callbacks from different providers to a common format
7940
8460
  */
7941
8461
 
8462
+ /**
8463
+ * Union of all Gladia webhook payloads
8464
+ */
8465
+ type GladiaWebhookPayload = CallbackTranscriptionSuccessPayload | CallbackTranscriptionErrorPayload;
8466
+ /**
8467
+ * Map of provider names to their webhook payload types
8468
+ */
8469
+ type ProviderWebhookPayloadMap = {
8470
+ gladia: GladiaWebhookPayload;
8471
+ assemblyai: TranscriptWebhookNotification;
8472
+ deepgram: ListenV1Response;
8473
+ "azure-stt": unknown;
8474
+ "openai-whisper": never;
8475
+ speechmatics: unknown;
8476
+ };
7942
8477
  /**
7943
8478
  * Unified webhook event types
7944
8479
  */
7945
8480
  type WebhookEventType = "transcription.created" | "transcription.processing" | "transcription.completed" | "transcription.failed" | "live.session_started" | "live.session_ended" | "live.transcript";
7946
8481
  /**
7947
- * Unified webhook event
7948
- * Normalized across all transcription providers
8482
+ * Unified webhook event with provider-specific type safety
8483
+ *
8484
+ * When a specific provider is known at compile time, the `raw` field
8485
+ * will be typed with that provider's actual webhook payload type.
8486
+ *
8487
+ * @template P - The transcription provider (defaults to all providers)
8488
+ *
8489
+ * @example Type-safe Gladia webhook handling
8490
+ * ```typescript
8491
+ * const event: UnifiedWebhookEvent<'gladia'> = handler.parse(payload);
8492
+ * // event.raw is typed as GladiaWebhookPayload
8493
+ * if ('payload' in event.raw) {
8494
+ * const transcription = event.raw.payload; // TranscriptionResultDTO
8495
+ * }
8496
+ * ```
7949
8497
  */
7950
- interface UnifiedWebhookEvent {
8498
+ interface UnifiedWebhookEvent<P extends TranscriptionProvider = TranscriptionProvider> {
7951
8499
  /** Whether the operation was successful */
7952
8500
  success: boolean;
7953
8501
  /** Provider that sent this webhook */
7954
- provider: TranscriptionProvider;
8502
+ provider: P;
7955
8503
  /** Type of webhook event */
7956
8504
  eventType: WebhookEventType;
7957
8505
  /** Transcription data (if available) */
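Among the webhook types added above, AssemblyAI's `TranscriptWebhookNotification` is a union of a transcript-ready notification and a redacted-audio notification, distinguishable by the presence of `transcript_id`. A hedged sketch using local structural aliases that mirror the declarations in this hunk (the generated types are also re-exported via the `AssemblyAITypes` namespace):

```typescript
// Structural mirrors of TranscriptReadyNotification and RedactedAudioNotification above.
type ReadyNotification = { transcript_id: string; status: "completed" | "error" };
type RedactedAudioReady = { status: "redacted_audio_ready"; redacted_audio_url: string };
type WebhookNotification = ReadyNotification | RedactedAudioReady;

function routeAssemblyAIWebhook(body: WebhookNotification): void {
  if ("transcript_id" in body) {
    // Transcript finished (or errored): fetch the full result next.
    console.log(`transcript ${body.transcript_id} is ${body.status}`);
  } else {
    // PII-redacted audio file is ready to download.
    console.log(`redacted audio at ${body.redacted_audio_url}`);
  }
}
```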
@@ -7987,8 +8535,15 @@ interface UnifiedWebhookEvent {
7987
8535
  };
7988
8536
  /** Event timestamp */
7989
8537
  timestamp: string;
7990
- /** Original webhook payload (for debugging/advanced usage) */
7991
- raw: unknown;
8538
+ /**
8539
+ * Original webhook payload (fully typed per provider)
8540
+ *
8541
+ * Type-safe based on the provider:
8542
+ * - `gladia`: GladiaWebhookPayload
8543
+ * - `assemblyai`: AssemblyAIWebhookPayload
8544
+ * - `deepgram`: DeepgramWebhookPayload
8545
+ */
8546
+ raw: P extends keyof ProviderWebhookPayloadMap ? ProviderWebhookPayloadMap[P] : unknown;
7992
8547
  }
7993
8548
  /**
7994
8549
  * Webhook validation result
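With `raw` now typed through `ProviderWebhookPayloadMap`, a Gladia webhook event carries a discriminated union on the `event` literal. A minimal sketch, assuming the package name from the title is the import specifier and that the event object comes from the package's Gladia webhook handler:

```typescript
import type { UnifiedWebhookEvent } from "voice-router-dev";

function handleGladiaWebhook(event: UnifiedWebhookEvent<"gladia">): void {
  // raw is CallbackTranscriptionSuccessPayload | CallbackTranscriptionErrorPayload
  const raw = event.raw;
  if (raw.event === "transcription.success") {
    console.log(`job ${raw.id} completed`); // raw.payload is the TranscriptionResultDTO
  } else {
    console.error(`job ${raw.id} failed: ${raw.error.code} ${raw.error.message}`);
  }
}
```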
@@ -9217,35 +9772,6 @@ interface BadRequestErrorResponse {
9217
9772
  validation_errors?: string[];
9218
9773
  }
9219
9774
 
9220
- /**
9221
- * Generated by orval v7.9.0 🍺
9222
- * Do not edit manually.
9223
- * Gladia Control API
9224
- * OpenAPI spec version: 1.0
9225
- */
9226
- interface CallbackConfig {
9227
- /** URL on which we will do a `POST` request with configured messages */
9228
- url?: string;
9229
- /** If true, partial transcript will be sent to the defined callback. */
9230
- receive_partial_transcripts?: boolean;
9231
- /** If true, final transcript will be sent to the defined callback. */
9232
- receive_final_transcripts?: boolean;
9233
- /** If true, begin and end speech events will be sent to the defined callback. */
9234
- receive_speech_events?: boolean;
9235
- /** If true, pre-processing events will be sent to the defined callback. */
9236
- receive_pre_processing_events?: boolean;
9237
- /** If true, realtime processing events will be sent to the defined callback. */
9238
- receive_realtime_processing_events?: boolean;
9239
- /** If true, post-processing events will be sent to the defined callback. */
9240
- receive_post_processing_events?: boolean;
9241
- /** If true, acknowledgments will be sent to the defined callback. */
9242
- receive_acknowledgments?: boolean;
9243
- /** If true, errors will be sent to the defined callback. */
9244
- receive_errors?: boolean;
9245
- /** If true, lifecycle events will be sent to the defined callback. */
9246
- receive_lifecycle_events?: boolean;
9247
- }
9248
-
9249
9775
  /**
9250
9776
  * Generated by orval v7.9.0 🍺
9251
9777
  * Do not edit manually.
@@ -10484,127 +11010,16 @@ interface TranslationMessage {
10484
11010
  /**
10485
11011
  * Generated by orval v7.9.0 🍺
10486
11012
  * Do not edit manually.
10487
- * Gladia Control API
10488
- * OpenAPI spec version: 1.0
10489
- */
10490
-
10491
- interface CallbackLiveTranslationMessage {
10492
- /** Id of the job */
10493
- id: string;
10494
- event: CallbackLiveTranslationMessageEvent;
10495
- /** The live message payload as sent to the WebSocket */
10496
- payload: TranslationMessage;
10497
- }
10498
-
10499
- /**
10500
- * Generated by orval v7.9.0 🍺
10501
- * Do not edit manually.
10502
- * Gladia Control API
10503
- * OpenAPI spec version: 1.0
10504
- */
10505
- /**
10506
- * Custom metadata given in the initial request
10507
- * @nullable
10508
- */
10509
- type CallbackTranscriptionErrorPayloadCustomMetadata = {
10510
- [key: string]: unknown;
10511
- } | null;
10512
-
10513
- /**
10514
- * Generated by orval v7.9.0 🍺
10515
- * Do not edit manually.
10516
- * Gladia Control API
10517
- * OpenAPI spec version: 1.0
10518
- */
10519
- /**
10520
- * Type of event
10521
- */
10522
- type CallbackTranscriptionErrorPayloadEvent = (typeof CallbackTranscriptionErrorPayloadEvent)[keyof typeof CallbackTranscriptionErrorPayloadEvent];
10523
- declare const CallbackTranscriptionErrorPayloadEvent: {
10524
- readonly transcriptionerror: "transcription.error";
10525
- };
10526
-
10527
- /**
10528
- * Generated by orval v7.9.0 🍺
10529
- * Do not edit manually.
10530
- * Gladia Control API
10531
- * OpenAPI spec version: 1.0
10532
- */
10533
- interface ErrorDTO {
10534
- /** Error code */
10535
- code: number;
10536
- /** Error message */
10537
- message: string;
10538
- }
10539
-
10540
- /**
10541
- * Generated by orval v7.9.0 🍺
10542
- * Do not edit manually.
10543
- * Gladia Control API
10544
- * OpenAPI spec version: 1.0
10545
- */
10546
-
10547
- interface CallbackTranscriptionErrorPayload {
10548
- /** Id of the job */
10549
- id: string;
10550
- /** Type of event */
10551
- event: CallbackTranscriptionErrorPayloadEvent;
10552
- /** The error that occurred during the transcription */
10553
- error: ErrorDTO;
10554
- /**
10555
- * Custom metadata given in the initial request
10556
- * @nullable
10557
- */
10558
- custom_metadata?: CallbackTranscriptionErrorPayloadCustomMetadata;
10559
- }
10560
-
10561
- /**
10562
- * Generated by orval v7.9.0 🍺
10563
- * Do not edit manually.
10564
- * Gladia Control API
10565
- * OpenAPI spec version: 1.0
10566
- */
10567
- /**
10568
- * Custom metadata given in the initial request
10569
- * @nullable
10570
- */
10571
- type CallbackTranscriptionSuccessPayloadCustomMetadata = {
10572
- [key: string]: unknown;
10573
- } | null;
10574
-
10575
- /**
10576
- * Generated by orval v7.9.0 🍺
10577
- * Do not edit manually.
10578
- * Gladia Control API
10579
- * OpenAPI spec version: 1.0
10580
- */
10581
- /**
10582
- * Type of event
10583
- */
10584
- type CallbackTranscriptionSuccessPayloadEvent = (typeof CallbackTranscriptionSuccessPayloadEvent)[keyof typeof CallbackTranscriptionSuccessPayloadEvent];
10585
- declare const CallbackTranscriptionSuccessPayloadEvent: {
10586
- readonly transcriptionsuccess: "transcription.success";
10587
- };
10588
-
10589
- /**
10590
- * Generated by orval v7.9.0 🍺
10591
- * Do not edit manually.
10592
- * Gladia Control API
10593
- * OpenAPI spec version: 1.0
10594
- */
10595
-
10596
- interface CallbackTranscriptionSuccessPayload {
10597
- /** Id of the job */
10598
- id: string;
10599
- /** Type of event */
10600
- event: CallbackTranscriptionSuccessPayloadEvent;
10601
- /** Result of the transcription */
10602
- payload: TranscriptionResultDTO;
10603
- /**
10604
- * Custom metadata given in the initial request
10605
- * @nullable
10606
- */
10607
- custom_metadata?: CallbackTranscriptionSuccessPayloadCustomMetadata;
11013
+ * Gladia Control API
11014
+ * OpenAPI spec version: 1.0
11015
+ */
11016
+
11017
+ interface CallbackLiveTranslationMessage {
11018
+ /** Id of the job */
11019
+ id: string;
11020
+ event: CallbackLiveTranslationMessageEvent;
11021
+ /** The live message payload as sent to the WebSocket */
11022
+ payload: TranslationMessage;
10608
11023
  }
10609
11024
 
10610
11025
  /**
@@ -10793,92 +11208,6 @@ type StreamingResponsePostSessionMetadata = {
10793
11208
  [key: string]: unknown;
10794
11209
  };
10795
11210
 
10796
- /**
10797
- * Generated by orval v7.9.0 🍺
10798
- * Do not edit manually.
10799
- * Gladia Control API
10800
- * OpenAPI spec version: 1.0
10801
- */
10802
- interface MessagesConfig {
10803
- /** If true, partial transcript will be sent to websocket. */
10804
- receive_partial_transcripts?: boolean;
10805
- /** If true, final transcript will be sent to websocket. */
10806
- receive_final_transcripts?: boolean;
10807
- /** If true, begin and end speech events will be sent to websocket. */
10808
- receive_speech_events?: boolean;
10809
- /** If true, pre-processing events will be sent to websocket. */
10810
- receive_pre_processing_events?: boolean;
10811
- /** If true, realtime processing events will be sent to websocket. */
10812
- receive_realtime_processing_events?: boolean;
10813
- /** If true, post-processing events will be sent to websocket. */
10814
- receive_post_processing_events?: boolean;
10815
- /** If true, acknowledgments will be sent to websocket. */
10816
- receive_acknowledgments?: boolean;
10817
- /** If true, errors will be sent to websocket. */
10818
- receive_errors?: boolean;
10819
- /** If true, lifecycle events will be sent to websocket. */
10820
- receive_lifecycle_events?: boolean;
10821
- }
10822
-
10823
- /**
10824
- * Generated by orval v7.9.0 🍺
10825
- * Do not edit manually.
10826
- * Gladia Control API
10827
- * OpenAPI spec version: 1.0
10828
- */
10829
-
10830
- interface PostProcessingConfig {
10831
- /** If true, generates summarization for the whole transcription. */
10832
- summarization?: boolean;
10833
- /** Summarization configuration, if `summarization` is enabled */
10834
- summarization_config?: SummarizationConfigDTO;
10835
- /** If true, generates chapters for the whole transcription. */
10836
- chapterization?: boolean;
10837
- }
10838
-
10839
- /**
10840
- * Generated by orval v7.9.0 🍺
10841
- * Do not edit manually.
10842
- * Gladia Control API
10843
- * OpenAPI spec version: 1.0
10844
- */
10845
- interface PreProcessingConfig {
10846
- /** If true, apply pre-processing to the audio stream to enhance the quality. */
10847
- audio_enhancer?: boolean;
10848
- /**
10849
- * Sensitivity configuration for Speech Threshold. A value close to 1 will apply stricter thresholds, making it less likely to detect background sounds as speech.
10850
- * @minimum 0
10851
- * @maximum 1
10852
- */
10853
- speech_threshold?: number;
10854
- }
10855
-
10856
- /**
10857
- * Generated by orval v7.9.0 🍺
10858
- * Do not edit manually.
10859
- * Gladia Control API
10860
- * OpenAPI spec version: 1.0
10861
- */
10862
-
10863
- interface RealtimeProcessingConfig {
10864
- /** If true, enable custom vocabulary for the transcription. */
10865
- custom_vocabulary?: boolean;
10866
- /** Custom vocabulary configuration, if `custom_vocabulary` is enabled */
10867
- custom_vocabulary_config?: CustomVocabularyConfigDTO;
10868
- /** If true, enable custom spelling for the transcription. */
10869
- custom_spelling?: boolean;
10870
- /** Custom spelling configuration, if `custom_spelling` is enabled */
10871
- custom_spelling_config?: CustomSpellingConfigDTO;
10872
- /** If true, enable translation for the transcription */
10873
- translation?: boolean;
10874
- /** Translation configuration, if `translation` is enabled */
10875
- translation_config?: TranslationConfigDTO;
10876
- /** If true, enable named entity recognition for the transcription. */
10877
- named_entity_recognition?: boolean;
10878
- /** If true, enable sentiment analysis for the transcription. */
10879
- sentiment_analysis?: boolean;
10880
- }
10881
-
10882
11211
  /**
10883
11212
  * Generated by orval v7.9.0 🍺
10884
11213
  * Do not edit manually.
@@ -11377,76 +11706,6 @@ type StreamingControllerInitStreamingSessionV2Params = {
11377
11706
  region?: StreamingSupportedRegions;
11378
11707
  };
11379
11708
 
11380
- /**
11381
- * Generated by orval v7.9.0 🍺
11382
- * Do not edit manually.
11383
- * Gladia Control API
11384
- * OpenAPI spec version: 1.0
11385
- */
11386
- /**
11387
- * Custom metadata you can attach to this live transcription
11388
- */
11389
- type StreamingRequestCustomMetadata = {
11390
- [key: string]: unknown;
11391
- };
11392
-
11393
- /**
11394
- * Generated by orval v7.9.0 🍺
11395
- * Do not edit manually.
11396
- * Gladia Control API
11397
- * OpenAPI spec version: 1.0
11398
- */
11399
-
11400
- interface StreamingRequest {
11401
- /** The encoding format of the audio stream. Supported formats:
11402
- - PCM: 8, 16, 24, and 32 bits
11403
- - A-law: 8 bits
11404
- - μ-law: 8 bits
11405
-
11406
- Note: No need to add WAV headers to raw audio as the API supports both formats. */
11407
- encoding?: StreamingSupportedEncodingEnum;
11408
- /** The bit depth of the audio stream */
11409
- bit_depth?: StreamingSupportedBitDepthEnum;
11410
- /** The sample rate of the audio stream */
11411
- sample_rate?: StreamingSupportedSampleRateEnum;
11412
- /**
11413
- * The number of channels of the audio stream
11414
- * @minimum 1
11415
- * @maximum 8
11416
- */
11417
- channels?: number;
11418
- /** Custom metadata you can attach to this live transcription */
11419
- custom_metadata?: StreamingRequestCustomMetadata;
11420
- /** The model used to process the audio. "solaria-1" is used by default. */
11421
- model?: StreamingSupportedModels;
11422
- /**
11423
- * The endpointing duration in seconds. Endpointing is the duration of silence which will cause an utterance to be considered as finished
11424
- * @minimum 0.01
11425
- * @maximum 10
11426
- */
11427
- endpointing?: number;
11428
- /**
11429
- * The maximum duration in seconds without endpointing. If endpointing is not detected after this duration, current utterance will be considered as finished
11430
- * @minimum 5
11431
- * @maximum 60
11432
- */
11433
- maximum_duration_without_endpointing?: number;
11434
- /** Specify the language configuration */
11435
- language_config?: LanguageConfig;
11436
- /** Specify the pre-processing configuration */
11437
- pre_processing?: PreProcessingConfig;
11438
- /** Specify the realtime processing configuration */
11439
- realtime_processing?: RealtimeProcessingConfig;
11440
- /** Specify the post-processing configuration */
11441
- post_processing?: PostProcessingConfig;
11442
- /** Specify the websocket messages configuration */
11443
- messages_config?: MessagesConfig;
11444
- /** If true, messages will be sent to configured url. */
11445
- callback?: boolean;
11446
- /** Specify the callback configuration */
11447
- callback_config?: CallbackConfig;
11448
- }
11449
-
11450
11709
  /**
11451
11710
  * Generated by orval v7.9.0 🍺
11452
11711
  * Do not edit manually.
@@ -12994,49 +13253,6 @@ interface RealtimeTemporaryTokenResponse {
12994
13253
  token: string;
12995
13254
  }
12996
13255
 
12997
- /**
12998
- * Generated by orval v7.9.0 🍺
12999
- * Do not edit manually.
13000
- * AssemblyAI API
13001
- * AssemblyAI API
13002
- * OpenAPI spec version: 1.3.4
13003
- */
13004
- /**
13005
- * The status of the redacted audio
13006
- */
13007
- type RedactedAudioStatus = (typeof RedactedAudioStatus)[keyof typeof RedactedAudioStatus];
13008
- declare const RedactedAudioStatus: {
13009
- readonly redacted_audio_ready: "redacted_audio_ready";
13010
- };
13011
-
13012
- /**
13013
- * Generated by orval v7.9.0 🍺
13014
- * Do not edit manually.
13015
- * AssemblyAI API
13016
- * AssemblyAI API
13017
- * OpenAPI spec version: 1.3.4
13018
- */
13019
-
13020
- interface RedactedAudioResponse {
13021
- /** The status of the redacted audio */
13022
- status: RedactedAudioStatus;
13023
- /** The URL of the redacted audio file */
13024
- redacted_audio_url: string;
13025
- }
13026
-
13027
- /**
13028
- * Generated by orval v7.9.0 🍺
13029
- * Do not edit manually.
13030
- * AssemblyAI API
13031
- * AssemblyAI API
13032
- * OpenAPI spec version: 1.3.4
13033
- */
13034
-
13035
- /**
13036
- * The notification when the redacted audio is ready.
13037
- */
13038
- type RedactedAudioNotification = RedactedAudioResponse;
13039
-
13040
13256
  /**
13041
13257
  * Generated by orval v7.9.0 🍺
13042
13258
  * Do not edit manually.
@@ -13253,53 +13469,6 @@ type TranscriptParamsAllOf = {
13253
13469
  */
13254
13470
  type TranscriptParams = TranscriptParamsAllOf & TranscriptOptionalParams;
13255
13471
 
13256
- /**
13257
- * Generated by orval v7.9.0 🍺
13258
- * Do not edit manually.
13259
- * AssemblyAI API
13260
- * AssemblyAI API
13261
- * OpenAPI spec version: 1.3.4
13262
- */
13263
- /**
13264
- * The status of the transcript. Either completed or error.
13265
- */
13266
- type TranscriptReadyStatus = (typeof TranscriptReadyStatus)[keyof typeof TranscriptReadyStatus];
13267
- declare const TranscriptReadyStatus: {
13268
- readonly completed: "completed";
13269
- readonly error: "error";
13270
- };
13271
-
13272
- /**
13273
- * Generated by orval v7.9.0 🍺
13274
- * Do not edit manually.
13275
- * AssemblyAI API
13276
- * AssemblyAI API
13277
- * OpenAPI spec version: 1.3.4
13278
- */
13279
-
13280
- /**
13281
- * The notification when the transcript status is completed or error.
13282
- */
13283
- interface TranscriptReadyNotification {
13284
- /** The ID of the transcript */
13285
- transcript_id: string;
13286
- /** The status of the transcript. Either completed or error. */
13287
- status: TranscriptReadyStatus;
13288
- }
13289
-
13290
- /**
13291
- * Generated by orval v7.9.0 🍺
13292
- * Do not edit manually.
13293
- * AssemblyAI API
13294
- * AssemblyAI API
13295
- * OpenAPI spec version: 1.3.4
13296
- */
13297
-
13298
- /**
13299
- * The notifications sent to the webhook URL.
13300
- */
13301
- type TranscriptWebhookNotification = TranscriptReadyNotification | RedactedAudioNotification;
13302
-
13303
13472
  /**
13304
13473
  * Generated by orval v7.9.0 🍺
13305
13474
  * Do not edit manually.
@@ -13558,4 +13727,4 @@ declare namespace index {
13558
13727
  export { index_AudioIntelligenceModelStatus as AudioIntelligenceModelStatus, type index_AutoHighlightResult as AutoHighlightResult, type index_AutoHighlightsResult as AutoHighlightsResult, type index_BadRequestResponse as BadRequestResponse, type index_CannotAccessUploadedFileResponse as CannotAccessUploadedFileResponse, type index_Chapter as Chapter, type index_ContentSafetyLabel as ContentSafetyLabel, type index_ContentSafetyLabelResult as ContentSafetyLabelResult, type index_ContentSafetyLabelsResult as ContentSafetyLabelsResult, type index_ContentSafetyLabelsResultSeverityScoreSummary as ContentSafetyLabelsResultSeverityScoreSummary, type index_ContentSafetyLabelsResultSummary as ContentSafetyLabelsResultSummary, type index_CreateRealtimeTemporaryTokenParams as CreateRealtimeTemporaryTokenParams, type index_Entity as Entity, index_EntityType as EntityType, type Error$1 as Error, type index_GatewayTimeoutResponse as GatewayTimeoutResponse, type index_GetSubtitlesParams as GetSubtitlesParams, type index_InternalServerErrorResponse as InternalServerErrorResponse, type index_LemurActionItemsParams as LemurActionItemsParams, type index_LemurActionItemsParamsAllOf as LemurActionItemsParamsAllOf, type index_LemurActionItemsResponse as LemurActionItemsResponse, type index_LemurBaseParams as LemurBaseParams, type index_LemurBaseParamsContext as LemurBaseParamsContext, type index_LemurBaseParamsContextOneOf as LemurBaseParamsContextOneOf, type index_LemurBaseParamsFinalModel as LemurBaseParamsFinalModel, type index_LemurBaseResponse as LemurBaseResponse, index_LemurModel as LemurModel, type index_LemurQuestion as LemurQuestion, type index_LemurQuestionAnswer as LemurQuestionAnswer, type index_LemurQuestionAnswerParams as LemurQuestionAnswerParams, type index_LemurQuestionAnswerParamsAllOf as LemurQuestionAnswerParamsAllOf, type index_LemurQuestionAnswerResponse as LemurQuestionAnswerResponse, type index_LemurQuestionAnswerResponseAllOf as LemurQuestionAnswerResponseAllOf, type index_LemurQuestionContext as LemurQuestionContext, type index_LemurQuestionContextOneOf as LemurQuestionContextOneOf, type index_LemurResponse as LemurResponse, type index_LemurStringResponse as LemurStringResponse, type index_LemurStringResponseAllOf as LemurStringResponseAllOf, type index_LemurSummaryParams as LemurSummaryParams, type index_LemurSummaryParamsAllOf as LemurSummaryParamsAllOf, type index_LemurSummaryResponse as LemurSummaryResponse, type index_LemurTaskParams as LemurTaskParams, type index_LemurTaskParamsAllOf as LemurTaskParamsAllOf, type index_LemurTaskResponse as LemurTaskResponse, type index_LemurUsage as LemurUsage, type index_ListTranscriptParams as ListTranscriptParams, type index_ListTranscriptsParams as ListTranscriptsParams, type index_NotFoundResponse as NotFoundResponse, type index_PageDetails as PageDetails, type index_PageDetailsNextUrl as PageDetailsNextUrl, type index_PageDetailsPrevUrl as PageDetailsPrevUrl, type index_ParagraphsResponse as ParagraphsResponse, index_PiiPolicy as PiiPolicy, type index_PurgeLemurRequestDataResponse as PurgeLemurRequestDataResponse, type index_RealtimeTemporaryTokenResponse as RealtimeTemporaryTokenResponse, index_RedactPiiAudioQuality as RedactPiiAudioQuality, type index_RedactedAudioNotification as RedactedAudioNotification, type index_RedactedAudioResponse as RedactedAudioResponse, index_RedactedAudioStatus as RedactedAudioStatus, type index_SentencesResponse as SentencesResponse, index_Sentiment as Sentiment, type SentimentAnalysisResult$1 as SentimentAnalysisResult, type index_SentimentAnalysisResultChannel as SentimentAnalysisResultChannel, type index_SentimentAnalysisResultSpeaker as SentimentAnalysisResultSpeaker, type index_ServiceUnavailableResponse as ServiceUnavailableResponse, type index_SeverityScoreSummary as SeverityScoreSummary, index_SpeechModel as SpeechModel, index_SubstitutionPolicy as SubstitutionPolicy, index_SubtitleFormat as SubtitleFormat, index_SummaryModel as SummaryModel, index_SummaryType as SummaryType, type index_Timestamp as Timestamp, type index_TooManyRequestsResponse as TooManyRequestsResponse, type index_TopicDetectionModelResult as TopicDetectionModelResult, type index_TopicDetectionModelResultSummary as TopicDetectionModelResultSummary, type index_TopicDetectionResult as TopicDetectionResult, type index_TopicDetectionResultLabelsItem as TopicDetectionResultLabelsItem, type index_Transcript as Transcript, type index_TranscriptAudioDuration as TranscriptAudioDuration, type index_TranscriptAudioEndAt as TranscriptAudioEndAt, type index_TranscriptAudioStartFrom as TranscriptAudioStartFrom, type index_TranscriptAutoChapters as TranscriptAutoChapters, type index_TranscriptAutoHighlightsResult as TranscriptAutoHighlightsResult, index_TranscriptBoostParam as TranscriptBoostParam, type index_TranscriptBoostParamProperty as TranscriptBoostParamProperty, type index_TranscriptChapters as TranscriptChapters, type index_TranscriptConfidence as TranscriptConfidence, type index_TranscriptContentSafety as TranscriptContentSafety, type index_TranscriptContentSafetyLabels as TranscriptContentSafetyLabels, type index_TranscriptCustomSpelling as TranscriptCustomSpelling, type index_TranscriptCustomSpellingProperty as TranscriptCustomSpellingProperty, type index_TranscriptCustomTopics as TranscriptCustomTopics, type index_TranscriptDisfluencies as TranscriptDisfluencies, type index_TranscriptEntities as TranscriptEntities, type index_TranscriptEntityDetection as TranscriptEntityDetection, type index_TranscriptFilterProfanity as TranscriptFilterProfanity, type index_TranscriptFormatText as TranscriptFormatText, type index_TranscriptIabCategories as TranscriptIabCategories, type index_TranscriptIabCategoriesResult as TranscriptIabCategoriesResult, index_TranscriptLanguageCode as TranscriptLanguageCode, type index_TranscriptLanguageCodeProperty as TranscriptLanguageCodeProperty, type index_TranscriptLanguageConfidence as TranscriptLanguageConfidence, type index_TranscriptLanguageConfidenceThreshold as TranscriptLanguageConfidenceThreshold, type index_TranscriptLanguageDetection as TranscriptLanguageDetection, type index_TranscriptList as TranscriptList, type index_TranscriptListItem as TranscriptListItem, type index_TranscriptListItemCompleted as TranscriptListItemCompleted, type index_TranscriptListItemError as TranscriptListItemError, type index_TranscriptMultichannel as TranscriptMultichannel, type index_TranscriptOptionalParams as TranscriptOptionalParams, type index_TranscriptOptionalParamsLanguageCode as TranscriptOptionalParamsLanguageCode, type index_TranscriptOptionalParamsLanguageCodeOneOf as TranscriptOptionalParamsLanguageCodeOneOf, type index_TranscriptOptionalParamsRedactPiiSub as TranscriptOptionalParamsRedactPiiSub, type index_TranscriptOptionalParamsSpeakersExpected as TranscriptOptionalParamsSpeakersExpected, type index_TranscriptOptionalParamsSpeechModel as TranscriptOptionalParamsSpeechModel, type index_TranscriptOptionalParamsSpeechThreshold as TranscriptOptionalParamsSpeechThreshold, type index_TranscriptOptionalParamsWebhookAuthHeaderName as TranscriptOptionalParamsWebhookAuthHeaderName, type index_TranscriptOptionalParamsWebhookAuthHeaderValue as TranscriptOptionalParamsWebhookAuthHeaderValue, type index_TranscriptParagraph as TranscriptParagraph, type index_TranscriptParams as TranscriptParams, type index_TranscriptParamsAllOf as TranscriptParamsAllOf, type index_TranscriptPunctuate as TranscriptPunctuate, type index_TranscriptReadyNotification as TranscriptReadyNotification, index_TranscriptReadyStatus as TranscriptReadyStatus, type index_TranscriptRedactPiiAudio as TranscriptRedactPiiAudio, type index_TranscriptRedactPiiAudioQuality as TranscriptRedactPiiAudioQuality, type index_TranscriptRedactPiiPolicies as TranscriptRedactPiiPolicies, type index_TranscriptSentence as TranscriptSentence, type index_TranscriptSentenceChannel as TranscriptSentenceChannel, type index_TranscriptSentenceSpeaker as TranscriptSentenceSpeaker, type index_TranscriptSentimentAnalysis as TranscriptSentimentAnalysis, type index_TranscriptSentimentAnalysisResults as TranscriptSentimentAnalysisResults, type index_TranscriptSpeakerLabels as TranscriptSpeakerLabels, type index_TranscriptSpeakersExpected as TranscriptSpeakersExpected, type index_TranscriptSpeechModel as TranscriptSpeechModel, type index_TranscriptSpeechThreshold as TranscriptSpeechThreshold, type index_TranscriptSpeedBoost as TranscriptSpeedBoost, index_TranscriptStatus as TranscriptStatus, type index_TranscriptSummary as TranscriptSummary, type index_TranscriptSummaryModel as TranscriptSummaryModel, type index_TranscriptSummaryType as TranscriptSummaryType, type index_TranscriptText as TranscriptText, type index_TranscriptThrottled as TranscriptThrottled, type index_TranscriptUtterance as TranscriptUtterance, type index_TranscriptUtteranceChannel as TranscriptUtteranceChannel, type index_TranscriptUtterances as TranscriptUtterances, type index_TranscriptWebhookAuthHeaderName as TranscriptWebhookAuthHeaderName, type index_TranscriptWebhookNotification as TranscriptWebhookNotification, type index_TranscriptWebhookStatusCode as TranscriptWebhookStatusCode, type index_TranscriptWebhookUrl as TranscriptWebhookUrl, type index_TranscriptWord as TranscriptWord, type index_TranscriptWordChannel as TranscriptWordChannel, type index_TranscriptWordSpeaker as TranscriptWordSpeaker, type index_TranscriptWords as TranscriptWords, type index_UnauthorizedResponse as UnauthorizedResponse, type index_UploadedFile as UploadedFile, type index_WordSearchMatch as WordSearchMatch, type index_WordSearchParams as WordSearchParams, type index_WordSearchResponse as WordSearchResponse, type index_WordSearchTimestamp as WordSearchTimestamp };
13559
13728
  }
13560
13729
 
13561
- export { AssemblyAIAdapter, type TranscriptOptionalParams as AssemblyAIOptions, type AssemblyAIStreamingOptions, index as AssemblyAITypes, AssemblyAIWebhookHandler, type AudioChunk, type AudioInput, AudioResponseFormat, AudioTranscriptionModel, AzureSTTAdapter, AzureWebhookHandler, BaseAdapter, BaseWebhookHandler, type BatchOnlyProvider, DeepgramAdapter, type ListenV1MediaTranscribeParams as DeepgramOptions, type DeepgramStreamingOptions, DeepgramWebhookHandler, GladiaAdapter, type AudioToLlmListConfigDTO as GladiaAudioToLlmConfig, type CodeSwitchingConfigDTO as GladiaCodeSwitchingConfig, type InitTranscriptionRequest as GladiaOptions, type GladiaStreamingOptions, index$1 as GladiaTypes, GladiaWebhookHandler, ListenV1EncodingParameter, type ListenV1LanguageParameter, type ListenV1ModelParameter, type ListenV1VersionParameter, OpenAIWhisperAdapter, type CreateTranscriptionRequest as OpenAIWhisperOptions, type ProviderCapabilities, type ProviderConfig, type ProviderRawResponseMap, type ProviderStreamingOptions, type SessionStatus, SpeakV1ContainerParameter, SpeakV1EncodingParameter, SpeakV1SampleRateParameter, type Speaker, SpeechmaticsAdapter, type SpeechmaticsOperatingPoint, SpeechmaticsWebhookHandler, type StreamEvent, type StreamEventType, type StreamingCallbacks, type StreamingOptions, type StreamingOptionsForProvider, type StreamingProvider, type StreamingSession, StreamingSupportedBitDepthEnum, StreamingSupportedEncodingEnum, StreamingSupportedSampleRateEnum, type TranscribeOptions, type TranscribeStreamParams, type TranscriptionAdapter, type TranscriptionLanguage, type TranscriptionModel, type TranscriptionProvider, type TranscriptionStatus, type UnifiedTranscriptResponse, type UnifiedWebhookEvent, type Utterance, VoiceRouter, type VoiceRouterConfig, type WebhookEventType, WebhookRouter, type WebhookRouterOptions, type WebhookRouterResult, type WebhookValidation, type WebhookVerificationOptions, type Word, createAssemblyAIAdapter, createAssemblyAIWebhookHandler, createAzureSTTAdapter, createAzureWebhookHandler, createDeepgramAdapter, createDeepgramWebhookHandler, createGladiaAdapter, createGladiaWebhookHandler, createOpenAIWhisperAdapter, createSpeechmaticsAdapter, createVoiceRouter, createWebhookRouter };
13730
+ export { AssemblyAIAdapter, type Chapter as AssemblyAIChapter, type ContentSafetyLabelsResult as AssemblyAIContentSafetyResult, type Entity as AssemblyAIEntity, type AssemblyAIExtendedData, type AutoHighlightsResult as AssemblyAIHighlightsResult, type TranscriptOptionalParams as AssemblyAIOptions, type SentimentAnalysisResult$1 as AssemblyAISentimentResult, type AssemblyAIStreamingOptions, type TopicDetectionModelResult as AssemblyAITopicsResult, index as AssemblyAITypes, AssemblyAIWebhookHandler, type TranscriptWebhookNotification as AssemblyAIWebhookPayload, type AudioChunk, type AudioInput, AudioResponseFormat, AudioTranscriptionModel, AzureSTTAdapter, AzureWebhookHandler, BaseAdapter, BaseWebhookHandler, type BatchOnlyProvider, DeepgramAdapter, type DeepgramExtendedData, type ListenV1ResponseMetadata as DeepgramMetadata, type ListenV1MediaTranscribeParams as DeepgramOptions, type DeepgramStreamingOptions, DeepgramWebhookHandler, type ListenV1Response as DeepgramWebhookPayload, GladiaAdapter, type AudioToLlmListConfigDTO as GladiaAudioToLlmConfig, type AudioToLlmListDTO as GladiaAudioToLlmResult, type ChapterizationDTO as GladiaChapters, type CodeSwitchingConfigDTO as GladiaCodeSwitchingConfig, type NamedEntityRecognitionDTO as GladiaEntities, type GladiaExtendedData, type ModerationDTO as GladiaModeration, type InitTranscriptionRequest as GladiaOptions, type SentimentAnalysisDTO as GladiaSentiment, type SpeakerReidentificationDTO as GladiaSpeakerReidentification, type GladiaStreamingOptions, type StreamingRequest as GladiaStreamingRequest, type StructuredDataExtractionDTO as GladiaStructuredData, type TranslationDTO as GladiaTranslation, index$1 as GladiaTypes, type CallbackTranscriptionErrorPayload as GladiaWebhookErrorPayload, GladiaWebhookHandler, type GladiaWebhookPayload, type CallbackTranscriptionSuccessPayload as GladiaWebhookSuccessPayload, ListenV1EncodingParameter, type ListenV1LanguageParameter, type ListenV1ModelParameter, type ListenV1VersionParameter, OpenAIWhisperAdapter, type CreateTranscriptionRequest as OpenAIWhisperOptions, type ProviderCapabilities, type ProviderConfig, type ProviderExtendedDataMap, type ProviderRawResponseMap, type ProviderStreamingOptions, type ProviderWebhookPayloadMap, type SessionStatus, SpeakV1ContainerParameter, SpeakV1EncodingParameter, SpeakV1SampleRateParameter, type Speaker, SpeechmaticsAdapter, type SpeechmaticsOperatingPoint, SpeechmaticsWebhookHandler, type StreamEvent, type StreamEventType, type StreamingCallbacks, type StreamingOptions, type StreamingOptionsForProvider, type StreamingProvider, type StreamingSession, StreamingSupportedBitDepthEnum, StreamingSupportedEncodingEnum, StreamingSupportedSampleRateEnum, type TranscribeOptions, type TranscribeStreamParams, type TranscriptionAdapter, type TranscriptionLanguage, type TranscriptionModel, type TranscriptionProvider, type TranscriptionStatus, type UnifiedTranscriptResponse, type UnifiedWebhookEvent, type Utterance, VoiceRouter, type VoiceRouterConfig, type WebhookEventType, WebhookRouter, type WebhookRouterOptions, type WebhookRouterResult, type WebhookValidation, type WebhookVerificationOptions, type Word, createAssemblyAIAdapter, createAssemblyAIWebhookHandler, createAzureSTTAdapter, createAzureWebhookHandler, createDeepgramAdapter, createDeepgramWebhookHandler, createGladiaAdapter, createGladiaWebhookHandler, createOpenAIWhisperAdapter, createSpeechmaticsAdapter, createVoiceRouter, createWebhookRouter };
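The 0.2.8 export line above is additive: alongside the existing adapters and factory functions, the package now re-exports provider-specific aliases for webhook payloads and extended transcript data (for example AssemblyAIWebhookPayload, DeepgramWebhookPayload, GladiaWebhookPayload, ProviderWebhookPayloadMap, and ProviderExtendedDataMap). Every newly added entry carries the type modifier, so the additions are type-only and contribute no runtime code. Below is a minimal consumer-side sketch; it assumes only that these names are importable from the package under its published name voice-router-dev, as the export line indicates. The KnownWebhookEvent wrapper and its provider discriminant are illustrative assumptions made for this sketch, not types published by the package, and the concrete payload fields live elsewhere in the generated .d.ts rather than in this hunk.

import type {
  AssemblyAIWebhookPayload,
  DeepgramWebhookPayload,
  GladiaWebhookPayload,
} from 'voice-router-dev';

// Illustrative local wrapper: the provider discriminant and this union shape
// are assumptions made for the sketch, not part of the package's exported API.
type KnownWebhookEvent =
  | { provider: 'assemblyai'; payload: AssemblyAIWebhookPayload }
  | { provider: 'deepgram'; payload: DeepgramWebhookPayload }
  | { provider: 'gladia'; payload: GladiaWebhookPayload };

// Narrow on the locally defined discriminant before handing the payload to
// provider-specific code; only the alias names above are taken from the diff.
function describeWebhook(event: KnownWebhookEvent): string {
  return `received a ${event.provider} webhook payload`;
}

Because the new entries are type-only, bundlers can drop such imports entirely at build time. The names ProviderWebhookPayloadMap and ProviderExtendedDataMap suggest per-provider lookup maps mirroring ProviderRawResponseMap, but their exact shape is not visible in this hunk.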