voice-router-dev 0.9.0 → 0.9.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -5,6 +5,46 @@ All notable changes to this project will be documented in this file.
5
5
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
6
6
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7
7
 
8
+ ## [0.9.2] - 2026-04-27
9
+
10
+ ### Changed
11
+
12
+ #### Restore and Strengthen Streaming Type Generation
13
+
14
+ Added generated streaming response types for Deepgram and ElevenLabs from their official SDKs, and wired the existing Soniox SDK-derived streaming response types into the adapter implementation.
15
+
16
+ #### Reduce Handwritten Streaming Protocol Code
17
+
18
+ Replaced hand-maintained Deepgram realtime message interfaces with generated types, migrated ElevenLabs realtime message parsing to a generated discriminated union, and updated Soniox realtime parsing and utterance assembly to use generated token/response types directly.
19
+
20
+ #### Speechmatics Realtime Typing Cleanup
21
+
22
+ Restored Speechmatics realtime support and tightened the outbound realtime configuration typing while preserving the RT-only `speaker_diarization_config.max_speakers` extension over the batch schema.
23
+
24
+ #### ElevenLabs Webhook Callback Support
25
+
26
+ Re-enabled ElevenLabs async webhook mode through the unified API by treating `webhookUrl` as an async-intent flag, while documenting that the actual destination must already be configured in the ElevenLabs dashboard and can be targeted with `elevenlabs.webhook_id`.
27
+
28
+ #### Deepgram Callback Documentation
29
+
30
+ Clarified Deepgram callback behavior and request-history retrieval semantics: callback mode returns a `request_id` immediately and webhook delivery is the primary retrieval mechanism, while `getTranscript()` remains best-effort via request history.
31
+
32
+ ## [0.9.1] - 2026-04-26
33
+
34
+ ### Changed
35
+
36
+ #### Strengthen Generated Typing Across Providers
37
+
38
+ Replaced remaining weak request construction paths with generated request types across multiple adapters, including Soniox, Speechmatics, Azure STT, OpenAI Whisper, AssemblyAI, ElevenLabs, Deepgram, and Gladia.
39
+
40
+ #### Soniox: Use Current Generated Batch API Endpoints
41
+
42
+ Soniox batch transcription now uses the generated `/v1` transcription endpoints instead of legacy `/speech/*` calls. File uploads use the generated file API, transcription creation uses generated request/response types, and polling reads job metadata and transcript payloads from the current async API.
43
+
44
+ #### Soniox Streaming: Generate Realtime Types from Official SDK
45
+
46
+ Soniox realtime streaming types are now generated from the official `@soniox/speech-to-text-web` SDK instead of a handwritten local spec. The generation pipeline still enriches `model` and `audioFormat` with curated enums so field metadata, select options, and autocomplete remain available.
47
+
8
48
  ## [0.9.0] - 2026-04-24
9
49
 
10
50
  ### Changed
@@ -3415,7 +3415,7 @@ declare const OpenAIModel: {
3415
3415
  readonly "whisper-1": "whisper-1";
3416
3416
  };
3417
3417
  declare const OpenAIModelCodes: readonly ["gpt-4o-mini-realtime-preview", "gpt-4o-mini-realtime-preview-2024-12-17", "gpt-4o-mini-transcribe", "gpt-4o-mini-transcribe-2025-12-15", "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-realtime-preview-2024-12-17", "gpt-4o-realtime-preview-2025-06-03", "gpt-4o-transcribe", "gpt-4o-transcribe-diarize", "gpt-audio-1.5", "gpt-audio-mini", "gpt-audio-mini-2025-10-06", "gpt-audio-mini-2025-12-15", "gpt-realtime", "gpt-realtime-1.5", "gpt-realtime-2025-08-28", "gpt-realtime-mini", "gpt-realtime-mini-2025-10-06", "gpt-realtime-mini-2025-12-15", "whisper-1"];
3418
- declare const OpenAIModelLabels: Record<"whisper-1" | "gpt-4o-transcribe" | "gpt-4o-mini-transcribe" | "gpt-4o-mini-transcribe-2025-12-15" | "gpt-4o-transcribe-diarize" | "gpt-4o-mini-realtime-preview" | "gpt-4o-mini-realtime-preview-2024-12-17" | "gpt-4o-realtime-preview" | "gpt-4o-realtime-preview-2024-10-01" | "gpt-4o-realtime-preview-2024-12-17" | "gpt-4o-realtime-preview-2025-06-03" | "gpt-audio-1.5" | "gpt-audio-mini" | "gpt-audio-mini-2025-10-06" | "gpt-audio-mini-2025-12-15" | "gpt-realtime" | "gpt-realtime-1.5" | "gpt-realtime-2025-08-28" | "gpt-realtime-mini" | "gpt-realtime-mini-2025-10-06" | "gpt-realtime-mini-2025-12-15", string>;
3418
+ declare const OpenAIModelLabels: Record<"gpt-4o-mini-realtime-preview" | "gpt-4o-mini-realtime-preview-2024-12-17" | "gpt-4o-mini-transcribe" | "gpt-4o-mini-transcribe-2025-12-15" | "gpt-4o-realtime-preview" | "gpt-4o-realtime-preview-2024-10-01" | "gpt-4o-realtime-preview-2024-12-17" | "gpt-4o-realtime-preview-2025-06-03" | "gpt-4o-transcribe" | "gpt-4o-transcribe-diarize" | "gpt-audio-1.5" | "gpt-audio-mini" | "gpt-audio-mini-2025-10-06" | "gpt-audio-mini-2025-12-15" | "gpt-realtime" | "gpt-realtime-1.5" | "gpt-realtime-2025-08-28" | "gpt-realtime-mini" | "gpt-realtime-mini-2025-10-06" | "gpt-realtime-mini-2025-12-15" | "whisper-1", string>;
3419
3419
  /**
3420
3420
  * OpenAI Realtime API models (streaming)
3421
3421
  * @see scripts/generate-openai-models.js
@@ -3415,7 +3415,7 @@ declare const OpenAIModel: {
3415
3415
  readonly "whisper-1": "whisper-1";
3416
3416
  };
3417
3417
  declare const OpenAIModelCodes: readonly ["gpt-4o-mini-realtime-preview", "gpt-4o-mini-realtime-preview-2024-12-17", "gpt-4o-mini-transcribe", "gpt-4o-mini-transcribe-2025-12-15", "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-realtime-preview-2024-12-17", "gpt-4o-realtime-preview-2025-06-03", "gpt-4o-transcribe", "gpt-4o-transcribe-diarize", "gpt-audio-1.5", "gpt-audio-mini", "gpt-audio-mini-2025-10-06", "gpt-audio-mini-2025-12-15", "gpt-realtime", "gpt-realtime-1.5", "gpt-realtime-2025-08-28", "gpt-realtime-mini", "gpt-realtime-mini-2025-10-06", "gpt-realtime-mini-2025-12-15", "whisper-1"];
3418
- declare const OpenAIModelLabels: Record<"whisper-1" | "gpt-4o-transcribe" | "gpt-4o-mini-transcribe" | "gpt-4o-mini-transcribe-2025-12-15" | "gpt-4o-transcribe-diarize" | "gpt-4o-mini-realtime-preview" | "gpt-4o-mini-realtime-preview-2024-12-17" | "gpt-4o-realtime-preview" | "gpt-4o-realtime-preview-2024-10-01" | "gpt-4o-realtime-preview-2024-12-17" | "gpt-4o-realtime-preview-2025-06-03" | "gpt-audio-1.5" | "gpt-audio-mini" | "gpt-audio-mini-2025-10-06" | "gpt-audio-mini-2025-12-15" | "gpt-realtime" | "gpt-realtime-1.5" | "gpt-realtime-2025-08-28" | "gpt-realtime-mini" | "gpt-realtime-mini-2025-10-06" | "gpt-realtime-mini-2025-12-15", string>;
3418
+ declare const OpenAIModelLabels: Record<"gpt-4o-mini-realtime-preview" | "gpt-4o-mini-realtime-preview-2024-12-17" | "gpt-4o-mini-transcribe" | "gpt-4o-mini-transcribe-2025-12-15" | "gpt-4o-realtime-preview" | "gpt-4o-realtime-preview-2024-10-01" | "gpt-4o-realtime-preview-2024-12-17" | "gpt-4o-realtime-preview-2025-06-03" | "gpt-4o-transcribe" | "gpt-4o-transcribe-diarize" | "gpt-audio-1.5" | "gpt-audio-mini" | "gpt-audio-mini-2025-10-06" | "gpt-audio-mini-2025-12-15" | "gpt-realtime" | "gpt-realtime-1.5" | "gpt-realtime-2025-08-28" | "gpt-realtime-mini" | "gpt-realtime-mini-2025-10-06" | "gpt-realtime-mini-2025-12-15" | "whisper-1", string>;
3419
3419
  /**
3420
3420
  * OpenAI Realtime API models (streaming)
3421
3421
  * @see scripts/generate-openai-models.js