voice-router-dev 0.8.9 → 0.9.1

This diff reflects the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -225,9 +225,7 @@ var listenTranscribeQueryParams = zod.object({
  ),
  dictation: zod.boolean().optional().describe("Dictation mode for controlling formatting with dictated speech"),
  encoding: zod.enum(["linear16", "flac", "mulaw", "amr-nb", "amr-wb", "opus", "speex", "g729"]).optional().describe("Specify the expected encoding of your submitted audio"),
- filler_words: zod.boolean().optional().describe(
- 'Filler Words can help transcribe interruptions in your audio, like "uh" and "um"'
- ),
+ filler_words: zod.boolean().optional().describe('Filler Words can help transcribe interruptions in your audio, like "uh" and "um"'),
  keyterm: zod.array(zod.string()).optional().describe(
  "Key term prompting can boost or suppress specialized terminology and brands. Only compatible with Nova-3"
  ),
@@ -917,10 +915,10 @@ var createTranscriptBody = zod3.object({
  "The point in time, in milliseconds, to begin transcribing in your media file. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
  ),
  auto_chapters: zod3.boolean().optional().describe(
- "Enable [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Enable [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  auto_highlights: zod3.boolean().optional().describe(
- "Enable [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases), either true or false"
+ "Enable [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights), either true or false"
  ),
  content_safety: zod3.boolean().optional().describe(
  "Enable [Content Moderation](https://www.assemblyai.com/docs/content-moderation), can be true or false"
@@ -936,16 +934,16 @@ var createTranscriptBody = zod3.object({
  "Object containing words or phrases to replace, and the word or phrase to replace with"
  )
  ).optional().describe(
- "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/correct-spelling-of-terms) for more details."
  ),
  disfluencies: zod3.boolean().optional().describe(
- 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/include-filler-words), like "umm", in your media file; can be true or false'
  ),
  domain: zod3.string().nullish().describe(
  'Enable domain-specific transcription models to improve accuracy for specialized terminology. Set to `"medical-v1"` to enable [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) for improved accuracy of medical terms such as medications, procedures, conditions, and dosages.\n\nSupported languages: English (`en`), Spanish (`es`), German (`de`), French (`fr`). If used with an unsupported language, the parameter is ignored and a warning is returned.\n'
  ),
  entity_detection: zod3.boolean().optional().describe(
- "Enable [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection), can be true or false"
+ "Enable [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript), can be true or false"
  ),
  filter_profanity: zod3.boolean().optional().describe(
  "Filter profanity from the transcribed text, can be true or false. See [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) for more details."
@@ -954,7 +952,7 @@ var createTranscriptBody = zod3.object({
  "Enable [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio), can be true or false"
  ),
  iab_categories: zod3.boolean().optional().describe(
- "Enable [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection), can be true or false"
+ "Enable [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics), can be true or false"
  ),
  keyterms_prompt: zod3.array(zod3.string()).optional().describe(
  "Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3 Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.\n"
@@ -1202,7 +1200,7 @@ var createTranscriptBody = zod3.object({
  "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
  ),
  multichannel: zod3.boolean().optional().describe(
- "Enable [Multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) transcription, can be true or false."
+ "Enable [Multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) transcription, can be true or false."
  ),
  prompt: zod3.string().optional().describe(
  "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
@@ -1285,23 +1283,23 @@ var createTranscriptBody = zod3.object({
  "The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
  ),
  sentiment_analysis: zod3.boolean().optional().describe(
- "Enable [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis), can be true or false"
+ "Enable [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech), can be true or false"
  ),
  speaker_labels: zod3.boolean().optional().describe(
- "Enable [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization), can be true or false"
+ "Enable [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers), can be true or false"
  ),
  speaker_options: zod3.object({
  min_speakers_expected: zod3.number().default(createTranscriptBodySpeakerOptionsMinSpeakersExpectedDefault).describe(
- "The minimum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details."
+ "The minimum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-a-range-of-possible-speakers) for more details."
  ),
  max_speakers_expected: zod3.number().optional().describe(
- "<Warning>Setting this parameter too high may hurt model accuracy</Warning>\nThe maximum number of speakers expected in the audio file. The default depends on audio duration: no limit for 0-2 minutes, 10 for 2-10 minutes, and 30 for 10+ minutes. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details.\n"
+ "<Warning>Setting this parameter too high may hurt model accuracy</Warning>\nThe maximum number of speakers expected in the audio file. The default depends on audio duration: no limit for 0-2 minutes, 10 for 2-10 minutes, and 30 for 10+ minutes. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-a-range-of-possible-speakers) for more details.\n"
  )
  }).optional().describe(
- "Specify options for [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers). Use this to set a range of possible speakers."
+ "Specify options for [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-a-range-of-possible-speakers). Use this to set a range of possible speakers."
  ),
  speakers_expected: zod3.number().nullish().describe(
- "Tells the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
+ "Tells the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-number-of-speakers-expected) for more details."
  ),
  speech_models: zod3.array(
  zod3.string().describe(
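
As an illustration of the diarization options touched by the hunk above, here is a minimal TypeScript sketch of how a caller might validate a speaker range with zod. It is a standalone reconstruction of the fragment shown in the diff, not an import from voice-router-dev, and the default of 1 for min_speakers_expected is an assumption (the diff references a constant whose value is not shown).

import { z } from "zod";

// Standalone reconstruction of the speaker_options shape; the default value
// 1 for min_speakers_expected is a placeholder assumption.
const speakerOptions = z
  .object({
    min_speakers_expected: z.number().default(1),
    max_speakers_expected: z.number().optional()
  })
  .optional();

// Ask the diarization model to look for between 2 and 4 speakers.
const opts = speakerOptions.parse({
  min_speakers_expected: 2,
  max_speakers_expected: 4
});
console.log(opts); // { min_speakers_expected: 2, max_speakers_expected: 4 }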
@@ -1377,7 +1375,7 @@ var createTranscriptBody = zod3.object({
  "Enable speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.\n"
  ),
  summarization: zod3.boolean().optional().describe(
- "Enable [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Enable [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  summary_model: zod3.enum(["informative", "conversational", "catchy"]).optional().describe("The model to summarize the transcript"),
  summary_type: zod3.enum(["bullets", "bullets_verbose", "gist", "headline", "paragraph"]).optional().describe("The type of summary"),
@@ -1386,6 +1384,9 @@ var createTranscriptBody = zod3.object({
  ).or(zod3.null()).optional().describe(
  'Remove [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) from the transcript text. Set to `"all"` to remove all audio tags.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n'
  ),
+ temperature: zod3.number().optional().describe(
+ "Control the amount of randomness injected into the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
+ ),
  webhook_auth_header_name: zod3.string().nullish().describe(
  "The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests"
  ),
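
The hunk above introduces the new temperature request parameter in 0.9.1. Below is a minimal sketch of validating a request body with this field, using a standalone zod fragment rather than the package's own (unshown) exports; per the describe text, the field is only meaningful for the Universal-3 Pro model.

import { z } from "zod";

// Standalone fragment mirroring the request-side schema above.
const createBodyFragment = z.object({
  prompt: z.string().optional(),
  temperature: z.number().optional()
});

// Lower temperature means less randomness in the model's response.
const body = createBodyFragment.parse({
  prompt: "A phone call between a pharmacist and a patient about dosages.",
  temperature: 0.2
});
console.log(body.temperature); // 0.2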
@@ -1404,7 +1405,7 @@ var createTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThres
  var createTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault = true;
  var createTranscriptResponse = zod3.object({
  audio_channels: zod3.number().optional().describe(
- "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
+ "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) is enabled."
  ),
  audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
  audio_end_at: zod3.number().nullish().describe(
@@ -1415,10 +1416,10 @@ var createTranscriptResponse = zod3.object({
  ),
  audio_url: zod3.string().describe("The URL of the media that was transcribed"),
  auto_chapters: zod3.boolean().nullish().describe(
- "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  auto_highlights: zod3.boolean().describe(
- "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
+ "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) is enabled, either true or false"
  ),
  auto_highlights_result: zod3.object({
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -1438,9 +1439,9 @@ var createTranscriptResponse = zod3.object({
  })
  ).describe("A temporally-sequential array of Key Phrases")
  }).describe(
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
  ).or(zod3.null()).optional().describe(
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
  ),
  chapters: zod3.array(
  zod3.object({
@@ -1453,7 +1454,7 @@ var createTranscriptResponse = zod3.object({
  end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
  }).describe("Chapter of the audio file")
  ).nullish().describe(
- "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
+ "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for more information."
  ),
  confidence: zod3.number().nullish().describe(
  "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
@@ -1509,10 +1510,10 @@ var createTranscriptResponse = zod3.object({
  "Object containing words or phrases to replace, and the word or phrase to replace with"
  )
  ).nullish().describe(
- "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/correct-spelling-of-terms) for more details."
  ),
  disfluencies: zod3.boolean().nullish().describe(
- 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/include-filler-words), like "umm", in your media file; can be true or false'
  ),
  domain: zod3.string().nullish().describe(
  'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
@@ -1574,10 +1575,10 @@ var createTranscriptResponse = zod3.object({
  )
  }).describe("A detected entity")
  ).nullish().describe(
- "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
+ "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) for more information.\n"
  ),
  entity_detection: zod3.boolean().nullish().describe(
- "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
+ "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) is enabled, can be true or false"
  ),
  error: zod3.string().optional().describe("Error message of why the transcript failed"),
  filter_profanity: zod3.boolean().nullish().describe(
@@ -1587,7 +1588,7 @@ var createTranscriptResponse = zod3.object({
  "Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
  ),
  iab_categories: zod3.boolean().nullish().describe(
- "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
+ "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) is enabled, can be true or false"
  ),
  iab_categories_result: zod3.object({
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -1610,9 +1611,9 @@ var createTranscriptResponse = zod3.object({
  ).describe("An array of results for the Topic Detection model"),
  summary: zod3.record(zod3.string(), zod3.number()).describe("The overall relevance of topic to the entire audio file")
  }).describe(
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
  ).or(zod3.null()).optional().describe(
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
  ),
  id: zod3.string().uuid().describe("The unique identifier of your transcript"),
  keyterms_prompt: zod3.array(zod3.string()).optional().describe(
@@ -1862,7 +1863,7 @@ var createTranscriptResponse = zod3.object({
  "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
  ),
  multichannel: zod3.boolean().nullish().describe(
- "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
+ "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) was enabled in the transcription request, either true or false"
  ),
  prompt: zod3.string().optional().describe(
  "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
@@ -1945,7 +1946,7 @@ var createTranscriptResponse = zod3.object({
  "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
  ),
  sentiment_analysis: zod3.boolean().nullish().describe(
- "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
+ "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) is enabled, can be true or false"
  ),
  sentiment_analysis_results: zod3.array(
  zod3.object({
@@ -1960,17 +1961,17 @@ var createTranscriptResponse = zod3.object({
  "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
  ),
  speaker: zod3.string().nullable().describe(
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
  )
  }).describe("The result of the Sentiment Analysis model")
  ).nullish().describe(
- "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
+ "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) for more information.\n"
  ),
  speaker_labels: zod3.boolean().nullish().describe(
- "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
+ "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, can be true or false"
  ),
  speakers_expected: zod3.number().nullish().describe(
- "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
+ "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-number-of-speakers-expected) for more details."
  ),
  speech_model_used: zod3.string().optional().describe(
  "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
@@ -2073,22 +2074,25 @@ var createTranscriptResponse = zod3.object({
  "The status of your transcript. Possible values are queued, processing, completed, or error."
  ),
  summarization: zod3.boolean().describe(
- "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  summary: zod3.string().nullish().describe(
- "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
+ "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
  ),
  summary_model: zod3.string().nullish().describe(
- "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
+ "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n"
  ),
  summary_type: zod3.string().nullish().describe(
- "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
+ "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
  ),
  remove_audio_tags: zod3.enum(["all"]).describe(
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
  ).or(zod3.null()).optional().describe(
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
  ),
+ temperature: zod3.number().nullish().describe(
+ "The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
+ ),
  text: zod3.string().nullish().describe("The textual transcript of your media file"),
  throttled: zod3.boolean().nullish().describe(
  "True while a request is throttled and false when a request is no longer throttled"
@@ -2109,7 +2113,7 @@ var createTranscriptResponse = zod3.object({
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
  ),
  speaker: zod3.string().nullable().describe(
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
  )
  })
  ).describe("The words in the utterance."),
@@ -2124,7 +2128,7 @@ var createTranscriptResponse = zod3.object({
  )
  })
  ).nullish().describe(
- "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
+ "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) for more information.\n"
  ),
  webhook_auth: zod3.boolean().describe(
  "Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
@@ -2148,7 +2152,7 @@ var createTranscriptResponse = zod3.object({
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
  ),
  speaker: zod3.string().nullable().describe(
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
  )
  })
  ).nullish().describe(
@@ -2217,7 +2221,7 @@ var getTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThreshol
  var getTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault = true;
  var getTranscriptResponse = zod3.object({
  audio_channels: zod3.number().optional().describe(
- "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
+ "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) is enabled."
  ),
  audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
  audio_end_at: zod3.number().nullish().describe(
@@ -2228,10 +2232,10 @@ var getTranscriptResponse = zod3.object({
  ),
  audio_url: zod3.string().describe("The URL of the media that was transcribed"),
  auto_chapters: zod3.boolean().nullish().describe(
- "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  auto_highlights: zod3.boolean().describe(
- "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
+ "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) is enabled, either true or false"
  ),
  auto_highlights_result: zod3.object({
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -2251,9 +2255,9 @@ var getTranscriptResponse = zod3.object({
  })
  ).describe("A temporally-sequential array of Key Phrases")
  }).describe(
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
  ).or(zod3.null()).optional().describe(
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
  ),
  chapters: zod3.array(
  zod3.object({
@@ -2266,7 +2270,7 @@ var getTranscriptResponse = zod3.object({
  end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
  }).describe("Chapter of the audio file")
  ).nullish().describe(
- "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
+ "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for more information."
  ),
  confidence: zod3.number().nullish().describe(
  "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
@@ -2322,10 +2326,10 @@ var getTranscriptResponse = zod3.object({
  "Object containing words or phrases to replace, and the word or phrase to replace with"
  )
  ).nullish().describe(
- "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/correct-spelling-of-terms) for more details."
  ),
  disfluencies: zod3.boolean().nullish().describe(
- 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/include-filler-words), like "umm", in your media file; can be true or false'
  ),
  domain: zod3.string().nullish().describe(
  'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
@@ -2387,10 +2391,10 @@ var getTranscriptResponse = zod3.object({
  )
  }).describe("A detected entity")
  ).nullish().describe(
- "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
+ "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) for more information.\n"
  ),
  entity_detection: zod3.boolean().nullish().describe(
- "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
+ "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) is enabled, can be true or false"
  ),
  error: zod3.string().optional().describe("Error message of why the transcript failed"),
  filter_profanity: zod3.boolean().nullish().describe(
@@ -2400,7 +2404,7 @@ var getTranscriptResponse = zod3.object({
  "Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
  ),
  iab_categories: zod3.boolean().nullish().describe(
- "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
+ "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) is enabled, can be true or false"
  ),
  iab_categories_result: zod3.object({
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -2423,9 +2427,9 @@ var getTranscriptResponse = zod3.object({
  ).describe("An array of results for the Topic Detection model"),
  summary: zod3.record(zod3.string(), zod3.number()).describe("The overall relevance of topic to the entire audio file")
  }).describe(
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
  ).or(zod3.null()).optional().describe(
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
  ),
  id: zod3.string().uuid().describe("The unique identifier of your transcript"),
  keyterms_prompt: zod3.array(zod3.string()).optional().describe(
@@ -2675,7 +2679,7 @@ var getTranscriptResponse = zod3.object({
  "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
  ),
  multichannel: zod3.boolean().nullish().describe(
- "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
+ "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) was enabled in the transcription request, either true or false"
  ),
  prompt: zod3.string().optional().describe(
  "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
@@ -2758,7 +2762,7 @@ var getTranscriptResponse = zod3.object({
  "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
  ),
  sentiment_analysis: zod3.boolean().nullish().describe(
- "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
+ "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) is enabled, can be true or false"
  ),
  sentiment_analysis_results: zod3.array(
  zod3.object({
@@ -2773,17 +2777,17 @@ var getTranscriptResponse = zod3.object({
  "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
  ),
  speaker: zod3.string().nullable().describe(
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
  )
  }).describe("The result of the Sentiment Analysis model")
  ).nullish().describe(
- "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
+ "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) for more information.\n"
  ),
  speaker_labels: zod3.boolean().nullish().describe(
- "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
+ "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, can be true or false"
  ),
  speakers_expected: zod3.number().nullish().describe(
- "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
+ "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-number-of-speakers-expected) for more details."
  ),
  speech_model_used: zod3.string().optional().describe(
  "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
@@ -2886,22 +2890,25 @@ var getTranscriptResponse = zod3.object({
  "The status of your transcript. Possible values are queued, processing, completed, or error."
  ),
  summarization: zod3.boolean().describe(
- "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  summary: zod3.string().nullish().describe(
- "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
+ "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
  ),
  summary_model: zod3.string().nullish().describe(
- "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
+ "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n"
  ),
  summary_type: zod3.string().nullish().describe(
- "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
+ "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
  ),
  remove_audio_tags: zod3.enum(["all"]).describe(
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
  ).or(zod3.null()).optional().describe(
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
  ),
+ temperature: zod3.number().nullish().describe(
+ "The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
+ ),
  text: zod3.string().nullish().describe("The textual transcript of your media file"),
  throttled: zod3.boolean().nullish().describe(
  "True while a request is throttled and false when a request is no longer throttled"
@@ -2922,7 +2929,7 @@ var getTranscriptResponse = zod3.object({
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
  ),
  speaker: zod3.string().nullable().describe(
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
  )
  })
  ).describe("The words in the utterance."),
@@ -2937,7 +2944,7 @@ var getTranscriptResponse = zod3.object({
  )
  })
  ).nullish().describe(
- "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
+ "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) for more information.\n"
  ),
  webhook_auth: zod3.boolean().describe(
  "Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
@@ -2961,7 +2968,7 @@ var getTranscriptResponse = zod3.object({
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
  ),
  speaker: zod3.string().nullable().describe(
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
  )
  })
  ).nullish().describe(
@@ -2991,7 +2998,7 @@ var deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThres
  var deleteTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault = true;
  var deleteTranscriptResponse = zod3.object({
  audio_channels: zod3.number().optional().describe(
- "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
+ "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) is enabled."
  ),
  audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
  audio_end_at: zod3.number().nullish().describe(
@@ -3002,10 +3009,10 @@ var deleteTranscriptResponse = zod3.object({
  ),
  audio_url: zod3.string().describe("The URL of the media that was transcribed"),
  auto_chapters: zod3.boolean().nullish().describe(
- "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  auto_highlights: zod3.boolean().describe(
- "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
+ "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) is enabled, either true or false"
  ),
  auto_highlights_result: zod3.object({
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -3025,9 +3032,9 @@ var deleteTranscriptResponse = zod3.object({
  })
  ).describe("A temporally-sequential array of Key Phrases")
  }).describe(
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
  ).or(zod3.null()).optional().describe(
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
  ),
  chapters: zod3.array(
  zod3.object({
@@ -3040,7 +3047,7 @@ var deleteTranscriptResponse = zod3.object({
3040
3047
  end: zod3.number().describe("The ending time, in milliseconds, for the chapter")
3041
3048
  }).describe("Chapter of the audio file")
3042
3049
  ).nullish().describe(
3043
- "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
3050
+ "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for more information."
3044
3051
  ),
3045
3052
  confidence: zod3.number().nullish().describe(
3046
3053
  "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
@@ -3096,10 +3103,10 @@ var deleteTranscriptResponse = zod3.object({
3096
3103
  "Object containing words or phrases to replace, and the word or phrase to replace with"
3097
3104
  )
3098
3105
  ).nullish().describe(
3099
- "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
3106
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/correct-spelling-of-terms) for more details."
3100
3107
  ),
3101
3108
  disfluencies: zod3.boolean().nullish().describe(
3102
- 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
3109
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/include-filler-words), like "umm", in your media file; can be true or false'
3103
3110
  ),
3104
3111
  domain: zod3.string().nullish().describe(
3105
3112
  'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
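These hunks only swap doc slugs (custom-spelling → correct-spelling-of-terms, filler-words → include-filler-words); the request shape itself is unchanged. A minimal TypeScript sketch of that shape, assuming the from/to key names implied by the descriptions above (this mirrors, rather than imports, the bundled schema):

import { z } from "zod";

// Sketch only: `from` lists the words or phrases to replace, `to` is the
// replacement, matching the "to and from values" wording in the schema.
const customSpellingFragment = z.object({
  custom_spelling: z
    .array(z.object({ from: z.array(z.string()), to: z.string() }))
    .optional(),
  disfluencies: z.boolean().optional(),
});

const body = customSpellingFragment.parse({
  custom_spelling: [{ from: ["assembly ai"], to: "AssemblyAI" }],
  disfluencies: true, // keep filler words like "umm" in the transcript
});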
@@ -3161,10 +3168,10 @@ var deleteTranscriptResponse = zod3.object({
3161
3168
  )
3162
3169
  }).describe("A detected entity")
3163
3170
  ).nullish().describe(
3164
- "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
3171
+ "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) for more information.\n"
3165
3172
  ),
3166
3173
  entity_detection: zod3.boolean().nullish().describe(
3167
- "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
3174
+ "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) is enabled, can be true or false"
3168
3175
  ),
3169
3176
  error: zod3.string().optional().describe("Error message of why the transcript failed"),
3170
3177
  filter_profanity: zod3.boolean().nullish().describe(
@@ -3174,7 +3181,7 @@ var deleteTranscriptResponse = zod3.object({
3174
3181
  "Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
3175
3182
  ),
3176
3183
  iab_categories: zod3.boolean().nullish().describe(
3177
- "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
3184
+ "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) is enabled, can be true or false"
3178
3185
  ),
3179
3186
  iab_categories_result: zod3.object({
3180
3187
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -3197,9 +3204,9 @@ var deleteTranscriptResponse = zod3.object({
3197
3204
  ).describe("An array of results for the Topic Detection model"),
3198
3205
  summary: zod3.record(zod3.string(), zod3.number()).describe("The overall relevance of topic to the entire audio file")
3199
3206
  }).describe(
3200
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
3207
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
3201
3208
  ).or(zod3.null()).optional().describe(
3202
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
3209
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
3203
3210
  ),
3204
3211
  id: zod3.string().uuid().describe("The unique identifier of your transcript"),
3205
3212
  keyterms_prompt: zod3.array(zod3.string()).optional().describe(
@@ -3449,7 +3456,7 @@ var deleteTranscriptResponse = zod3.object({
3449
3456
  "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
3450
3457
  ),
3451
3458
  multichannel: zod3.boolean().nullish().describe(
3452
- "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
3459
+ "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) was enabled in the transcription request, either true or false"
3453
3460
  ),
3454
3461
  prompt: zod3.string().optional().describe(
3455
3462
  "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
@@ -3532,7 +3539,7 @@ var deleteTranscriptResponse = zod3.object({
3532
3539
  "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
3533
3540
  ),
3534
3541
  sentiment_analysis: zod3.boolean().nullish().describe(
3535
- "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
3542
+ "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) is enabled, can be true or false"
3536
3543
  ),
3537
3544
  sentiment_analysis_results: zod3.array(
3538
3545
  zod3.object({
@@ -3547,17 +3554,17 @@ var deleteTranscriptResponse = zod3.object({
3547
3554
  "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
3548
3555
  ),
3549
3556
  speaker: zod3.string().nullable().describe(
3550
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
3557
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
3551
3558
  )
3552
3559
  }).describe("The result of the Sentiment Analysis model")
3553
3560
  ).nullish().describe(
3554
- "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
3561
+ "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) for more information.\n"
3555
3562
  ),
3556
3563
  speaker_labels: zod3.boolean().nullish().describe(
3557
- "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
3564
+ "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, can be true or false"
3558
3565
  ),
3559
3566
  speakers_expected: zod3.number().nullish().describe(
3560
- "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
3567
+ "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-number-of-speakers-expected) for more details."
3561
3568
  ),
3562
3569
  speech_model_used: zod3.string().optional().describe(
3563
3570
  "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
@@ -3660,22 +3667,25 @@ var deleteTranscriptResponse = zod3.object({
3660
3667
  "The status of your transcript. Possible values are queued, processing, completed, or error."
3661
3668
  ),
3662
3669
  summarization: zod3.boolean().describe(
3663
- "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
3670
+ "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
3664
3671
  ),
3665
3672
  summary: zod3.string().nullish().describe(
3666
- "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
3673
+ "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
3667
3674
  ),
3668
3675
  summary_model: zod3.string().nullish().describe(
3669
- "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
3676
+ "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n"
3670
3677
  ),
3671
3678
  summary_type: zod3.string().nullish().describe(
3672
- "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
3679
+ "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
3673
3680
  ),
3674
3681
  remove_audio_tags: zod3.enum(["all"]).describe(
3675
3682
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
3676
3683
  ).or(zod3.null()).optional().describe(
3677
3684
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
3678
3685
  ),
3686
+ temperature: zod3.number().nullish().describe(
3687
+ "The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
3688
+ ),
3679
3689
  text: zod3.string().nullish().describe("The textual transcript of your media file"),
3680
3690
  throttled: zod3.boolean().nullish().describe(
3681
3691
  "True while a request is throttled and false when a request is no longer throttled"
@@ -3696,7 +3706,7 @@ var deleteTranscriptResponse = zod3.object({
3696
3706
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
3697
3707
  ),
3698
3708
  speaker: zod3.string().nullable().describe(
3699
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
3709
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
3700
3710
  )
3701
3711
  })
3702
3712
  ).describe("The words in the utterance."),
@@ -3711,7 +3721,7 @@ var deleteTranscriptResponse = zod3.object({
3711
3721
  )
3712
3722
  })
3713
3723
  ).nullish().describe(
3714
- "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
3724
+ "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) for more information.\n"
3715
3725
  ),
3716
3726
  webhook_auth: zod3.boolean().describe(
3717
3727
  "Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
@@ -3735,7 +3745,7 @@ var deleteTranscriptResponse = zod3.object({
3735
3745
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
3736
3746
  ),
3737
3747
  speaker: zod3.string().nullable().describe(
3738
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
3748
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
3739
3749
  )
3740
3750
  })
3741
3751
  ).nullish().describe(
@@ -3780,7 +3790,7 @@ var getTranscriptSentencesResponse = zod3.object({
3780
3790
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
3781
3791
  ),
3782
3792
  speaker: zod3.string().nullable().describe(
3783
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
3793
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
3784
3794
  )
3785
3795
  })
3786
3796
  ).describe("An array of words in the sentence"),
@@ -3788,7 +3798,7 @@ var getTranscriptSentencesResponse = zod3.object({
3788
3798
  "The channel of the sentence. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
3789
3799
  ),
3790
3800
  speaker: zod3.string().nullable().describe(
3791
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
3801
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
3792
3802
  )
3793
3803
  })
3794
3804
  ).describe("An array of sentences in the transcript")
@@ -3816,7 +3826,7 @@ var getTranscriptParagraphsResponse = zod3.object({
3816
3826
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
3817
3827
  ),
3818
3828
  speaker: zod3.string().nullable().describe(
3819
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
3829
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
3820
3830
  )
3821
3831
  })
3822
3832
  ).describe("An array of words in the paragraph")
@@ -5695,23 +5705,6 @@ var preRecordedControllerGetPreRecordedJobsV2Response = zod5.object({
5695
5705
  }).optional().describe(
5696
5706
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
5697
5707
  ),
5698
- speaker_reidentification: zod5.object({
5699
- success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
5700
- is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
5701
- exec_time: zod5.number().describe("Time audio intelligence model took to complete the task"),
5702
- error: zod5.object({
5703
- status_code: zod5.number().describe("Status code of the addon error"),
5704
- exception: zod5.string().describe("Reason of the addon error"),
5705
- message: zod5.string().describe("Detailed message of the addon error")
5706
- }).nullable().describe(
5707
- "`null` if `success` is `true`. Contains the error details of the failed model"
5708
- ),
5709
- results: zod5.string().describe(
5710
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
5711
- )
5712
- }).optional().describe(
5713
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
5714
- ),
5715
5708
  structured_data_extraction: zod5.object({
5716
5709
  success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
5717
5710
  is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
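With speaker_reidentification removed from the Gladia result schema, typed consumers lose the field entirely. A hedged migration sketch, assuming a result object parsed with the new schema:

// Parsed pre-recorded job result (assumed shape); the typed schema no
// longer carries speaker_reidentification, so any leftover consumer must
// go through an untyped escape hatch.
declare const result: Record<string, unknown>;

const reid = result["speaker_reidentification"];
if (reid !== undefined) {
  console.warn("speaker_reidentification was removed from the schema in 0.9.1; ignoring it");
}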
@@ -7176,23 +7169,6 @@ var preRecordedControllerGetPreRecordedJobV2Response = zod5.object({
7176
7169
  }).optional().describe(
7177
7170
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
7178
7171
  ),
7179
- speaker_reidentification: zod5.object({
7180
- success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
7181
- is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
7182
- exec_time: zod5.number().describe("Time audio intelligence model took to complete the task"),
7183
- error: zod5.object({
7184
- status_code: zod5.number().describe("Status code of the addon error"),
7185
- exception: zod5.string().describe("Reason of the addon error"),
7186
- message: zod5.string().describe("Detailed message of the addon error")
7187
- }).nullable().describe(
7188
- "`null` if `success` is `true`. Contains the error details of the failed model"
7189
- ),
7190
- results: zod5.string().describe(
7191
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
7192
- )
7193
- }).optional().describe(
7194
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
7195
- ),
7196
7172
  structured_data_extraction: zod5.object({
7197
7173
  success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
7198
7174
  is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
@@ -9280,23 +9256,6 @@ var transcriptionControllerListV2Response = zod5.object({
9280
9256
  }).optional().describe(
9281
9257
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
9282
9258
  ),
9283
- speaker_reidentification: zod5.object({
9284
- success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
9285
- is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
9286
- exec_time: zod5.number().describe("Time audio intelligence model took to complete the task"),
9287
- error: zod5.object({
9288
- status_code: zod5.number().describe("Status code of the addon error"),
9289
- exception: zod5.string().describe("Reason of the addon error"),
9290
- message: zod5.string().describe("Detailed message of the addon error")
9291
- }).nullable().describe(
9292
- "`null` if `success` is `true`. Contains the error details of the failed model"
9293
- ),
9294
- results: zod5.string().describe(
9295
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
9296
- )
9297
- }).optional().describe(
9298
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
9299
- ),
9300
9259
  structured_data_extraction: zod5.object({
9301
9260
  success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
9302
9261
  is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
@@ -9596,11 +9555,7 @@ var transcriptionControllerListV2Response = zod5.object({
9596
9555
  channels: zod5.number().min(1).max(transcriptionControllerListV2ResponseItemsItemRequestParamsChannelsMax).default(
9597
9556
  transcriptionControllerListV2ResponseItemsItemRequestParamsChannelsDefault
9598
9557
  ).describe("The number of channels of the audio stream"),
9599
- model: zod5.enum(["solaria-1"]).describe(
9600
- 'The model used to process the audio. "solaria-1" is used by default.'
9601
- ).default(transcriptionControllerListV2ResponseItemsItemRequestParamsModelDefault).describe(
9602
- 'The model used to process the audio. "solaria-1" is used by default.'
9603
- ),
9558
+ model: zod5.enum(["solaria-1"]).describe('The model used to process the audio. "solaria-1" is used by default.').default(transcriptionControllerListV2ResponseItemsItemRequestParamsModelDefault).describe('The model used to process the audio. "solaria-1" is used by default.'),
9604
9559
  endpointing: zod5.number().min(transcriptionControllerListV2ResponseItemsItemRequestParamsEndpointingMin).max(transcriptionControllerListV2ResponseItemsItemRequestParamsEndpointingMax).default(
9605
9560
  transcriptionControllerListV2ResponseItemsItemRequestParamsEndpointingDefault
9606
9561
  ).describe(
@@ -11990,23 +11945,6 @@ var transcriptionControllerGetTranscriptV2Response = zod5.discriminatedUnion("ki
11990
11945
  }).optional().describe(
11991
11946
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
11992
11947
  ),
11993
- speaker_reidentification: zod5.object({
11994
- success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
11995
- is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
11996
- exec_time: zod5.number().describe("Time audio intelligence model took to complete the task"),
11997
- error: zod5.object({
11998
- status_code: zod5.number().describe("Status code of the addon error"),
11999
- exception: zod5.string().describe("Reason of the addon error"),
12000
- message: zod5.string().describe("Detailed message of the addon error")
12001
- }).nullable().describe(
12002
- "`null` if `success` is `true`. Contains the error details of the failed model"
12003
- ),
12004
- results: zod5.string().describe(
12005
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
12006
- )
12007
- }).optional().describe(
12008
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
12009
- ),
12010
11948
  structured_data_extraction: zod5.object({
12011
11949
  success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
12012
11950
  is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
@@ -15129,23 +15067,6 @@ var historyControllerGetListV1Response = zod5.object({
15129
15067
  }).optional().describe(
15130
15068
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
15131
15069
  ),
15132
- speaker_reidentification: zod5.object({
15133
- success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
15134
- is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
15135
- exec_time: zod5.number().describe("Time audio intelligence model took to complete the task"),
15136
- error: zod5.object({
15137
- status_code: zod5.number().describe("Status code of the addon error"),
15138
- exception: zod5.string().describe("Reason of the addon error"),
15139
- message: zod5.string().describe("Detailed message of the addon error")
15140
- }).nullable().describe(
15141
- "`null` if `success` is `true`. Contains the error details of the failed model"
15142
- ),
15143
- results: zod5.string().describe(
15144
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
15145
- )
15146
- }).optional().describe(
15147
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
15148
- ),
15149
15070
  structured_data_extraction: zod5.object({
15150
15071
  success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
15151
15072
  is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
@@ -15439,11 +15360,7 @@ var historyControllerGetListV1Response = zod5.object({
15439
15360
  historyControllerGetListV1ResponseItemsItemRequestParamsSampleRateDefault
15440
15361
  ).describe("The sample rate of the audio stream"),
15441
15362
  channels: zod5.number().min(1).max(historyControllerGetListV1ResponseItemsItemRequestParamsChannelsMax).default(historyControllerGetListV1ResponseItemsItemRequestParamsChannelsDefault).describe("The number of channels of the audio stream"),
15442
- model: zod5.enum(["solaria-1"]).describe(
15443
- 'The model used to process the audio. "solaria-1" is used by default.'
15444
- ).default(historyControllerGetListV1ResponseItemsItemRequestParamsModelDefault).describe(
15445
- 'The model used to process the audio. "solaria-1" is used by default.'
15446
- ),
15363
+ model: zod5.enum(["solaria-1"]).describe('The model used to process the audio. "solaria-1" is used by default.').default(historyControllerGetListV1ResponseItemsItemRequestParamsModelDefault).describe('The model used to process the audio. "solaria-1" is used by default.'),
15447
15364
  endpointing: zod5.number().min(historyControllerGetListV1ResponseItemsItemRequestParamsEndpointingMin).max(historyControllerGetListV1ResponseItemsItemRequestParamsEndpointingMax).default(
15448
15365
  historyControllerGetListV1ResponseItemsItemRequestParamsEndpointingDefault
15449
15366
  ).describe(
@@ -24505,11 +24422,11 @@ var getTranscriptionsResponse = zod10.object({
24505
24422
  });
24506
24423
  var createTranscriptionBodyModelMaxThree = 32;
24507
24424
  var createTranscriptionBodyAudioUrlMaxOne = 4096;
24508
- var createTranscriptionBodyAudioUrlRegExpOne = new RegExp("^https?://[^\\s]+$");
24425
+ var createTranscriptionBodyAudioUrlRegExpOne = /^https?:\/\/[^\s]+$/;
24509
24426
  var createTranscriptionBodyLanguageHintsItemMax = 10;
24510
24427
  var createTranscriptionBodyLanguageHintsMaxOne = 100;
24511
24428
  var createTranscriptionBodyWebhookUrlMaxOne = 256;
24512
- var createTranscriptionBodyWebhookUrlRegExpOne = new RegExp("^https?://[^\\s]+$");
24429
+ var createTranscriptionBodyWebhookUrlRegExpOne = /^https?:\/\/[^\s]+$/;
24513
24430
  var createTranscriptionBodyWebhookAuthHeaderNameMaxOne = 256;
24514
24431
  var createTranscriptionBodyWebhookAuthHeaderValueMaxOne = 256;
24515
24432
  var createTranscriptionBodyClientReferenceIdMaxOne = 256;
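The URL patterns switch from RegExp constructors to regex literals; the matched language is identical, since in the constructor string "//" was already a literal slash pair and "\\s" the whitespace class. A quick check sketch with illustrative URLs:

// Same semantics as the former new RegExp("^https?://[^\\s]+$") form.
const audioUrlPattern = /^https?:\/\/[^\s]+$/;

console.log(audioUrlPattern.test("https://example.com/audio.wav")); // true
console.log(audioUrlPattern.test("https://example.com/a b.wav"));   // false (contains whitespace)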
@@ -24657,15 +24574,20 @@ var getModelsResponse = zod10.object({
24657
24574
  });
24658
24575
  var createTemporaryApiKeyBodyExpiresInSecondsMax = 3600;
24659
24576
  var createTemporaryApiKeyBodyClientReferenceIdMaxOne = 256;
24577
+ var createTemporaryApiKeyBodyMaxSessionDurationSecondsMaxOne = 18e3;
24660
24578
  var createTemporaryApiKeyBody = zod10.object({
24661
24579
  usage_type: zod10.enum(["transcribe_websocket"]),
24662
24580
  expires_in_seconds: zod10.number().min(1).max(createTemporaryApiKeyBodyExpiresInSecondsMax).describe("Duration in seconds until the temporary API key expires."),
24663
- client_reference_id: zod10.string().max(createTemporaryApiKeyBodyClientReferenceIdMaxOne).or(zod10.null()).optional().describe("Optional tracking identifier string. Does not need to be unique.")
24581
+ client_reference_id: zod10.string().max(createTemporaryApiKeyBodyClientReferenceIdMaxOne).or(zod10.null()).optional().describe("Optional tracking identifier string. Does not need to be unique."),
24582
+ single_use: zod10.boolean().or(zod10.null()).optional().describe("If true, the temporary API key can be used only once."),
24583
+ max_session_duration_seconds: zod10.number().min(1).max(createTemporaryApiKeyBodyMaxSessionDurationSecondsMaxOne).or(zod10.null()).optional().describe(
24584
+ "Maximum WebSocket connection duration in seconds. If exceeded, the connection will be dropped. If not set, no limit is applied."
24585
+ )
24664
24586
  });
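The temporary-key body gains single_use and max_session_duration_seconds, the latter capped by the new 18e3-second constant (five hours). A minimal sketch validating a request against the schema defined just above; values are illustrative:

const body = createTemporaryApiKeyBody.parse({
  usage_type: "transcribe_websocket",
  expires_in_seconds: 600,             // key must be redeemed within 10 minutes
  single_use: true,                    // new in 0.9.1: key can be used only once
  max_session_duration_seconds: 3600,  // new in 0.9.1: drop the socket after an hour
});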
24665
24587
 
24666
24588
  // src/generated/soniox/streaming-types.zod.ts
24667
24589
  import { z as zod11 } from "zod";
24668
- var sonioxAutoDetectedAudioFormatSchema = zod11.enum([
24590
+ var sonioxAudioFormatSchema = zod11.enum([
24669
24591
  "auto",
24670
24592
  "aac",
24671
24593
  "aiff",
@@ -24675,10 +24597,7 @@ var sonioxAutoDetectedAudioFormatSchema = zod11.enum([
24675
24597
  "mp3",
24676
24598
  "ogg",
24677
24599
  "wav",
24678
- "webm"
24679
- ]);
24680
- var sonioxPcmAudioEncodingSchema = zod11.enum([
24681
- // Signed PCM
24600
+ "webm",
24682
24601
  "pcm_s8",
24683
24602
  "pcm_s16le",
24684
24603
  "pcm_s16be",
@@ -24686,7 +24605,6 @@ var sonioxPcmAudioEncodingSchema = zod11.enum([
24686
24605
  "pcm_s24be",
24687
24606
  "pcm_s32le",
24688
24607
  "pcm_s32be",
24689
- // Unsigned PCM
24690
24608
  "pcm_u8",
24691
24609
  "pcm_u16le",
24692
24610
  "pcm_u16be",
@@ -24694,86 +24612,81 @@ var sonioxPcmAudioEncodingSchema = zod11.enum([
24694
24612
  "pcm_u24be",
24695
24613
  "pcm_u32le",
24696
24614
  "pcm_u32be",
24697
- // Float PCM
24698
24615
  "pcm_f32le",
24699
24616
  "pcm_f32be",
24700
24617
  "pcm_f64le",
24701
24618
  "pcm_f64be",
24702
- // Companded
24703
24619
  "mulaw",
24704
24620
  "alaw"
24705
24621
  ]);
24706
- var sonioxAudioFormatSchema = zod11.union([
24707
- sonioxAutoDetectedAudioFormatSchema,
24708
- sonioxPcmAudioEncodingSchema
24709
- ]);
24710
24622
  var sonioxOneWayTranslationSchema = zod11.object({
24711
24623
  type: zod11.literal("one_way"),
24712
- target_language: zod11.string().describe("Target language code for translation")
24624
+ target_language: zod11.string()
24713
24625
  });
24714
24626
  var sonioxTwoWayTranslationSchema = zod11.object({
24715
24627
  type: zod11.literal("two_way"),
24716
- language_a: zod11.string().describe("First language for bidirectional translation"),
24717
- language_b: zod11.string().describe("Second language for bidirectional translation")
24628
+ language_a: zod11.string(),
24629
+ language_b: zod11.string()
24718
24630
  });
24719
24631
  var sonioxTranslationConfigSchema = zod11.union([
24720
24632
  sonioxOneWayTranslationSchema,
24721
24633
  sonioxTwoWayTranslationSchema
24722
24634
  ]);
24723
24635
  var sonioxContextGeneralItemSchema = zod11.object({
24724
- key: zod11.string().describe("Context item key (e.g. 'Domain')"),
24725
- value: zod11.string().describe("Context item value (e.g. 'medicine')")
24636
+ key: zod11.string(),
24637
+ value: zod11.string()
24726
24638
  });
24727
24639
  var sonioxTranslationTermSchema = zod11.object({
24728
- source: zod11.string().describe("Source term"),
24729
- target: zod11.string().describe("Target term to translate to")
24640
+ source: zod11.string(),
24641
+ target: zod11.string()
24730
24642
  });
24731
24643
  var sonioxStructuredContextSchema = zod11.object({
24732
- general: zod11.array(sonioxContextGeneralItemSchema).optional().describe("General context items (key-value pairs)"),
24733
- text: zod11.string().optional().describe("Text context"),
24734
- terms: zod11.array(zod11.string()).optional().describe("Terms that might occur in speech"),
24735
- translation_terms: zod11.array(sonioxTranslationTermSchema).optional().describe("Hints how to translate specific terms (ignored if translation is not enabled)")
24644
+ general: zod11.array(sonioxContextGeneralItemSchema).optional(),
24645
+ text: zod11.string().optional(),
24646
+ terms: zod11.array(zod11.string()).optional(),
24647
+ translation_terms: zod11.array(sonioxTranslationTermSchema).optional()
24736
24648
  });
24737
24649
  var sonioxContextSchema = zod11.union([sonioxStructuredContextSchema, zod11.string()]);
24738
24650
  var sonioxRealtimeModelSchema = zod11.enum([
24651
+ "stt-rt-v4",
24739
24652
  "stt-rt-v3",
24740
24653
  "stt-rt-preview",
24741
24654
  "stt-rt-v3-preview",
24742
24655
  "stt-rt-preview-v2"
24743
24656
  ]);
24744
24657
  var streamingTranscriberParams3 = zod11.object({
24745
- model: sonioxRealtimeModelSchema.describe("Real-time model to use"),
24746
- audioFormat: sonioxAudioFormatSchema.optional().describe("Audio format specification. Use 'auto' for automatic detection"),
24747
- sampleRate: zod11.number().optional().describe("Sample rate in Hz (required for raw PCM formats)"),
24748
- numChannels: zod11.number().min(1).max(2).optional().describe("Number of audio channels (1 for mono, 2 for stereo) - required for raw PCM formats"),
24749
- languageHints: zod11.array(zod11.string()).optional().describe("Expected languages in the audio (ISO language codes)"),
24750
- context: sonioxContextSchema.optional().describe("Additional context to improve transcription accuracy"),
24751
- enableSpeakerDiarization: zod11.boolean().optional().describe("Enable speaker diarization - each token will include a speaker field"),
24752
- enableLanguageIdentification: zod11.boolean().optional().describe("Enable language identification - each token will include a language field"),
24753
- enableEndpointDetection: zod11.boolean().optional().describe("Enable endpoint detection to detect when a speaker has finished talking"),
24754
- translation: sonioxTranslationConfigSchema.optional().describe("Translation configuration"),
24755
- clientReferenceId: zod11.string().optional().describe("Optional tracking identifier (client-defined)")
24658
+ model: sonioxRealtimeModelSchema,
24659
+ audioFormat: sonioxAudioFormatSchema.optional(),
24660
+ sampleRate: zod11.number().optional(),
24661
+ numChannels: zod11.number().optional(),
24662
+ languageHints: zod11.array(zod11.string()).optional(),
24663
+ context: sonioxContextSchema.optional(),
24664
+ enableSpeakerDiarization: zod11.boolean().optional(),
24665
+ enableLanguageIdentification: zod11.boolean().optional(),
24666
+ enableEndpointDetection: zod11.boolean().optional(),
24667
+ translation: sonioxTranslationConfigSchema.optional(),
24668
+ clientReferenceId: zod11.string().optional()
24756
24669
  });
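With the two audio-format enums merged into a single sonioxAudioFormatSchema and stt-rt-v4 added to the model enum, one parse call now covers both auto-detected and raw-PCM setups. A hedged sketch against the schema defined just above; values are illustrative, and the raw-PCM sample-rate/channel requirement comes from the descriptions this diff removes:

const params = streamingTranscriberParams3.parse({
  model: "stt-rt-v4",       // newly added model value in this release
  audioFormat: "pcm_s16le", // raw PCM encodings now share one enum with "auto"
  sampleRate: 16000,        // required for raw PCM formats, per the prior description
  numChannels: 1,
  languageHints: ["en"],
  enableSpeakerDiarization: true,
});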
24757
- var sonioxTranslationStatusSchema = zod11.enum(["none", "original", "translation"]);
24670
+ var sonioxTranslationStatusSchema = zod11.enum(["original", "translation", "none"]);
24758
24671
  var sonioxTokenSchema = zod11.object({
24759
- text: zod11.string().describe("Token text content (subword, word, or space)"),
24760
- start_ms: zod11.number().optional().describe("Start time of the token in milliseconds"),
24761
- end_ms: zod11.number().optional().describe("End time of the token in milliseconds"),
24762
- confidence: zod11.number().min(0).max(1).optional().describe("Confidence score between 0.0 and 1.0"),
24763
- is_final: zod11.boolean().describe("Whether this token is final (confirmed) or provisional"),
24764
- speaker: zod11.string().optional().describe("Speaker identifier (only present when speaker diarization is enabled)"),
24765
- language: zod11.string().optional().describe("Detected language code (only present when language identification is enabled)"),
24766
- source_language: zod11.string().optional().describe("Original language code for translated tokens"),
24767
- translation_status: sonioxTranslationStatusSchema.optional().describe("Translation status: 'none', 'original', or 'translation'")
24672
+ text: zod11.string(),
24673
+ start_ms: zod11.number().optional(),
24674
+ end_ms: zod11.number().optional(),
24675
+ confidence: zod11.number(),
24676
+ is_final: zod11.boolean(),
24677
+ speaker: zod11.string().optional(),
24678
+ translation_status: sonioxTranslationStatusSchema.optional(),
24679
+ language: zod11.string().optional(),
24680
+ source_language: zod11.string().optional()
24768
24681
  });
24769
24682
  var sonioxStreamingResponseSchema = zod11.object({
24770
- text: zod11.string().optional().describe("Complete transcribed text"),
24771
- tokens: zod11.array(sonioxTokenSchema).describe("List of recognized tokens"),
24772
- final_audio_proc_ms: zod11.number().optional().describe("Milliseconds of audio processed into final tokens"),
24773
- total_audio_proc_ms: zod11.number().optional().describe("Milliseconds of audio processed (final + non-final)"),
24774
- finished: zod11.boolean().optional().describe("Whether the transcription is complete"),
24775
- error: zod11.string().optional().describe("Error message if an error occurred"),
24776
- error_code: zod11.number().optional().describe("Error code if an error occurred")
24683
+ text: zod11.string(),
24684
+ tokens: zod11.array(sonioxTokenSchema),
24685
+ final_audio_proc_ms: zod11.number(),
24686
+ total_audio_proc_ms: zod11.number(),
24687
+ finished: zod11.boolean().optional(),
24688
+ error_code: zod11.number().optional(),
24689
+ error_message: zod11.string().optional()
24777
24690
  });
24778
24691
  var sonioxRecorderStateSchema = zod11.enum([
24779
24692
  "Init",