voice-router-dev 0.8.3 → 0.8.5

This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -2252,13 +2252,6 @@ var TranslationLanguageCodeEnum = {
  zh: "zh"
  };
 
- // src/generated/assemblyai/schema/speechModel.ts
- var SpeechModel = {
- best: "best",
- "slam-1": "slam-1",
- universal: "universal"
- };
-
  // src/generated/assemblyai/schema/transcriptLanguageCode.ts
  var TranscriptLanguageCode = {
  en: "en",
@@ -2574,7 +2567,10 @@ var AssemblyAIEncoding = {
  /** μ-law (telephony) */
  pcmMulaw: "pcm_mulaw"
  };
- var AssemblyAITranscriptionModel = SpeechModel;
+ var AssemblyAITranscriptionModel = {
+ "universal-3-pro": "universal-3-pro",
+ "universal-2": "universal-2"
+ };
  var AssemblyAILanguage = TranscriptLanguageCode;
  var AssemblyAISpeechModel = {
  /** Optimized for English */
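With the SpeechModel enum ("best", "slam-1", "universal") removed, AssemblyAITranscriptionModel is now an inline map of the two current models. A minimal sketch of selecting a model through the new constant, assuming the package re-exports it from its public entry point (this diff only shows dist/index.mjs internals):

    import { AssemblyAITranscriptionModel } from "voice-router-dev"; // hypothetical import path

    // Keys and values are identical strings, so indexing simply echoes the model id:
    const model = AssemblyAITranscriptionModel["universal-3-pro"]; // "universal-3-pro"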
@@ -5199,19 +5195,20 @@ var schema_exports2 = {};
  __export(schema_exports2, {
  AudioIntelligenceModelStatus: () => AudioIntelligenceModelStatus,
  EntityType: () => EntityType,
- LemurModel: () => LemurModel,
  PiiPolicy: () => PiiPolicy,
  RedactPiiAudioQuality: () => RedactPiiAudioQuality,
  RedactedAudioStatus: () => RedactedAudioStatus,
  Sentiment: () => Sentiment,
- SpeechModel: () => SpeechModel,
+ SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType: () => SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType,
  SubstitutionPolicy: () => SubstitutionPolicy,
  SubtitleFormat: () => SubtitleFormat,
  SummaryModel: () => SummaryModel,
  SummaryType: () => SummaryType,
- TranscriptBoostParam: () => TranscriptBoostParam,
  TranscriptLanguageCode: () => TranscriptLanguageCode,
- TranscriptReadyStatus: () => TranscriptReadyStatus,
+ TranscriptOptionalParamsRedactPiiAudioOptionsOverrideAudioRedactionMethod: () => TranscriptOptionalParamsRedactPiiAudioOptionsOverrideAudioRedactionMethod,
+ TranscriptOptionalParamsRemoveAudioTags: () => TranscriptOptionalParamsRemoveAudioTags,
+ TranscriptRedactPiiAudioOptionsOverrideAudioRedactionMethod: () => TranscriptRedactPiiAudioOptionsOverrideAudioRedactionMethod,
+ TranscriptRemoveAudioTags: () => TranscriptRemoveAudioTags,
  TranscriptStatus: () => TranscriptStatus
  });
 
@@ -5269,13 +5266,6 @@ var EntityType = {
  zodiac_sign: "zodiac_sign"
  };
 
- // src/generated/assemblyai/schema/lemurModel.ts
- var LemurModel = {
- "anthropic/claude-3-5-sonnet": "anthropic/claude-3-5-sonnet",
- "anthropic/claude-3-opus": "anthropic/claude-3-opus",
- "anthropic/claude-3-haiku": "anthropic/claude-3-haiku"
- };
-
  // src/generated/assemblyai/schema/piiPolicy.ts
  var PiiPolicy = {
  account_number: "account_number",
@@ -5342,6 +5332,12 @@ var Sentiment = {
  NEGATIVE: "NEGATIVE"
  };
 
+ // src/generated/assemblyai/schema/speakerIdentificationRequestBodySpeakerIdentificationSpeakerType.ts
+ var SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType = {
+ role: "role",
+ name: "name"
+ };
+
  // src/generated/assemblyai/schema/substitutionPolicy.ts
  var SubstitutionPolicy = {
  entity_name: "entity_name",
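The generated file name suggests this enum feeds a `speaker_identification.speaker_type` request field; that shape is inferred from the name alone and is not confirmed elsewhere in this diff. A sketch under that assumption:

    // Hypothetical request fragment (field names inferred from the enum's generated name):
    const speakerIdentification = {
      speaker_type: SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType.role // "role" or "name"
    };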
@@ -5370,17 +5366,24 @@ var SummaryType = {
  paragraph: "paragraph"
  };
 
- // src/generated/assemblyai/schema/transcriptBoostParam.ts
- var TranscriptBoostParam = {
- low: "low",
- default: "default",
- high: "high"
+ // src/generated/assemblyai/schema/transcriptOptionalParamsRedactPiiAudioOptionsOverrideAudioRedactionMethod.ts
+ var TranscriptOptionalParamsRedactPiiAudioOptionsOverrideAudioRedactionMethod = {
+ silence: "silence"
  };
 
- // src/generated/assemblyai/schema/transcriptReadyStatus.ts
- var TranscriptReadyStatus = {
- completed: "completed",
- error: "error"
+ // src/generated/assemblyai/schema/transcriptOptionalParamsRemoveAudioTags.ts
+ var TranscriptOptionalParamsRemoveAudioTags = {
+ all: "all"
+ };
+
+ // src/generated/assemblyai/schema/transcriptRedactPiiAudioOptionsOverrideAudioRedactionMethod.ts
+ var TranscriptRedactPiiAudioOptionsOverrideAudioRedactionMethod = {
+ silence: "silence"
+ };
+
+ // src/generated/assemblyai/schema/transcriptRemoveAudioTags.ts
+ var TranscriptRemoveAudioTags = {
+ all: "all"
  };
 
  // src/generated/assemblyai/api/assemblyAIAPI.ts
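All four replacement enums are single-valued. The request-side `override_audio_redaction_method` field is documented by the createTranscriptBody schema later in this diff; where `remove_audio_tags` is accepted is not shown here. A sketch of opting into silence-based PII audio redaction, using the request shape from that schema:

    // Request fragment per the createTranscriptBody schema below (audio URL is a placeholder):
    const request = {
      audio_url: "https://example.com/call.mp3",
      redact_pii: true,
      redact_pii_audio: true,
      redact_pii_audio_options: {
        // "silence" replaces the default beep; it is the enum's only value
        override_audio_redaction_method:
          TranscriptOptionalParamsRedactPiiAudioOptionsOverrideAudioRedactionMethod.silence
      }
    };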
@@ -5399,9 +5402,6 @@ var getTranscript = (transcriptId, options) => {
  var deleteTranscript = (transcriptId, options) => {
  return axios2.delete(`/v2/transcript/${transcriptId}`, options);
  };
- var createTemporaryToken = (createRealtimeTemporaryTokenParams, options) => {
- return axios2.post(`/v2/realtime/token`, createRealtimeTemporaryTokenParams, options);
- };
 
  // src/adapters/assemblyai-adapter.ts
  var AssemblyAIAdapter = class extends BaseAdapter {
@@ -5737,16 +5737,27 @@ var AssemblyAIAdapter = class extends BaseAdapter {
  "AssemblyAI adapter currently only supports URL-based audio input. Use audio.type='url'"
  );
  }
+ const aaiOpts = { ...options?.assemblyai };
+ if ("speech_model" in aaiOpts && aaiOpts.speech_model != null) {
+ if (!aaiOpts.speech_models) {
+ aaiOpts.speech_models = [aaiOpts.speech_model];
+ }
+ delete aaiOpts.speech_model;
+ }
  const request = {
- ...options?.assemblyai,
+ ...aaiOpts,
  audio_url: audioUrl,
+ // speech_models is required — default to universal-3-pro
+ speech_models: aaiOpts.speech_models ?? [
+ "universal-3-pro"
+ ],
  // Enable punctuation and formatting by default
- punctuate: options?.assemblyai?.punctuate ?? true,
- format_text: options?.assemblyai?.format_text ?? true
+ punctuate: aaiOpts.punctuate ?? true,
+ format_text: aaiOpts.format_text ?? true
  };
  if (options) {
  if (options.model) {
- request.speech_model = options.model;
+ request.speech_models = [options.model];
  }
  if (options.language) {
  const languageCode = options.language.includes("_") ? options.language : `${options.language}_us`;
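The adapter now folds a legacy singular `speech_model` option into the plural `speech_models` array (an explicit `speech_models` wins) and falls back to `["universal-3-pro"]` when neither is set. A sketch of the caller-visible behavior, assuming a `transcribe(audio, options)` entry point (the method name is not visible in this hunk):

    // Old-style option is rewritten rather than rejected:
    await adapter.transcribe(audio, { assemblyai: { speech_model: "universal-2" } });
    // → request.speech_models = ["universal-2"]

    // With no model anywhere, the documented default applies:
    await adapter.transcribe(audio, {});
    // → request.speech_models = ["universal-3-pro"]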
@@ -5762,8 +5773,7 @@ var AssemblyAIAdapter = class extends BaseAdapter {
  }
  }
  if (options.customVocabulary && options.customVocabulary.length > 0) {
- request.word_boost = options.customVocabulary;
- request.boost_param = request.boost_param ?? "high";
+ request.keyterms_prompt = options.customVocabulary;
  }
  if (options.summarization) {
  request.summarization = true;
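`word_boost` and `boost_param` are gone from the generated request; the adapter's `customVocabulary` option now maps to `keyterms_prompt`. Under the same assumed `transcribe` entry point:

    await adapter.transcribe(audio, { customVocabulary: ["AssemblyAI", "diarization"] });
    // 0.8.3 → request.word_boost = [...], request.boost_param = "high"
    // 0.8.5 → request.keyterms_prompt = ["AssemblyAI", "diarization"]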
@@ -11543,15 +11553,10 @@ var deepgramStreamingOnlyParams = zod2.object({
  // src/generated/assemblyai/api/assemblyAIAPI.zod.ts
  var assemblyAIAPI_zod_exports = {};
  __export(assemblyAIAPI_zod_exports, {
- createTemporaryTokenBody: () => createTemporaryTokenBody,
- createTemporaryTokenBodyExpiresInMin: () => createTemporaryTokenBodyExpiresInMin,
- createTemporaryTokenResponse: () => createTemporaryTokenResponse,
  createTranscriptBody: () => createTranscriptBody,
  createTranscriptBodyAutoChaptersDefault: () => createTranscriptBodyAutoChaptersDefault,
  createTranscriptBodyAutoHighlightsDefault: () => createTranscriptBodyAutoHighlightsDefault,
  createTranscriptBodyContentSafetyConfidenceDefault: () => createTranscriptBodyContentSafetyConfidenceDefault,
- createTranscriptBodyContentSafetyConfidenceMax: () => createTranscriptBodyContentSafetyConfidenceMax,
- createTranscriptBodyContentSafetyConfidenceMin: () => createTranscriptBodyContentSafetyConfidenceMin,
  createTranscriptBodyContentSafetyDefault: () => createTranscriptBodyContentSafetyDefault,
  createTranscriptBodyCustomTopicsDefault: () => createTranscriptBodyCustomTopicsDefault,
  createTranscriptBodyDisfluenciesDefault: () => createTranscriptBodyDisfluenciesDefault,
@@ -11559,198 +11564,58 @@ __export(assemblyAIAPI_zod_exports, {
  createTranscriptBodyFilterProfanityDefault: () => createTranscriptBodyFilterProfanityDefault,
  createTranscriptBodyFormatTextDefault: () => createTranscriptBodyFormatTextDefault,
  createTranscriptBodyIabCategoriesDefault: () => createTranscriptBodyIabCategoriesDefault,
- createTranscriptBodyLanguageCodeDefault: () => createTranscriptBodyLanguageCodeDefault,
- createTranscriptBodyLanguageConfidenceThresholdDefault: () => createTranscriptBodyLanguageConfidenceThresholdDefault,
- createTranscriptBodyLanguageConfidenceThresholdMax: () => createTranscriptBodyLanguageConfidenceThresholdMax,
- createTranscriptBodyLanguageConfidenceThresholdMin: () => createTranscriptBodyLanguageConfidenceThresholdMin,
  createTranscriptBodyLanguageDetectionDefault: () => createTranscriptBodyLanguageDetectionDefault,
+ createTranscriptBodyLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault: () => createTranscriptBodyLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault,
+ createTranscriptBodyLanguageDetectionOptionsCodeSwitchingDefault: () => createTranscriptBodyLanguageDetectionOptionsCodeSwitchingDefault,
+ createTranscriptBodyLanguageDetectionOptionsFallbackLanguageDefault: () => createTranscriptBodyLanguageDetectionOptionsFallbackLanguageDefault,
  createTranscriptBodyMultichannelDefault: () => createTranscriptBodyMultichannelDefault,
  createTranscriptBodyPunctuateDefault: () => createTranscriptBodyPunctuateDefault,
  createTranscriptBodyRedactPiiAudioDefault: () => createTranscriptBodyRedactPiiAudioDefault,
+ createTranscriptBodyRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault: () => createTranscriptBodyRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault,
  createTranscriptBodyRedactPiiDefault: () => createTranscriptBodyRedactPiiDefault,
- createTranscriptBodyRedactPiiSubDefault: () => createTranscriptBodyRedactPiiSubDefault,
  createTranscriptBodySentimentAnalysisDefault: () => createTranscriptBodySentimentAnalysisDefault,
  createTranscriptBodySpeakerLabelsDefault: () => createTranscriptBodySpeakerLabelsDefault,
- createTranscriptBodySpeakersExpectedDefault: () => createTranscriptBodySpeakersExpectedDefault,
- createTranscriptBodySpeechModelDefault: () => createTranscriptBodySpeechModelDefault,
- createTranscriptBodySpeechThresholdDefault: () => createTranscriptBodySpeechThresholdDefault,
- createTranscriptBodySpeechThresholdMax: () => createTranscriptBodySpeechThresholdMax,
- createTranscriptBodySpeechThresholdMin: () => createTranscriptBodySpeechThresholdMin,
+ createTranscriptBodySpeakerOptionsMinSpeakersExpectedDefault: () => createTranscriptBodySpeakerOptionsMinSpeakersExpectedDefault,
+ createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault: () => createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault,
+ createTranscriptBodySpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault: () => createTranscriptBodySpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault,
  createTranscriptBodySummarizationDefault: () => createTranscriptBodySummarizationDefault,
- createTranscriptBodyWebhookAuthHeaderNameDefault: () => createTranscriptBodyWebhookAuthHeaderNameDefault,
- createTranscriptBodyWebhookAuthHeaderValueDefault: () => createTranscriptBodyWebhookAuthHeaderValueDefault,
+ createTranscriptBodyTemperatureDefault: () => createTranscriptBodyTemperatureDefault,
  createTranscriptResponse: () => createTranscriptResponse,
- createTranscriptResponseAutoHighlightsResultResultsItemRankMax: () => createTranscriptResponseAutoHighlightsResultResultsItemRankMax,
- createTranscriptResponseAutoHighlightsResultResultsItemRankMin: () => createTranscriptResponseAutoHighlightsResultResultsItemRankMin,
- createTranscriptResponseConfidenceMax: () => createTranscriptResponseConfidenceMax,
- createTranscriptResponseConfidenceMin: () => createTranscriptResponseConfidenceMin,
- createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax: () => createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax,
- createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMin: () => createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMin,
- createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax: () => createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax,
- createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin: () => createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin,
- createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax: () => createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax,
- createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin: () => createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin,
- createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax: () => createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax,
- createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin: () => createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin,
- createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax: () => createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax,
- createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin: () => createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin,
- createTranscriptResponseContentSafetyLabelsSummaryMaxOne: () => createTranscriptResponseContentSafetyLabelsSummaryMaxOne,
- createTranscriptResponseContentSafetyLabelsSummaryMinOne: () => createTranscriptResponseContentSafetyLabelsSummaryMinOne,
- createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax: () => createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax,
- createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin: () => createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin,
- createTranscriptResponseIabCategoriesResultSummaryMaxOne: () => createTranscriptResponseIabCategoriesResultSummaryMaxOne,
- createTranscriptResponseIabCategoriesResultSummaryMinOne: () => createTranscriptResponseIabCategoriesResultSummaryMinOne,
- createTranscriptResponseLanguageConfidenceMax: () => createTranscriptResponseLanguageConfidenceMax,
- createTranscriptResponseLanguageConfidenceMin: () => createTranscriptResponseLanguageConfidenceMin,
- createTranscriptResponseLanguageConfidenceThresholdMax: () => createTranscriptResponseLanguageConfidenceThresholdMax,
- createTranscriptResponseLanguageConfidenceThresholdMin: () => createTranscriptResponseLanguageConfidenceThresholdMin,
- createTranscriptResponseSentimentAnalysisResultsItemConfidenceMax: () => createTranscriptResponseSentimentAnalysisResultsItemConfidenceMax,
- createTranscriptResponseSentimentAnalysisResultsItemConfidenceMin: () => createTranscriptResponseSentimentAnalysisResultsItemConfidenceMin,
- createTranscriptResponseSpeechModelDefault: () => createTranscriptResponseSpeechModelDefault,
- createTranscriptResponseSpeechThresholdMax: () => createTranscriptResponseSpeechThresholdMax,
- createTranscriptResponseSpeechThresholdMin: () => createTranscriptResponseSpeechThresholdMin,
- createTranscriptResponseUtterancesItemConfidenceMax: () => createTranscriptResponseUtterancesItemConfidenceMax,
- createTranscriptResponseUtterancesItemConfidenceMin: () => createTranscriptResponseUtterancesItemConfidenceMin,
- createTranscriptResponseUtterancesItemWordsItemConfidenceMax: () => createTranscriptResponseUtterancesItemWordsItemConfidenceMax,
- createTranscriptResponseUtterancesItemWordsItemConfidenceMin: () => createTranscriptResponseUtterancesItemWordsItemConfidenceMin,
- createTranscriptResponseWordsItemConfidenceMax: () => createTranscriptResponseWordsItemConfidenceMax,
- createTranscriptResponseWordsItemConfidenceMin: () => createTranscriptResponseWordsItemConfidenceMin,
+ createTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault: () => createTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault,
+ createTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault: () => createTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault,
+ createTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault: () => createTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault,
+ createTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault: () => createTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault,
+ createTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault: () => createTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault,
+ createTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault: () => createTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault,
  deleteTranscriptParams: () => deleteTranscriptParams,
  deleteTranscriptResponse: () => deleteTranscriptResponse,
- deleteTranscriptResponseAutoHighlightsResultResultsItemRankMax: () => deleteTranscriptResponseAutoHighlightsResultResultsItemRankMax,
- deleteTranscriptResponseAutoHighlightsResultResultsItemRankMin: () => deleteTranscriptResponseAutoHighlightsResultResultsItemRankMin,
- deleteTranscriptResponseConfidenceMax: () => deleteTranscriptResponseConfidenceMax,
- deleteTranscriptResponseConfidenceMin: () => deleteTranscriptResponseConfidenceMin,
- deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax: () => deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax,
- deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMin: () => deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMin,
- deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax: () => deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax,
- deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin: () => deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin,
- deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax: () => deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax,
- deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin: () => deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin,
- deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax: () => deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax,
- deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin: () => deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin,
- deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax: () => deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax,
- deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin: () => deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin,
- deleteTranscriptResponseContentSafetyLabelsSummaryMaxOne: () => deleteTranscriptResponseContentSafetyLabelsSummaryMaxOne,
- deleteTranscriptResponseContentSafetyLabelsSummaryMinOne: () => deleteTranscriptResponseContentSafetyLabelsSummaryMinOne,
- deleteTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax: () => deleteTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax,
- deleteTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin: () => deleteTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin,
- deleteTranscriptResponseIabCategoriesResultSummaryMaxOne: () => deleteTranscriptResponseIabCategoriesResultSummaryMaxOne,
- deleteTranscriptResponseIabCategoriesResultSummaryMinOne: () => deleteTranscriptResponseIabCategoriesResultSummaryMinOne,
- deleteTranscriptResponseLanguageConfidenceMax: () => deleteTranscriptResponseLanguageConfidenceMax,
- deleteTranscriptResponseLanguageConfidenceMin: () => deleteTranscriptResponseLanguageConfidenceMin,
- deleteTranscriptResponseLanguageConfidenceThresholdMax: () => deleteTranscriptResponseLanguageConfidenceThresholdMax,
- deleteTranscriptResponseLanguageConfidenceThresholdMin: () => deleteTranscriptResponseLanguageConfidenceThresholdMin,
- deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMax: () => deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMax,
- deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMin: () => deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMin,
- deleteTranscriptResponseSpeechModelDefault: () => deleteTranscriptResponseSpeechModelDefault,
- deleteTranscriptResponseSpeechThresholdMax: () => deleteTranscriptResponseSpeechThresholdMax,
- deleteTranscriptResponseSpeechThresholdMin: () => deleteTranscriptResponseSpeechThresholdMin,
- deleteTranscriptResponseUtterancesItemConfidenceMax: () => deleteTranscriptResponseUtterancesItemConfidenceMax,
- deleteTranscriptResponseUtterancesItemConfidenceMin: () => deleteTranscriptResponseUtterancesItemConfidenceMin,
- deleteTranscriptResponseUtterancesItemWordsItemConfidenceMax: () => deleteTranscriptResponseUtterancesItemWordsItemConfidenceMax,
- deleteTranscriptResponseUtterancesItemWordsItemConfidenceMin: () => deleteTranscriptResponseUtterancesItemWordsItemConfidenceMin,
- deleteTranscriptResponseWordsItemConfidenceMax: () => deleteTranscriptResponseWordsItemConfidenceMax,
- deleteTranscriptResponseWordsItemConfidenceMin: () => deleteTranscriptResponseWordsItemConfidenceMin,
- getLemurResponseParams: () => getLemurResponseParams,
- getLemurResponseResponse: () => getLemurResponseResponse,
- getLemurResponseResponseUsageInputTokensMin: () => getLemurResponseResponseUsageInputTokensMin,
- getLemurResponseResponseUsageInputTokensMinOne: () => getLemurResponseResponseUsageInputTokensMinOne,
- getLemurResponseResponseUsageOutputTokensMin: () => getLemurResponseResponseUsageOutputTokensMin,
- getLemurResponseResponseUsageOutputTokensMinOne: () => getLemurResponseResponseUsageOutputTokensMinOne,
+ deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault: () => deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault,
+ deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault: () => deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault,
+ deleteTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault: () => deleteTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault,
+ deleteTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault: () => deleteTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault,
+ deleteTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault: () => deleteTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault,
+ deleteTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault: () => deleteTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault,
  getRedactedAudioParams: () => getRedactedAudioParams,
  getRedactedAudioResponse: () => getRedactedAudioResponse,
  getSubtitlesParams: () => getSubtitlesParams,
  getSubtitlesQueryParams: () => getSubtitlesQueryParams,
+ getSubtitlesResponse: () => getSubtitlesResponse,
  getTranscriptParagraphsParams: () => getTranscriptParagraphsParams,
  getTranscriptParagraphsResponse: () => getTranscriptParagraphsResponse,
- getTranscriptParagraphsResponseConfidenceMax: () => getTranscriptParagraphsResponseConfidenceMax,
- getTranscriptParagraphsResponseConfidenceMin: () => getTranscriptParagraphsResponseConfidenceMin,
- getTranscriptParagraphsResponseParagraphsItemConfidenceMax: () => getTranscriptParagraphsResponseParagraphsItemConfidenceMax,
- getTranscriptParagraphsResponseParagraphsItemConfidenceMin: () => getTranscriptParagraphsResponseParagraphsItemConfidenceMin,
- getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMax: () => getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMax,
- getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMin: () => getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMin,
  getTranscriptParams: () => getTranscriptParams,
  getTranscriptResponse: () => getTranscriptResponse,
- getTranscriptResponseAutoHighlightsResultResultsItemRankMax: () => getTranscriptResponseAutoHighlightsResultResultsItemRankMax,
- getTranscriptResponseAutoHighlightsResultResultsItemRankMin: () => getTranscriptResponseAutoHighlightsResultResultsItemRankMin,
- getTranscriptResponseConfidenceMax: () => getTranscriptResponseConfidenceMax,
- getTranscriptResponseConfidenceMin: () => getTranscriptResponseConfidenceMin,
- getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax: () => getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax,
- getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMin: () => getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMin,
- getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax: () => getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax,
- getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin: () => getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin,
- getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax: () => getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax,
- getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin: () => getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin,
- getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax: () => getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax,
- getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin: () => getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin,
- getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax: () => getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax,
- getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin: () => getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin,
- getTranscriptResponseContentSafetyLabelsSummaryMaxOne: () => getTranscriptResponseContentSafetyLabelsSummaryMaxOne,
- getTranscriptResponseContentSafetyLabelsSummaryMinOne: () => getTranscriptResponseContentSafetyLabelsSummaryMinOne,
- getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax: () => getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax,
- getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin: () => getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin,
- getTranscriptResponseIabCategoriesResultSummaryMaxOne: () => getTranscriptResponseIabCategoriesResultSummaryMaxOne,
- getTranscriptResponseIabCategoriesResultSummaryMinOne: () => getTranscriptResponseIabCategoriesResultSummaryMinOne,
- getTranscriptResponseLanguageConfidenceMax: () => getTranscriptResponseLanguageConfidenceMax,
- getTranscriptResponseLanguageConfidenceMin: () => getTranscriptResponseLanguageConfidenceMin,
- getTranscriptResponseLanguageConfidenceThresholdMax: () => getTranscriptResponseLanguageConfidenceThresholdMax,
- getTranscriptResponseLanguageConfidenceThresholdMin: () => getTranscriptResponseLanguageConfidenceThresholdMin,
- getTranscriptResponseSentimentAnalysisResultsItemConfidenceMax: () => getTranscriptResponseSentimentAnalysisResultsItemConfidenceMax,
- getTranscriptResponseSentimentAnalysisResultsItemConfidenceMin: () => getTranscriptResponseSentimentAnalysisResultsItemConfidenceMin,
- getTranscriptResponseSpeechModelDefault: () => getTranscriptResponseSpeechModelDefault,
- getTranscriptResponseSpeechThresholdMax: () => getTranscriptResponseSpeechThresholdMax,
- getTranscriptResponseSpeechThresholdMin: () => getTranscriptResponseSpeechThresholdMin,
- getTranscriptResponseUtterancesItemConfidenceMax: () => getTranscriptResponseUtterancesItemConfidenceMax,
- getTranscriptResponseUtterancesItemConfidenceMin: () => getTranscriptResponseUtterancesItemConfidenceMin,
- getTranscriptResponseUtterancesItemWordsItemConfidenceMax: () => getTranscriptResponseUtterancesItemWordsItemConfidenceMax,
- getTranscriptResponseUtterancesItemWordsItemConfidenceMin: () => getTranscriptResponseUtterancesItemWordsItemConfidenceMin,
- getTranscriptResponseWordsItemConfidenceMax: () => getTranscriptResponseWordsItemConfidenceMax,
- getTranscriptResponseWordsItemConfidenceMin: () => getTranscriptResponseWordsItemConfidenceMin,
+ getTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault: () => getTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault,
+ getTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault: () => getTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault,
+ getTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault: () => getTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault,
+ getTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault: () => getTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault,
+ getTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault: () => getTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault,
+ getTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault: () => getTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault,
  getTranscriptSentencesParams: () => getTranscriptSentencesParams,
  getTranscriptSentencesResponse: () => getTranscriptSentencesResponse,
- getTranscriptSentencesResponseConfidenceMax: () => getTranscriptSentencesResponseConfidenceMax,
- getTranscriptSentencesResponseConfidenceMin: () => getTranscriptSentencesResponseConfidenceMin,
- getTranscriptSentencesResponseSentencesItemConfidenceMax: () => getTranscriptSentencesResponseSentencesItemConfidenceMax,
- getTranscriptSentencesResponseSentencesItemConfidenceMin: () => getTranscriptSentencesResponseSentencesItemConfidenceMin,
- getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMax: () => getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMax,
- getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMin: () => getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMin,
- lemurQuestionAnswerBody: () => lemurQuestionAnswerBody,
- lemurQuestionAnswerBodyFinalModelDefault: () => lemurQuestionAnswerBodyFinalModelDefault,
- lemurQuestionAnswerBodyMaxOutputSizeDefault: () => lemurQuestionAnswerBodyMaxOutputSizeDefault,
- lemurQuestionAnswerBodyTemperatureDefault: () => lemurQuestionAnswerBodyTemperatureDefault,
- lemurQuestionAnswerBodyTemperatureMax: () => lemurQuestionAnswerBodyTemperatureMax,
- lemurQuestionAnswerBodyTemperatureMin: () => lemurQuestionAnswerBodyTemperatureMin,
- lemurQuestionAnswerResponse: () => lemurQuestionAnswerResponse,
- lemurQuestionAnswerResponseUsageInputTokensMin: () => lemurQuestionAnswerResponseUsageInputTokensMin,
- lemurQuestionAnswerResponseUsageOutputTokensMin: () => lemurQuestionAnswerResponseUsageOutputTokensMin,
- lemurSummaryBody: () => lemurSummaryBody,
- lemurSummaryBodyFinalModelDefault: () => lemurSummaryBodyFinalModelDefault,
- lemurSummaryBodyMaxOutputSizeDefault: () => lemurSummaryBodyMaxOutputSizeDefault,
- lemurSummaryBodyTemperatureDefault: () => lemurSummaryBodyTemperatureDefault,
- lemurSummaryBodyTemperatureMax: () => lemurSummaryBodyTemperatureMax,
- lemurSummaryBodyTemperatureMin: () => lemurSummaryBodyTemperatureMin,
- lemurSummaryResponse: () => lemurSummaryResponse,
- lemurSummaryResponseUsageInputTokensMin: () => lemurSummaryResponseUsageInputTokensMin,
- lemurSummaryResponseUsageOutputTokensMin: () => lemurSummaryResponseUsageOutputTokensMin,
- lemurTaskBody: () => lemurTaskBody,
- lemurTaskBodyFinalModelDefault: () => lemurTaskBodyFinalModelDefault,
- lemurTaskBodyMaxOutputSizeDefault: () => lemurTaskBodyMaxOutputSizeDefault,
- lemurTaskBodyTemperatureDefault: () => lemurTaskBodyTemperatureDefault,
- lemurTaskBodyTemperatureMax: () => lemurTaskBodyTemperatureMax,
- lemurTaskBodyTemperatureMin: () => lemurTaskBodyTemperatureMin,
- lemurTaskResponse: () => lemurTaskResponse,
- lemurTaskResponseUsageInputTokensMin: () => lemurTaskResponseUsageInputTokensMin,
- lemurTaskResponseUsageOutputTokensMin: () => lemurTaskResponseUsageOutputTokensMin,
  listTranscriptsQueryLimitDefault: () => listTranscriptsQueryLimitDefault,
- listTranscriptsQueryLimitMax: () => listTranscriptsQueryLimitMax,
  listTranscriptsQueryParams: () => listTranscriptsQueryParams,
  listTranscriptsQueryThrottledOnlyDefault: () => listTranscriptsQueryThrottledOnlyDefault,
  listTranscriptsResponse: () => listTranscriptsResponse,
- listTranscriptsResponseTranscriptsItemCompletedRegExp: () => listTranscriptsResponseTranscriptsItemCompletedRegExp,
- listTranscriptsResponseTranscriptsItemCreatedRegExp: () => listTranscriptsResponseTranscriptsItemCreatedRegExp,
- purgeLemurRequestDataParams: () => purgeLemurRequestDataParams,
- purgeLemurRequestDataResponse: () => purgeLemurRequestDataResponse,
  uploadFileResponse: () => uploadFileResponse,
  wordSearchParams: () => wordSearchParams,
  wordSearchQueryParams: () => wordSearchQueryParams,
@@ -11760,43 +11625,192 @@ import { z as zod3 } from "zod";
  var uploadFileResponse = zod3.object({
  upload_url: zod3.string().describe("A URL that points to your audio file, accessible only by AssemblyAI's servers\n")
  });
- var createTranscriptBodyLanguageCodeDefault = "en_us";
- var createTranscriptBodyLanguageDetectionDefault = false;
- var createTranscriptBodyLanguageConfidenceThresholdDefault = 0;
- var createTranscriptBodyLanguageConfidenceThresholdMin = 0;
- var createTranscriptBodyLanguageConfidenceThresholdMax = 1;
- var createTranscriptBodySpeechModelDefault = "best";
- var createTranscriptBodyPunctuateDefault = true;
- var createTranscriptBodyFormatTextDefault = true;
- var createTranscriptBodyDisfluenciesDefault = false;
- var createTranscriptBodyMultichannelDefault = false;
- var createTranscriptBodyWebhookAuthHeaderNameDefault = null;
- var createTranscriptBodyWebhookAuthHeaderValueDefault = null;
+ var createTranscriptBodyAutoChaptersDefault = false;
  var createTranscriptBodyAutoHighlightsDefault = false;
- var createTranscriptBodyFilterProfanityDefault = false;
- var createTranscriptBodyRedactPiiDefault = false;
- var createTranscriptBodyRedactPiiAudioDefault = false;
- var createTranscriptBodyRedactPiiSubDefault = "hash";
- var createTranscriptBodySpeakerLabelsDefault = false;
- var createTranscriptBodySpeakersExpectedDefault = null;
  var createTranscriptBodyContentSafetyDefault = false;
  var createTranscriptBodyContentSafetyConfidenceDefault = 50;
- var createTranscriptBodyContentSafetyConfidenceMin = 25;
- var createTranscriptBodyContentSafetyConfidenceMax = 100;
+ var createTranscriptBodyDisfluenciesDefault = false;
+ var createTranscriptBodyEntityDetectionDefault = false;
+ var createTranscriptBodyFilterProfanityDefault = false;
+ var createTranscriptBodyFormatTextDefault = true;
  var createTranscriptBodyIabCategoriesDefault = false;
+ var createTranscriptBodyLanguageDetectionDefault = false;
+ var createTranscriptBodyLanguageDetectionOptionsFallbackLanguageDefault = "auto";
+ var createTranscriptBodyLanguageDetectionOptionsCodeSwitchingDefault = false;
+ var createTranscriptBodyLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault = 0.3;
+ var createTranscriptBodyMultichannelDefault = false;
+ var createTranscriptBodyPunctuateDefault = true;
+ var createTranscriptBodyRedactPiiDefault = false;
+ var createTranscriptBodyRedactPiiAudioDefault = false;
+ var createTranscriptBodyRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault = false;
  var createTranscriptBodySentimentAnalysisDefault = false;
- var createTranscriptBodyAutoChaptersDefault = false;
- var createTranscriptBodyEntityDetectionDefault = false;
- var createTranscriptBodySpeechThresholdDefault = 0;
- var createTranscriptBodySpeechThresholdMin = 0;
- var createTranscriptBodySpeechThresholdMax = 1;
+ var createTranscriptBodySpeakerLabelsDefault = false;
+ var createTranscriptBodySpeakerOptionsMinSpeakersExpectedDefault = 1;
+ var createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault = true;
+ var createTranscriptBodySpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
  var createTranscriptBodySummarizationDefault = false;
+ var createTranscriptBodyTemperatureDefault = 0;
  var createTranscriptBodyCustomTopicsDefault = false;
  var createTranscriptBody = zod3.object({
- audio_url: zod3.string().describe("The URL of the audio or video file to transcribe.")
- }).and(
- zod3.object({
- language_code: zod3.enum([
+ audio_end_at: zod3.number().optional().describe(
+ "The point in time, in milliseconds, to stop transcribing in your media file. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
+ ),
+ audio_start_from: zod3.number().optional().describe(
+ "The point in time, in milliseconds, to begin transcribing in your media file. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
+ ),
+ auto_chapters: zod3.boolean().optional().describe(
+ "Enable [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ ),
+ auto_highlights: zod3.boolean().optional().describe(
+ "Enable [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases), either true or false"
+ ),
+ content_safety: zod3.boolean().optional().describe(
+ "Enable [Content Moderation](https://www.assemblyai.com/docs/content-moderation), can be true or false"
+ ),
+ content_safety_confidence: zod3.number().default(createTranscriptBodyContentSafetyConfidenceDefault).describe(
+ "The confidence threshold for the [Content Moderation](https://www.assemblyai.com/docs/content-moderation) model. Values must be between 25 and 100."
+ ),
+ custom_spelling: zod3.array(
+ zod3.object({
+ from: zod3.array(zod3.string()).describe("Words or phrases to replace"),
+ to: zod3.string().describe("Word to replace with")
+ }).describe(
+ "Object containing words or phrases to replace, and the word or phrase to replace with"
+ )
+ ).optional().describe(
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
+ ),
+ disfluencies: zod3.boolean().optional().describe(
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
+ ),
+ domain: zod3.string().nullish().describe(
+ 'Enable domain-specific transcription models to improve accuracy for specialized terminology. Set to `"medical-v1"` to enable [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) for improved accuracy of medical terms such as medications, procedures, conditions, and dosages.\n\nSupported languages: English (`en`), Spanish (`es`), German (`de`), French (`fr`). If used with an unsupported language, the parameter is ignored and a warning is returned.\n'
+ ),
+ entity_detection: zod3.boolean().optional().describe(
+ "Enable [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection), can be true or false"
+ ),
+ filter_profanity: zod3.boolean().optional().describe(
+ "Filter profanity from the transcribed text, can be true or false. See [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) for more details."
+ ),
+ format_text: zod3.boolean().default(createTranscriptBodyFormatTextDefault).describe(
+ "Enable [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio), can be true or false"
+ ),
+ iab_categories: zod3.boolean().optional().describe(
+ "Enable [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection), can be true or false"
+ ),
+ keyterms_prompt: zod3.array(zod3.string()).optional().describe(
+ "Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3 Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.\n"
+ ),
+ language_code: zod3.enum([
+ "en",
+ "en_au",
+ "en_uk",
+ "en_us",
+ "es",
+ "fr",
+ "de",
+ "it",
+ "pt",
+ "nl",
+ "af",
+ "sq",
+ "am",
+ "ar",
+ "hy",
+ "as",
+ "az",
+ "ba",
+ "eu",
+ "be",
+ "bn",
+ "bs",
+ "br",
+ "bg",
+ "my",
+ "ca",
+ "zh",
+ "hr",
+ "cs",
+ "da",
+ "et",
+ "fo",
+ "fi",
+ "gl",
+ "ka",
+ "el",
+ "gu",
+ "ht",
+ "ha",
+ "haw",
+ "he",
+ "hi",
+ "hu",
+ "is",
+ "id",
+ "ja",
+ "jw",
+ "kn",
+ "kk",
+ "km",
+ "ko",
+ "lo",
+ "la",
+ "lv",
+ "ln",
+ "lt",
+ "lb",
+ "mk",
+ "mg",
+ "ms",
+ "ml",
+ "mt",
+ "mi",
+ "mr",
+ "mn",
+ "ne",
+ "no",
+ "nn",
+ "oc",
+ "pa",
+ "ps",
+ "fa",
+ "pl",
+ "ro",
+ "ru",
+ "sa",
+ "sr",
+ "sn",
+ "sd",
+ "si",
+ "sk",
+ "sl",
+ "so",
+ "su",
+ "sw",
+ "sv",
+ "tl",
+ "tg",
+ "ta",
+ "tt",
+ "te",
+ "th",
+ "bo",
+ "tr",
+ "tk",
+ "uk",
+ "ur",
+ "uz",
+ "vi",
+ "cy",
+ "yi",
+ "yo"
+ ]).describe(
+ "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
+ ).or(zod3.null()).optional().describe(
+ "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
+ ),
+ language_codes: zod3.array(
+ zod3.enum([
  "en",
  "en_au",
  "en_uk",
@@ -11900,53 +11914,360 @@ var createTranscriptBody = zod3.object({
11900
11914
  "yi",
11901
11915
  "yo"
11902
11916
  ]).describe(
11903
- "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n"
11904
- ).or(zod3.string()).or(zod3.null()).default(createTranscriptBodyLanguageCodeDefault).describe(
11905
- "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n"
11906
- ),
11907
- language_detection: zod3.boolean().optional().describe(
11908
- "Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false."
11909
- ),
11910
- language_confidence_threshold: zod3.number().min(createTranscriptBodyLanguageConfidenceThresholdMin).max(createTranscriptBodyLanguageConfidenceThresholdMax).optional().describe(
11911
- "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0.\n"
11912
- ),
11913
- speech_model: zod3.enum(["best", "slam-1", "universal"]).describe("The speech model to use for the transcription.").or(zod3.null()).default(createTranscriptBodySpeechModelDefault).describe(
11914
- 'The speech model to use for the transcription. When `null`, the "best" model is used.'
11915
- ),
11916
- punctuate: zod3.boolean().default(createTranscriptBodyPunctuateDefault).describe("Enable Automatic Punctuation, can be true or false"),
11917
- format_text: zod3.boolean().default(createTranscriptBodyFormatTextDefault).describe("Enable Text Formatting, can be true or false"),
11918
- disfluencies: zod3.boolean().optional().describe(
11919
- 'Transcribe Filler Words, like "umm", in your media file; can be true or false'
11920
- ),
11921
- multichannel: zod3.boolean().optional().describe(
11922
- "Enable [Multichannel](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) transcription, can be true or false."
11923
- ),
11924
- webhook_url: zod3.string().optional().describe(
11925
- "The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n"
11917
+ "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
11918
+ )
11919
+ ).nullish().describe(
11920
+ "The language codes of your audio file. Used for [Code switching](/docs/speech-to-text/pre-recorded-audio/code-switching)\nOne of the values specified must be `en`.\n"
11921
+ ),
11922
+ language_confidence_threshold: zod3.number().optional().describe(
11923
+ "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n"
11924
+ ),
11925
+ language_detection: zod3.boolean().optional().describe(
11926
+ "Enable [Automatic language detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection), either true or false."
11927
+ ),
11928
+ language_detection_options: zod3.object({
11929
+ expected_languages: zod3.array(zod3.string()).optional().describe(
11930
+ 'List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.'
11926
11931
  ),
11927
- webhook_auth_header_name: zod3.string().nullish().describe(
11928
- "The header name to be sent with the transcript completed or failed webhook requests"
11932
+ fallback_language: zod3.string().default(createTranscriptBodyLanguageDetectionOptionsFallbackLanguageDefault).describe(
11933
+ 'If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n'
11929
11934
  ),
11930
- webhook_auth_header_value: zod3.string().nullish().describe(
11931
- "The header value to send back with the transcript completed or failed webhook requests for added security"
11935
+ code_switching: zod3.boolean().optional().describe(
11936
+ "Whether [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) should be detected.\n"
11932
11937
  ),
11933
- auto_highlights: zod3.boolean().optional().describe("Enable Key Phrases, either true or false"),
11934
- audio_start_from: zod3.number().optional().describe("The point in time, in milliseconds, to begin transcribing in your media file"),
11935
- audio_end_at: zod3.number().optional().describe("The point in time, in milliseconds, to stop transcribing in your media file"),
11936
- word_boost: zod3.array(zod3.string()).optional().describe("The list of custom vocabulary to boost transcription probability for"),
11937
- boost_param: zod3.enum(["low", "default", "high"]).optional().describe("How much to boost specified words"),
11938
- filter_profanity: zod3.boolean().optional().describe("Filter profanity from the transcribed text, can be true or false"),
11939
- redact_pii: zod3.boolean().optional().describe(
11940
- "Redact PII from the transcribed text using the Redact PII model, can be true or false"
11938
+ code_switching_confidence_threshold: zod3.number().default(
11939
+ createTranscriptBodyLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault
11940
+ ).describe(
11941
+ "The confidence threshold for [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.\n"
11942
+ )
11943
+ }).optional().describe(
11944
+ "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
11945
+ ),
11946
+ multichannel: zod3.boolean().optional().describe(
11947
+ "Enable [Multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) transcription, can be true or false."
11948
+ ),
11949
+ prompt: zod3.string().optional().describe(
11950
+ "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
11951
+ ),
11952
+ punctuate: zod3.boolean().default(createTranscriptBodyPunctuateDefault).describe(
11953
+ "Enable [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio), can be true or false"
11954
+ ),
11955
+ redact_pii: zod3.boolean().optional().describe(
11956
+ "Redact PII from the transcribed text using the Redact PII model, can be true or false. See [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
11957
+ ),
11958
+ redact_pii_audio: zod3.boolean().optional().describe(
11959
+ 'Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details.'
11960
+ ),
11961
+ redact_pii_audio_options: zod3.object({
11962
+ return_redacted_no_speech_audio: zod3.boolean().optional().describe(
11963
+ "By default, audio redaction provides redacted audio URLs only when speech is detected. However, if your use-case specifically requires redacted audio files even for silent audio files without any dialogue, you can opt to receive these URLs by setting this parameter to `true`."
11941
11964
  ),
11942
- redact_pii_audio: zod3.boolean().optional().describe(
11943
- 'Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.'
11965
+ override_audio_redaction_method: zod3.enum(["silence"]).optional().describe(
11966
+ "Specify the method used to redact audio. By default, redacted audio uses a beep sound. Set to `silence` to replace PII with silence instead of a beep."
11967
+ )
11968
+ }).optional().describe(
11969
+ "Specify options for [PII redacted audio](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) files."
11970
+ ),
11971
+ redact_pii_audio_quality: zod3.enum(["mp3", "wav"]).optional().describe(
11972
+ "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details."
11973
+ ),
11974
+ redact_pii_policies: zod3.array(
11975
+ zod3.enum([
11976
+ "account_number",
11977
+ "banking_information",
11978
+ "blood_type",
11979
+ "credit_card_cvv",
11980
+ "credit_card_expiration",
11981
+ "credit_card_number",
11982
+ "date",
11983
+ "date_interval",
11984
+ "date_of_birth",
11985
+ "drivers_license",
11986
+ "drug",
11987
+ "duration",
11988
+ "email_address",
11989
+ "event",
11990
+ "filename",
11991
+ "gender_sexuality",
11992
+ "healthcare_number",
11993
+ "injury",
11994
+ "ip_address",
11995
+ "language",
11996
+ "location",
11997
+ "marital_status",
11998
+ "medical_condition",
11999
+ "medical_process",
12000
+ "money_amount",
12001
+ "nationality",
12002
+ "number_sequence",
12003
+ "occupation",
12004
+ "organization",
12005
+ "passport_number",
12006
+ "password",
12007
+ "person_age",
12008
+ "person_name",
12009
+ "phone_number",
12010
+ "physical_attribute",
12011
+ "political_affiliation",
12012
+ "religion",
12013
+ "statistics",
12014
+ "time",
12015
+ "url",
12016
+ "us_social_security_number",
12017
+ "username",
12018
+ "vehicle_id",
12019
+ "zodiac_sign"
12020
+ ]).describe("The type of PII to redact")
12021
+ ).optional().describe(
12022
+ "The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
12023
+ ),
12024
+ redact_pii_sub: zod3.enum(["entity_name", "hash"]).describe(
12025
+ "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
12026
+ ).or(zod3.null()).optional().describe(
12027
+ "The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
12028
+ ),
12029
+ sentiment_analysis: zod3.boolean().optional().describe(
12030
+ "Enable [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis), can be true or false"
12031
+ ),
12032
+ speaker_labels: zod3.boolean().optional().describe(
12033
+ "Enable [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization), can be true or false"
12034
+ ),
12035
+ speaker_options: zod3.object({
12036
+ min_speakers_expected: zod3.number().default(createTranscriptBodySpeakerOptionsMinSpeakersExpectedDefault).describe(
12037
+ "The minimum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details."
11944
12038
  ),
11945
- redact_pii_audio_quality: zod3.enum(["mp3", "wav"]).optional().describe(
11946
- "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details."
12039
+ max_speakers_expected: zod3.number().optional().describe(
12040
+ "<Warning>Setting this parameter too high may hurt model accuracy</Warning>\nThe maximum number of speakers expected in the audio file. The default depends on audio duration: no limit for 0-2 minutes, 10 for 2-10 minutes, and 30 for 10+ minutes. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details.\n"
12041
+ )
12042
+ }).optional().describe(
12043
+ "Specify options for [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers). Use this to set a range of possible speakers."
12044
+ ),
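// Illustrative sketch (not part of the package diff): constraining
// diarization to a plausible range of speakers. Counts are hypothetical;
// per the warning above, very high maximums may hurt accuracy.
const diarizationExample = {
  speaker_labels: true,
  speaker_options: {
    min_speakers_expected: 2,
    max_speakers_expected: 4,
  },
};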
12045
+ speakers_expected: zod3.number().nullish().describe(
12046
+ "Tells the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
12047
+ ),
12048
+ speech_models: zod3.array(
12049
+ zod3.string().describe(
12050
+ "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
12051
+ )
12052
+ ).describe(
12053
+ "List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.\n"
12054
+ ),
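// Illustrative sketch (not part of the package diff): listing models in
// priority order so routing can fall back automatically. The slugs are
// assumed identifiers for the Universal-3 Pro and Universal-2 models named
// in the descriptions above.
const modelRoutingExample = {
  speech_models: ["universal-3-pro", "universal-2"],
};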
12055
+ speech_threshold: zod3.number().nullish().describe(
12056
+ "Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.\n"
12057
+ ),
12058
+ speech_understanding: zod3.object({
12059
+ request: zod3.object({
12060
+ translation: zod3.object({
12061
+ target_languages: zod3.array(zod3.string()).describe(
12062
+ 'List of target language codes (e.g., `["es", "de"]`). See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for supported languages.'
12063
+ ),
12064
+ formal: zod3.boolean().default(createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault).describe(
12065
+ "Use formal language style. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
12066
+ ),
12067
+ match_original_utterance: zod3.boolean().optional().describe(
12068
+ "When enabled with Speaker Labels, returns translated text in the utterances array. Each utterance will include a `translated_texts` key containing translations for each target language."
12069
+ )
12070
+ })
12071
+ }).describe(
12072
+ "Request body for [Translation](https://www.assemblyai.com/docs/speech-understanding/translation)."
12073
+ ).or(
12074
+ zod3.object({
12075
+ speaker_identification: zod3.object({
12076
+ speaker_type: zod3.enum(["role", "name"]).describe(
12077
+ "Type of speaker identification. See [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification) for details on each type."
12078
+ ),
12079
+ known_values: zod3.array(zod3.string()).optional().describe(
12080
+ 'Required if speaker_type is "role". Each value must be 35 characters or less.'
12081
+ ),
12082
+ speakers: zod3.array(
12083
+ zod3.object({
12084
+ role: zod3.string().optional().describe(
12085
+ 'The role of the speaker. Required when `speaker_type` is "role".'
12086
+ ),
12087
+ name: zod3.string().optional().describe(
12088
+ 'The name of the speaker. Required when `speaker_type` is "name".'
12089
+ ),
12090
+ description: zod3.string().optional().describe(
12091
+ "A description of the speaker to help the model identify them based on conversational context."
12092
+ )
12093
+ })
12094
+ ).optional().describe(
12095
+ "An array of speaker objects with metadata to improve identification accuracy. Each object should include a `role` or `name` (depending on `speaker_type`) and an optional `description` to help the model identify the speaker. You can also include any additional custom properties (e.g., `company`, `title`) to provide more context. Use this as an alternative to `known_values` when you want to provide additional context about each speaker."
12096
+ )
12097
+ })
12098
+ }).describe(
12099
+ "Request body for [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification)."
12100
+ )
12101
+ ).or(
12102
+ zod3.object({
12103
+ custom_formatting: zod3.object({
12104
+ date: zod3.string().optional().describe(
12105
+ 'Date format pattern (e.g., `"mm/dd/yyyy"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
12106
+ ),
12107
+ phone_number: zod3.string().optional().describe(
12108
+ 'Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
12109
+ ),
12110
+ email: zod3.string().optional().describe(
12111
+ 'Email format pattern (e.g., `"username@domain.com"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
12112
+ )
12113
+ })
12114
+ }).describe(
12115
+ "Request body for [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting)."
12116
+ )
12117
+ )
12118
+ }).optional().describe(
12119
+ "Enable speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.\n"
12120
+ ),
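// Illustrative sketch (not part of the package diff): one shape the union
// above accepts, a translation request. Language codes are hypothetical.
const translationExample = {
  speech_understanding: {
    request: {
      translation: {
        target_languages: ["es", "de"],
        formal: true,
        match_original_utterance: true,
      },
    },
  },
};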
12121
+ summarization: zod3.boolean().optional().describe(
12122
+ "Enable [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
12123
+ ),
12124
+ summary_model: zod3.enum(["informative", "conversational", "catchy"]).optional().describe("The model to summarize the transcript"),
12125
+ summary_type: zod3.enum(["bullets", "bullets_verbose", "gist", "headline", "paragraph"]).optional().describe("The type of summary"),
12126
+ remove_audio_tags: zod3.enum(["all"]).describe(
12127
+ 'Remove [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) from the transcript text. Set to `"all"` to remove all audio tags.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n'
12128
+ ).or(zod3.null()).optional().describe(
12129
+ 'Remove [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) from the transcript text. Set to `"all"` to remove all audio tags.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n'
12130
+ ),
12131
+ temperature: zod3.number().optional().describe(
12132
+ "Control the amount of randomness injected into the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
12133
+ ),
12134
+ webhook_auth_header_name: zod3.string().nullish().describe(
12135
+ "The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests"
12136
+ ),
12137
+ webhook_auth_header_value: zod3.string().nullish().describe(
12138
+ "The header value to send back with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests for added security"
12139
+ ),
12140
+ webhook_url: zod3.string().optional().describe(
12141
+ "The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.\n"
12142
+ ),
12143
+ custom_topics: zod3.boolean().optional().describe("This parameter does not currently have any functionality attached to it."),
12144
+ topics: zod3.array(zod3.string()).optional().describe("This parameter does not currently have any functionality attached to it."),
12145
+ audio_url: zod3.string().describe("The URL of the audio or video file to transcribe.")
12146
+ }).describe("The parameters for creating a transcript");
12147
+ var createTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault = "auto";
12148
+ var createTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault = false;
12149
+ var createTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault = 0.3;
12150
+ var createTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault = false;
12151
+ var createTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault = true;
12152
+ var createTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
12153
+ var createTranscriptResponse = zod3.object({
12154
+ audio_channels: zod3.number().optional().describe(
12155
+ "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
12156
+ ),
12157
+ audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
12158
+ audio_end_at: zod3.number().nullish().describe(
12159
+ "The point in time, in milliseconds, in the file at which the transcription was terminated. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
12160
+ ),
12161
+ audio_start_from: zod3.number().nullish().describe(
12162
+ "The point in time, in milliseconds, in the file at which the transcription was started. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
12163
+ ),
12164
+ audio_url: zod3.string().describe("The URL of the media that was transcribed"),
12165
+ auto_chapters: zod3.boolean().nullish().describe(
12166
+ "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
12167
+ ),
12168
+ auto_highlights: zod3.boolean().describe(
12169
+ "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
12170
+ ),
12171
+ auto_highlights_result: zod3.object({
12172
+ status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
12173
+ results: zod3.array(
12174
+ zod3.object({
12175
+ count: zod3.number().describe("The total number of times the key phrase appears in the audio file"),
12176
+ rank: zod3.number().describe(
12177
+ "The total relevancy to the overall audio file of this key phrase - a greater number means more relevant"
12178
+ ),
12179
+ text: zod3.string().describe("The text itself of the key phrase"),
12180
+ timestamps: zod3.array(
12181
+ zod3.object({
12182
+ start: zod3.number().describe("The start time in milliseconds"),
12183
+ end: zod3.number().describe("The end time in milliseconds")
12184
+ }).describe("Timestamp containing a start and end property in milliseconds")
12185
+ ).describe("The timestamp of the of the key phrase")
12186
+ })
12187
+ ).describe("A temporally-sequential array of Key Phrases")
12188
+ }).describe(
12189
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
12190
+ ).or(zod3.null()).optional().describe(
12191
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
12192
+ ),
12193
+ chapters: zod3.array(
12194
+ zod3.object({
12195
+ gist: zod3.string().describe(
12196
+ "An ultra-short summary (just a few words) of the content spoken in the chapter"
12197
+ ),
12198
+ headline: zod3.string().describe("A single sentence summary of the content spoken during the chapter"),
12199
+ summary: zod3.string().describe("A one paragraph summary of the content spoken during the chapter"),
12200
+ start: zod3.number().describe("The starting time, in milliseconds, for the chapter"),
12201
+ end: zod3.number().describe("The ending time, in milliseconds, for the chapter")
12202
+ }).describe("Chapter of the audio file")
12203
+ ).nullish().describe(
12204
+ "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
12205
+ ),
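// Illustrative sketch (not part of the package diff): printing chapter
// headlines from a completed transcript object matching this schema.
// `transcript` is a hypothetical, already-parsed response.
function printChapterHeadlines(transcript) {
  for (const chapter of transcript.chapters ?? []) {
    console.log(`${chapter.start}ms-${chapter.end}ms: ${chapter.headline}`);
  }
}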
12206
+ confidence: zod3.number().nullish().describe(
12207
+ "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
12208
+ ),
12209
+ content_safety: zod3.boolean().nullish().describe(
12210
+ "Whether [Content Moderation](https://www.assemblyai.com/docs/content-moderation) is enabled, can be true or false"
12211
+ ),
12212
+ content_safety_labels: zod3.object({
12213
+ status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
12214
+ results: zod3.array(
12215
+ zod3.object({
12216
+ text: zod3.string().describe("The transcript of the section flagged by the Content Moderation model"),
12217
+ labels: zod3.array(
12218
+ zod3.object({
12219
+ label: zod3.string().describe("The label of the sensitive topic"),
12220
+ confidence: zod3.number().describe("The confidence score for the topic being discussed, from 0 to 1"),
12221
+ severity: zod3.number().describe("How severely the topic is discussed in the section, from 0 to 1")
12222
+ })
12223
+ ).describe(
12224
+ "An array of safety labels, one per sensitive topic that was detected in the section"
12225
+ ),
12226
+ sentences_idx_start: zod3.number().describe("The sentence index at which the section begins"),
12227
+ sentences_idx_end: zod3.number().describe("The sentence index at which the section ends"),
12228
+ timestamp: zod3.object({
12229
+ start: zod3.number().describe("The start time in milliseconds"),
12230
+ end: zod3.number().describe("The end time in milliseconds")
12231
+ }).describe("Timestamp containing a start and end property in milliseconds")
12232
+ })
12233
+ ).describe("An array of results for the Content Moderation model"),
12234
+ summary: zod3.record(zod3.string(), zod3.number()).describe(
12235
+ "A summary of the Content Moderation confidence results for the entire audio file"
11947
12236
  ),
11948
- redact_pii_policies: zod3.array(
11949
- zod3.enum([
12237
+ severity_score_summary: zod3.record(
12238
+ zod3.string(),
12239
+ zod3.object({
12240
+ low: zod3.number(),
12241
+ medium: zod3.number(),
12242
+ high: zod3.number()
12243
+ })
12244
+ ).describe(
12245
+ "A summary of the Content Moderation severity results for the entire audio file"
12246
+ )
12247
+ }).describe(
12248
+ "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.\n"
12249
+ ).or(zod3.null()).optional().describe(
12250
+ "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.\n"
12251
+ ),
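// Illustrative sketch (not part of the package diff): scanning a parsed
// content_safety_labels result for severe sections. `transcript` is a
// hypothetical, already-parsed response; 0.5 is an arbitrary cutoff.
function logSevereTopics(transcript) {
  const labels = transcript.content_safety_labels;
  if (!labels || labels.status !== "success") return;
  for (const section of labels.results) {
    for (const { label, confidence, severity } of section.labels) {
      if (severity > 0.5) console.log(label, confidence, severity);
    }
  }
}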
12252
+ custom_spelling: zod3.array(
12253
+ zod3.object({
12254
+ from: zod3.array(zod3.string()).describe("Words or phrases to replace"),
12255
+ to: zod3.string().describe("Word to replace with")
12256
+ }).describe(
12257
+ "Object containing words or phrases to replace, and the word or phrase to replace with"
12258
+ )
12259
+ ).nullish().describe(
12260
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
12261
+ ),
12262
+ disfluencies: zod3.boolean().nullish().describe(
12263
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
12264
+ ),
12265
+ domain: zod3.string().nullish().describe(
12266
+ 'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
12267
+ ),
12268
+ entities: zod3.array(
12269
+ zod3.object({
12270
+ entity_type: zod3.enum([
11950
12271
  "account_number",
11951
12272
  "banking_information",
11952
12273
  "blood_type",
@@ -11991,103 +12312,59 @@ var createTranscriptBody = zod3.object({
11991
12312
  "username",
11992
12313
  "vehicle_id",
11993
12314
  "zodiac_sign"
11994
- ]).describe("The type of PII to redact")
11995
- ).optional().describe(
11996
- "The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details."
11997
- ),
11998
- redact_pii_sub: zod3.enum(["entity_name", "hash"]).describe(
11999
- 'The replacement logic for detected PII, can be "entity_name" or "hash". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.'
12000
- ).or(zod3.null()).default(createTranscriptBodyRedactPiiSubDefault).describe(
12001
- 'The replacement logic for detected PII, can be "entity_name" or "hash". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.'
12002
- ),
12003
- speaker_labels: zod3.boolean().optional().describe(
12004
- "Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false"
12005
- ),
12006
- speakers_expected: zod3.number().nullish().describe(
12007
- "Tells the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details."
12008
- ),
12009
- content_safety: zod3.boolean().optional().describe(
12010
- "Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false"
12011
- ),
12012
- content_safety_confidence: zod3.number().min(createTranscriptBodyContentSafetyConfidenceMin).max(createTranscriptBodyContentSafetyConfidenceMax).default(createTranscriptBodyContentSafetyConfidenceDefault).describe(
12013
- "The confidence threshold for the Content Moderation model. Values must be between 25 and 100."
12014
- ),
12015
- iab_categories: zod3.boolean().optional().describe(
12016
- "Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false"
12017
- ),
12018
- custom_spelling: zod3.array(
12019
- zod3.object({
12020
- from: zod3.array(zod3.string().describe("Word or phrase to replace")).describe("Words or phrases to replace"),
12021
- to: zod3.string().describe("Word to replace with")
12022
- }).describe(
12023
- "Object containing words or phrases to replace, and the word or phrase to replace with"
12315
+ ]).describe("The type of entity for the detected entity"),
12316
+ text: zod3.string().describe("The text for the detected entity"),
12317
+ start: zod3.number().describe(
12318
+ "The starting time, in milliseconds, at which the detected entity appears in the audio file"
12319
+ ),
12320
+ end: zod3.number().describe(
12321
+ "The ending time, in milliseconds, for the detected entity in the audio file"
12024
12322
  )
12025
- ).optional().describe("Customize how words are spelled and formatted using to and from values"),
12026
- keyterms_prompt: zod3.array(zod3.string()).optional().describe(
12027
- "<Warning>`keyterms_prompt` is only supported when the `speech_model` is specified as `slam-1`</Warning>\nImprove accuracy with up to 1000 domain-specific words or phrases (maximum 6 words per phrase).\n"
12028
- ),
12029
- prompt: zod3.string().optional().describe("This parameter does not currently have any functionality attached to it."),
12030
- sentiment_analysis: zod3.boolean().optional().describe(
12031
- "Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false"
12032
- ),
12033
- auto_chapters: zod3.boolean().optional().describe(
12034
- "Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false"
12035
- ),
12036
- entity_detection: zod3.boolean().optional().describe(
12037
- "Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false"
12038
- ),
12039
- speech_threshold: zod3.number().min(createTranscriptBodySpeechThresholdMin).max(createTranscriptBodySpeechThresholdMax).nullish().describe(
12040
- "Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n"
12041
- ),
12042
- summarization: zod3.boolean().optional().describe(
12043
- "Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false"
12044
- ),
12045
- summary_model: zod3.enum(["informative", "conversational", "catchy"]).optional().describe("The model to summarize the transcript"),
12046
- summary_type: zod3.enum(["bullets", "bullets_verbose", "gist", "headline", "paragraph"]).optional().describe("The type of summary"),
12047
- custom_topics: zod3.boolean().optional().describe("Enable custom topics, either true or false"),
12048
- topics: zod3.array(zod3.string()).optional().describe("The list of custom topics")
12049
- }).describe("The parameters for creating a transcript")
12050
- ).describe("The parameters for creating a transcript");
12051
- var createTranscriptResponseLanguageConfidenceThresholdMin = 0;
12052
- var createTranscriptResponseLanguageConfidenceThresholdMax = 1;
12053
- var createTranscriptResponseLanguageConfidenceMin = 0;
12054
- var createTranscriptResponseLanguageConfidenceMax = 1;
12055
- var createTranscriptResponseSpeechModelDefault = null;
12056
- var createTranscriptResponseWordsItemConfidenceMin = 0;
12057
- var createTranscriptResponseWordsItemConfidenceMax = 1;
12058
- var createTranscriptResponseUtterancesItemConfidenceMin = 0;
12059
- var createTranscriptResponseUtterancesItemConfidenceMax = 1;
12060
- var createTranscriptResponseUtterancesItemWordsItemConfidenceMin = 0;
12061
- var createTranscriptResponseUtterancesItemWordsItemConfidenceMax = 1;
12062
- var createTranscriptResponseConfidenceMin = 0;
12063
- var createTranscriptResponseConfidenceMax = 1;
12064
- var createTranscriptResponseAutoHighlightsResultResultsItemRankMin = 0;
12065
- var createTranscriptResponseAutoHighlightsResultResultsItemRankMax = 1;
12066
- var createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMin = 0;
12067
- var createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax = 1;
12068
- var createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin = 0;
12069
- var createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax = 1;
12070
- var createTranscriptResponseContentSafetyLabelsSummaryMinOne = 0;
12071
- var createTranscriptResponseContentSafetyLabelsSummaryMaxOne = 1;
12072
- var createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin = 0;
12073
- var createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax = 1;
12074
- var createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin = 0;
12075
- var createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax = 1;
12076
- var createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin = 0;
12077
- var createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax = 1;
12078
- var createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin = 0;
12079
- var createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax = 1;
12080
- var createTranscriptResponseIabCategoriesResultSummaryMinOne = 0;
12081
- var createTranscriptResponseIabCategoriesResultSummaryMaxOne = 1;
12082
- var createTranscriptResponseSentimentAnalysisResultsItemConfidenceMin = 0;
12083
- var createTranscriptResponseSentimentAnalysisResultsItemConfidenceMax = 1;
12084
- var createTranscriptResponseSpeechThresholdMin = 0;
12085
- var createTranscriptResponseSpeechThresholdMax = 1;
12086
- var createTranscriptResponse = zod3.object({
12323
+ }).describe("A detected entity")
12324
+ ).nullish().describe(
12325
+ "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
12326
+ ),
12327
+ entity_detection: zod3.boolean().nullish().describe(
12328
+ "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
12329
+ ),
12330
+ error: zod3.string().optional().describe("Error message explaining why the transcript failed"),
12331
+ filter_profanity: zod3.boolean().nullish().describe(
12332
+ "Whether [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) is enabled, either true or false"
12333
+ ),
12334
+ format_text: zod3.boolean().nullish().describe(
12335
+ "Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
12336
+ ),
12337
+ iab_categories: zod3.boolean().nullish().describe(
12338
+ "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
12339
+ ),
12340
+ iab_categories_result: zod3.object({
12341
+ status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
12342
+ results: zod3.array(
12343
+ zod3.object({
12344
+ text: zod3.string().describe("The text in the transcript in which a detected topic occurs"),
12345
+ labels: zod3.array(
12346
+ zod3.object({
12347
+ relevance: zod3.number().describe("How relevant the detected topic is, from 0 to 1"),
12348
+ label: zod3.string().describe(
12349
+ "The IAB taxonomical label for the label of the detected topic, where > denotes supertopic/subtopic relationship"
12350
+ )
12351
+ })
12352
+ ).optional().describe("An array of detected topics in the text"),
12353
+ timestamp: zod3.object({
12354
+ start: zod3.number().describe("The start time in milliseconds"),
12355
+ end: zod3.number().describe("The end time in milliseconds")
12356
+ }).optional().describe("Timestamp containing a start and end property in milliseconds")
12357
+ }).describe("The result of the topic detection model")
12358
+ ).describe("An array of results for the Topic Detection model"),
12359
+ summary: zod3.record(zod3.string(), zod3.number()).describe("The overall relevance of each detected topic to the entire audio file")
12360
+ }).describe(
12361
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
12362
+ ).or(zod3.null()).optional().describe(
12363
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
12364
+ ),
12087
12365
  id: zod3.string().uuid().describe("The unique identifier of your transcript"),
12088
- audio_url: zod3.string().describe("The URL of the media that was transcribed"),
12089
- status: zod3.enum(["queued", "processing", "completed", "error"]).describe(
12090
- "The status of your transcript. Possible values are queued, processing, completed, or error."
12366
+ keyterms_prompt: zod3.array(zod3.string()).optional().describe(
12367
+ "Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3 Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.\n"
12091
12368
  ),
12092
12369
  language_code: zod3.enum([
12093
12370
  "en",
@@ -12192,138 +12469,175 @@ var createTranscriptResponse = zod3.object({
12192
12469
  "cy",
12193
12470
  "yi",
12194
12471
  "yo"
12195
- ]).describe(
12196
- "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n"
12197
- ).or(zod3.string()).optional().describe(
12198
- "The language of your audio file.\nPossible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n"
12199
- ),
12200
- language_detection: zod3.boolean().nullish().describe(
12201
- "Whether [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection) is enabled, either true or false"
12202
- ),
12203
- language_confidence_threshold: zod3.number().min(createTranscriptResponseLanguageConfidenceThresholdMin).max(createTranscriptResponseLanguageConfidenceThresholdMax).nullable().describe(
12204
- "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\n"
12205
- ),
12206
- language_confidence: zod3.number().min(createTranscriptResponseLanguageConfidenceMin).max(createTranscriptResponseLanguageConfidenceMax).nullable().describe(
12207
- "The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence)"
12208
- ),
12209
- speech_model: zod3.enum(["best", "slam-1", "universal"]).describe("The speech model to use for the transcription.").or(zod3.null()).describe(
12210
- "The speech model used for the transcription. When `null`, the default model is used."
12211
- ),
12212
- text: zod3.string().nullish().describe("The textual transcript of your media file"),
12213
- words: zod3.array(
12214
- zod3.object({
12215
- confidence: zod3.number().min(createTranscriptResponseWordsItemConfidenceMin).max(createTranscriptResponseWordsItemConfidenceMax).describe("The confidence score for the transcript of this word"),
12216
- start: zod3.number().describe("The starting time, in milliseconds, for the word"),
12217
- end: zod3.number().describe("The ending time, in milliseconds, for the word"),
12218
- text: zod3.string().describe("The text of the word"),
12219
- channel: zod3.string().nullish().describe(
12220
- "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
12221
- ),
12222
- speaker: zod3.string().nullable().describe(
12223
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
12224
- )
12225
- })
12226
- ).nullish().describe(
12227
- "An array of temporally-sequential word objects, one for each word in the transcript.\nSee [Speech recognition](https://www.assemblyai.com/docs/models/speech-recognition) for more information.\n"
12472
+ ]).optional().describe(
12473
+ "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
12228
12474
  ),
12229
- utterances: zod3.array(
12230
- zod3.object({
12231
- confidence: zod3.number().min(createTranscriptResponseUtterancesItemConfidenceMin).max(createTranscriptResponseUtterancesItemConfidenceMax).describe("The confidence score for the transcript of this utterance"),
12232
- start: zod3.number().describe("The starting time, in milliseconds, of the utterance in the audio file"),
12233
- end: zod3.number().describe("The ending time, in milliseconds, of the utterance in the audio file"),
12234
- text: zod3.string().describe("The text for this utterance"),
12235
- words: zod3.array(
12236
- zod3.object({
12237
- confidence: zod3.number().min(createTranscriptResponseUtterancesItemWordsItemConfidenceMin).max(createTranscriptResponseUtterancesItemWordsItemConfidenceMax).describe("The confidence score for the transcript of this word"),
12238
- start: zod3.number().describe("The starting time, in milliseconds, for the word"),
12239
- end: zod3.number().describe("The ending time, in milliseconds, for the word"),
12240
- text: zod3.string().describe("The text of the word"),
12241
- channel: zod3.string().nullish().describe(
12242
- "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
12243
- ),
12244
- speaker: zod3.string().nullable().describe(
12245
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
12246
- )
12247
- })
12248
- ).describe("The words in the utterance."),
12249
- channel: zod3.string().nullish().describe(
12250
- "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
12251
- ),
12252
- speaker: zod3.string().describe(
12253
- 'The speaker of this utterance, where each speaker is assigned a sequential capital letter - e.g. "A" for Speaker A, "B" for Speaker B, etc.'
12254
- )
12255
- })
12475
+ language_codes: zod3.array(
12476
+ zod3.enum([
12477
+ "en",
12478
+ "en_au",
12479
+ "en_uk",
12480
+ "en_us",
12481
+ "es",
12482
+ "fr",
12483
+ "de",
12484
+ "it",
12485
+ "pt",
12486
+ "nl",
12487
+ "af",
12488
+ "sq",
12489
+ "am",
12490
+ "ar",
12491
+ "hy",
12492
+ "as",
12493
+ "az",
12494
+ "ba",
12495
+ "eu",
12496
+ "be",
12497
+ "bn",
12498
+ "bs",
12499
+ "br",
12500
+ "bg",
12501
+ "my",
12502
+ "ca",
12503
+ "zh",
12504
+ "hr",
12505
+ "cs",
12506
+ "da",
12507
+ "et",
12508
+ "fo",
12509
+ "fi",
12510
+ "gl",
12511
+ "ka",
12512
+ "el",
12513
+ "gu",
12514
+ "ht",
12515
+ "ha",
12516
+ "haw",
12517
+ "he",
12518
+ "hi",
12519
+ "hu",
12520
+ "is",
12521
+ "id",
12522
+ "ja",
12523
+ "jw",
12524
+ "kn",
12525
+ "kk",
12526
+ "km",
12527
+ "ko",
12528
+ "lo",
12529
+ "la",
12530
+ "lv",
12531
+ "ln",
12532
+ "lt",
12533
+ "lb",
12534
+ "mk",
12535
+ "mg",
12536
+ "ms",
12537
+ "ml",
12538
+ "mt",
12539
+ "mi",
12540
+ "mr",
12541
+ "mn",
12542
+ "ne",
12543
+ "no",
12544
+ "nn",
12545
+ "oc",
12546
+ "pa",
12547
+ "ps",
12548
+ "fa",
12549
+ "pl",
12550
+ "ro",
12551
+ "ru",
12552
+ "sa",
12553
+ "sr",
12554
+ "sn",
12555
+ "sd",
12556
+ "si",
12557
+ "sk",
12558
+ "sl",
12559
+ "so",
12560
+ "su",
12561
+ "sw",
12562
+ "sv",
12563
+ "tl",
12564
+ "tg",
12565
+ "ta",
12566
+ "tt",
12567
+ "te",
12568
+ "th",
12569
+ "bo",
12570
+ "tr",
12571
+ "tk",
12572
+ "uk",
12573
+ "ur",
12574
+ "uz",
12575
+ "vi",
12576
+ "cy",
12577
+ "yi",
12578
+ "yo"
12579
+ ]).describe(
12580
+ "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
12581
+ )
12256
12582
  ).nullish().describe(
12257
- "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/speech-to-text/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/speech-to-text/speech-recognition#multichannel-transcription) for more information.\n"
12583
+ "The language codes of your audio file. Used for [Code switching](/docs/speech-to-text/pre-recorded-audio/code-switching)\nOne of the values specified must be `en`.\n"
12258
12584
  ),
12259
- confidence: zod3.number().min(createTranscriptResponseConfidenceMin).max(createTranscriptResponseConfidenceMax).nullish().describe(
12260
- "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
12585
+ language_confidence: zod3.number().nullable().describe(
12586
+ "The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence). See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details."
12261
12587
  ),
12262
- audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
12263
- punctuate: zod3.boolean().nullish().describe("Whether Automatic Punctuation is enabled, either true or false"),
12264
- format_text: zod3.boolean().nullish().describe("Whether Text Formatting is enabled, either true or false"),
12265
- disfluencies: zod3.boolean().nullish().describe('Transcribe Filler Words, like "umm", in your media file; can be true or false'),
12266
- multichannel: zod3.boolean().nullish().describe(
12267
- "Whether [Multichannel transcription](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) was enabled in the transcription request, either true or false"
12268
- ),
12269
- audio_channels: zod3.number().optional().describe(
12270
- "The number of audio channels in the audio file. This is only present when multichannel is enabled."
12588
+ language_confidence_threshold: zod3.number().nullable().describe(
12589
+ "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nSee [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n"
12271
12590
  ),
12272
- webhook_url: zod3.string().nullish().describe(
12273
- "The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n"
12274
- ),
12275
- webhook_status_code: zod3.number().nullish().describe(
12276
- "The status code we received from your server when delivering the transcript completed or failed webhook request, if a webhook URL was provided"
12277
- ),
12278
- webhook_auth: zod3.boolean().describe("Whether webhook authentication details were provided"),
12279
- webhook_auth_header_name: zod3.string().nullish().describe(
12280
- "The header name to be sent with the transcript completed or failed webhook requests"
12591
+ language_detection: zod3.boolean().nullish().describe(
12592
+ "Whether [Automatic language detection](/docs/pre-recorded-audio/automatic-language-detection) is enabled, either true or false"
12281
12593
  ),
12282
- speed_boost: zod3.boolean().nullish().describe("Whether speed boost is enabled"),
12283
- auto_highlights: zod3.boolean().describe("Whether Key Phrases is enabled, either true or false"),
12284
- auto_highlights_result: zod3.object({
12285
- status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
12286
- results: zod3.array(
12287
- zod3.object({
12288
- count: zod3.number().describe("The total number of times the key phrase appears in the audio file"),
12289
- rank: zod3.number().min(createTranscriptResponseAutoHighlightsResultResultsItemRankMin).max(createTranscriptResponseAutoHighlightsResultResultsItemRankMax).describe(
12290
- "The total relevancy to the overall audio file of this key phrase - a greater number means more relevant"
12291
- ),
12292
- text: zod3.string().describe("The text itself of the key phrase"),
12293
- timestamps: zod3.array(
12294
- zod3.object({
12295
- start: zod3.number().describe("The start time in milliseconds"),
12296
- end: zod3.number().describe("The end time in milliseconds")
12297
- }).describe("Timestamp containing a start and end property in milliseconds")
12298
- ).describe("The timestamp of the of the key phrase")
12299
- })
12300
- ).describe("A temporally-sequential array of Key Phrases")
12301
- }).describe(
12302
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.\n"
12303
- ).or(zod3.null()).optional().describe(
12304
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.\n"
12594
+ language_detection_options: zod3.object({
12595
+ expected_languages: zod3.array(zod3.string()).optional().describe(
12596
+ 'List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.'
12597
+ ),
12598
+ fallback_language: zod3.string().default(createTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault).describe(
12599
+ 'If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `"auto"` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n'
12600
+ ),
12601
+ code_switching: zod3.boolean().optional().describe(
12602
+ "Whether [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) should be detected.\n"
12603
+ ),
12604
+ code_switching_confidence_threshold: zod3.number().default(
12605
+ createTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault
12606
+ ).describe(
12607
+ "The confidence threshold for [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.\n"
12608
+ )
12609
+ }).optional().describe(
12610
+ "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
12305
12611
  ),
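// Illustrative sketch (not part of the package diff): the shape the
// language_detection_options object above accepts, with code switching
// enabled. Language codes are hypothetical.
const languageDetectionExample = {
  language_detection: true,
  language_detection_options: {
    expected_languages: ["en", "es"],
    fallback_language: "en",
    code_switching: true,
    code_switching_confidence_threshold: 0.3,
  },
};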
12306
- audio_start_from: zod3.number().nullish().describe(
12307
- "The point in time, in milliseconds, in the file at which the transcription was started"
12612
+ multichannel: zod3.boolean().nullish().describe(
12613
+ "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
12308
12614
  ),
12309
- audio_end_at: zod3.number().nullish().describe(
12310
- "The point in time, in milliseconds, in the file at which the transcription was terminated"
12615
+ prompt: zod3.string().optional().describe(
12616
+ "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
12311
12617
  ),
12312
- word_boost: zod3.array(zod3.string()).optional().describe("The list of custom vocabulary to boost transcription probability for"),
12313
- boost_param: zod3.string().nullish().describe("The word boost parameter value"),
12314
- filter_profanity: zod3.boolean().nullish().describe(
12315
- "Whether [Profanity Filtering](https://www.assemblyai.com/docs/models/speech-recognition#profanity-filtering) is enabled, either true or false"
12618
+ punctuate: zod3.boolean().nullish().describe(
12619
+ "Whether [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
12316
12620
  ),
12317
12621
  redact_pii: zod3.boolean().describe(
12318
- "Whether [PII Redaction](https://www.assemblyai.com/docs/models/pii-redaction) is enabled, either true or false"
12622
+ "Whether [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) is enabled, either true or false"
12319
12623
  ),
12320
12624
  redact_pii_audio: zod3.boolean().nullish().describe(
12321
- "Whether a redacted version of the audio file was generated,\neither true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.\n"
12625
+ "Whether a redacted version of the audio file was generated,\neither true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
12626
+ ),
12627
+ redact_pii_audio_options: zod3.object({
12628
+ return_redacted_no_speech_audio: zod3.boolean().optional().describe(
12629
+ "By default, audio redaction provides redacted audio URLs only when speech is detected. However, if your use-case specifically requires redacted audio files even for silent audio files without any dialogue, you can opt to receive these URLs by setting this parameter to `true`."
12630
+ ),
12631
+ override_audio_redaction_method: zod3.enum(["silence"]).optional().describe(
12632
+ "Specify the method used to redact audio. By default, redacted audio uses a beep sound. Set to `silence` to replace PII with silence instead of a beep."
12633
+ )
12634
+ }).optional().describe(
12635
+ "The options for PII-redacted audio, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
12322
12636
  ),
12323
12637
  redact_pii_audio_quality: zod3.enum(["mp3", "wav"]).describe(
12324
- "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details."
12638
+ "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details."
12325
12639
  ).or(zod3.null()).optional().describe(
12326
- "The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.\n"
12640
+ "The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
12327
12641
  ),
12328
12642
  redact_pii_policies: zod3.array(
12329
12643
  zod3.enum([
@@ -12373,19 +12687,347 @@ var createTranscriptResponse = zod3.object({
12373
12687
  "zodiac_sign"
12374
12688
  ]).describe("The type of PII to redact")
12375
12689
  ).nullish().describe(
12376
- "The list of PII Redaction policies that were enabled, if PII Redaction is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.\n"
12690
+ "The list of PII Redaction policies that were enabled, if PII Redaction is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more information.\n"
12377
12691
  ),
12378
12692
  redact_pii_sub: zod3.enum(["entity_name", "hash"]).optional().describe(
12379
- 'The replacement logic for detected PII, can be "entity_name" or "hash". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.'
12693
+ "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
12694
+ ),
12695
+ sentiment_analysis: zod3.boolean().nullish().describe(
12696
+ "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
12697
+ ),
12698
+ sentiment_analysis_results: zod3.array(
12699
+ zod3.object({
12700
+ text: zod3.string().describe("The transcript of the sentence"),
12701
+ start: zod3.number().describe("The starting time, in milliseconds, of the sentence"),
12702
+ end: zod3.number().describe("The ending time, in milliseconds, of the sentence"),
12703
+ sentiment: zod3.enum(["POSITIVE", "NEUTRAL", "NEGATIVE"]),
12704
+ confidence: zod3.number().describe(
12705
+ "The confidence score for the detected sentiment of the sentence, from 0 to 1"
12706
+ ),
12707
+ channel: zod3.string().nullish().describe(
12708
+ "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
12709
+ ),
12710
+ speaker: zod3.string().nullable().describe(
12711
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
12712
+ )
12713
+ }).describe("The result of the Sentiment Analysis model")
12714
+ ).nullish().describe(
12715
+ "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
12380
12716
  ),
12381
12717
  speaker_labels: zod3.boolean().nullish().describe(
12382
- "Whether [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, can be true or false"
12718
+ "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
12383
12719
  ),
12384
12720
  speakers_expected: zod3.number().nullish().describe(
12385
- "Tell the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details."
12721
+ "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
12722
+ ),
12723
+ speech_model_used: zod3.string().optional().describe(
12724
+ "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
12725
+ ),
12726
+ speech_models: zod3.array(
12727
+ zod3.string().describe(
12728
+ "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
12729
+ )
12730
+ ).nullish().describe(
12731
+ "List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.\n"
12732
+ ),
12733
+ speech_threshold: zod3.number().nullish().describe(
12734
+ "Defaults to null. Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.\n"
12735
+ ),
12736
+ speech_understanding: zod3.object({
12737
+ request: zod3.object({
12738
+ translation: zod3.object({
12739
+ target_languages: zod3.array(zod3.string()).describe(
12740
+ 'List of target language codes (e.g., `["es", "de"]`). See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for supported languages.'
12741
+ ),
12742
+ formal: zod3.boolean().default(createTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault).describe(
12743
+ "Use formal language style. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
12744
+ ),
12745
+ match_original_utterance: zod3.boolean().optional().describe(
12746
+ "When enabled with Speaker Labels, returns translated text in the utterances array. Each utterance will include a `translated_texts` key containing translations for each target language."
12747
+ )
12748
+ })
12749
+ }).describe(
12750
+ "Request body for [Translation](https://www.assemblyai.com/docs/speech-understanding/translation)."
12751
+ ).or(
12752
+ zod3.object({
12753
+ speaker_identification: zod3.object({
12754
+ speaker_type: zod3.enum(["role", "name"]).describe(
12755
+ "Type of speaker identification. See [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification) for details on each type."
12756
+ ),
12757
+ known_values: zod3.array(zod3.string()).optional().describe(
12758
+ 'Required if speaker_type is "role". Each value must be 35 characters or less.'
12759
+ ),
12760
+ speakers: zod3.array(
12761
+ zod3.object({
12762
+ role: zod3.string().optional().describe(
12763
+ 'The role of the speaker. Required when `speaker_type` is "role".'
12764
+ ),
12765
+ name: zod3.string().optional().describe(
12766
+ 'The name of the speaker. Required when `speaker_type` is "name".'
12767
+ ),
12768
+ description: zod3.string().optional().describe(
12769
+ "A description of the speaker to help the model identify them based on conversational context."
12770
+ )
12771
+ })
12772
+ ).optional().describe(
12773
+ "An array of speaker objects with metadata to improve identification accuracy. Each object should include a `role` or `name` (depending on `speaker_type`) and an optional `description` to help the model identify the speaker. You can also include any additional custom properties (e.g., `company`, `title`) to provide more context. Use this as an alternative to `known_values` when you want to provide additional context about each speaker."
12774
+ )
12775
+ })
12776
+ }).describe(
12777
+ "Request body for [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification)."
12778
+ )
12779
+ ).or(
12780
+ zod3.object({
12781
+ custom_formatting: zod3.object({
12782
+ date: zod3.string().optional().describe(
12783
+ 'Date format pattern (e.g., `"mm/dd/yyyy"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
12784
+ ),
12785
+ phone_number: zod3.string().optional().describe(
12786
+ 'Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
12787
+ ),
12788
+ email: zod3.string().optional().describe(
12789
+ 'Email format pattern (e.g., `"username@domain.com"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
12790
+ )
12791
+ })
12792
+ }).describe(
12793
+ "Request body for [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting)."
12794
+ )
12795
+ ).optional(),
12796
+ response: zod3.object({
12797
+ translation: zod3.object({
12798
+ status: zod3.string().optional()
12799
+ }).optional()
12800
+ }).or(
12801
+ zod3.object({
12802
+ speaker_identification: zod3.object({
12803
+ mapping: zod3.record(zod3.string(), zod3.string()).optional().describe(
12804
+ 'A mapping of the original generic speaker labels (e.g., "A", "B") to the identified speaker names or roles.'
12805
+ ),
12806
+ status: zod3.string().optional()
12807
+ }).optional()
12808
+ })
12809
+ ).or(
12810
+ zod3.object({
12811
+ custom_formatting: zod3.object({
12812
+ mapping: zod3.record(zod3.string(), zod3.string()).optional(),
12813
+ formatted_text: zod3.string().optional()
12814
+ }).optional()
12815
+ })
12816
+ ).optional()
12817
+ }).optional().describe(
12818
+ "Speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.\n"
12819
+ ),
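// Sketch (hypothetical values, not part of this diff): a request body the
// speech_understanding.request union above would accept, using the
// speaker_identification branch with speaker_type "name":
// {
//   speech_understanding: {
//     request: {
//       speaker_identification: {
//         speaker_type: "name",
//         speakers: [
//           { name: "Ada", description: "Hosts the call and asks the questions" },
//           { name: "Grace", description: "Customer describing a billing issue" }
//         ]
//       }
//     }
//   }
// }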
12820
+ status: zod3.enum(["queued", "processing", "completed", "error"]).describe(
12821
+ "The status of your transcript. Possible values are queued, processing, completed, or error."
12822
+ ),
12823
+ summarization: zod3.boolean().describe(
12824
+ "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
12825
+ ),
12826
+ summary: zod3.string().nullish().describe(
12827
+ "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
12828
+ ),
12829
+ summary_model: zod3.string().nullish().describe(
12830
+ "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
12831
+ ),
12832
+ summary_type: zod3.string().nullish().describe(
12833
+ "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
12834
+ ),
12835
+ remove_audio_tags: zod3.enum(["all"]).describe(
12836
+ "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
12837
+ ).or(zod3.null()).optional().describe(
12838
+ "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
12839
+ ),
12840
+ temperature: zod3.number().nullish().describe(
12841
+ "The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
12842
+ ),
12843
+ text: zod3.string().nullish().describe("The textual transcript of your media file"),
12844
+ throttled: zod3.boolean().nullish().describe(
12845
+ "True while a request is throttled and false when a request is no longer throttled"
12846
+ ),
12847
+ utterances: zod3.array(
12848
+ zod3.object({
12849
+ confidence: zod3.number().describe("The confidence score for the transcript of this utterance"),
12850
+ start: zod3.number().describe("The starting time, in milliseconds, of the utterance in the audio file"),
12851
+ end: zod3.number().describe("The ending time, in milliseconds, of the utterance in the audio file"),
12852
+ text: zod3.string().describe("The text for this utterance"),
12853
+ words: zod3.array(
12854
+ zod3.object({
12855
+ confidence: zod3.number().describe("The confidence score for the transcript of this word"),
12856
+ start: zod3.number().describe("The starting time, in milliseconds, for the word"),
12857
+ end: zod3.number().describe("The ending time, in milliseconds, for the word"),
12858
+ text: zod3.string().describe("The text of the word"),
12859
+ channel: zod3.string().nullish().describe(
12860
+ "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
12861
+ ),
12862
+ speaker: zod3.string().nullable().describe(
12863
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
12864
+ )
12865
+ })
12866
+ ).describe("The words in the utterance."),
12867
+ channel: zod3.string().nullish().describe(
12868
+ "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
12869
+ ),
12870
+ speaker: zod3.string().describe(
12871
+ 'The speaker of this utterance, where each speaker is assigned a sequential capital letter - e.g. "A" for Speaker A, "B" for Speaker B, etc.'
12872
+ ),
12873
+ translated_texts: zod3.record(zod3.string(), zod3.string()).optional().describe(
12874
+ 'Translations keyed by language code (e.g., `{"es": "Texto traducido", "de": "\xDCbersetzter Text"}`). Only present when `match_original_utterance` is enabled with translation.'
12875
+ )
12876
+ })
12877
+ ).nullish().describe(
12878
+ "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
12879
+ ),
12880
+ webhook_auth: zod3.boolean().describe(
12881
+ "Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
12882
+ ),
12883
+ webhook_auth_header_name: zod3.string().nullish().describe(
12884
+ "The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests"
12885
+ ),
12886
+ webhook_status_code: zod3.number().nullish().describe(
12887
+ "The status code we received from your server when delivering the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) request, if a webhook URL was provided"
12888
+ ),
12889
+ webhook_url: zod3.string().nullish().describe(
12890
+ "The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.\n"
12891
+ ),
12892
+ words: zod3.array(
12893
+ zod3.object({
12894
+ confidence: zod3.number().describe("The confidence score for the transcript of this word"),
12895
+ start: zod3.number().describe("The starting time, in milliseconds, for the word"),
12896
+ end: zod3.number().describe("The ending time, in milliseconds, for the word"),
12897
+ text: zod3.string().describe("The text of the word"),
12898
+ channel: zod3.string().nullish().describe(
12899
+ "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
12900
+ ),
12901
+ speaker: zod3.string().nullable().describe(
12902
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
12903
+ )
12904
+ })
12905
+ ).nullish().describe(
12906
+ "An array of temporally-sequential word objects, one for each word in the transcript.\n"
12907
+ ),
12908
+ acoustic_model: zod3.string().describe("This parameter does not currently have any functionality attached to it."),
12909
+ custom_topics: zod3.boolean().nullish().describe("This parameter does not currently have any functionality attached to it."),
12910
+ language_model: zod3.string().describe("This parameter does not currently have any functionality attached to it."),
12911
+ speech_model: zod3.string().describe(
12912
+ "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
12913
+ ).or(zod3.null()).describe(
12914
+ "This parameter has been replaced with the `speech_models` parameter, learn more about the `speech_models` parameter [here](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model).\n"
12915
+ ),
12916
+ speed_boost: zod3.boolean().nullish().describe("This parameter does not currently have any functionality attached to it."),
12917
+ topics: zod3.array(zod3.string()).optional().describe("This parameter does not currently have any functionality attached to it."),
12918
+ translated_texts: zod3.object({
12919
+ language_code: zod3.string().optional().describe("Translated text for this language code")
12920
+ }).optional().describe(
12921
+ "Translated text keyed by language code. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
12922
+ )
12923
+ }).describe("A transcript object");
12924
+ var listTranscriptsQueryLimitDefault = 10;
12925
+ var listTranscriptsQueryThrottledOnlyDefault = false;
12926
+ var listTranscriptsQueryParams = zod3.object({
12927
+ limit: zod3.number().default(listTranscriptsQueryLimitDefault).describe("Maximum amount of transcripts to retrieve"),
12928
+ status: zod3.enum(["queued", "processing", "completed", "error"]).optional().describe("Filter by transcript status"),
12929
+ created_on: zod3.string().date().optional().describe("Only get transcripts created on this date"),
12930
+ before_id: zod3.string().uuid().optional().describe("Get transcripts that were created before this transcript ID"),
12931
+ after_id: zod3.string().uuid().optional().describe("Get transcripts that were created after this transcript ID"),
12932
+ throttled_only: zod3.boolean().optional().describe("Only get throttled transcripts, overrides the status filter")
12933
+ });
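A quick sketch of parse-time behavior for the query schema above, using standard zod semantics (the values are hypothetical):

listTranscriptsQueryParams.parse({});
// => { limit: 10 } - limit falls back to listTranscriptsQueryLimitDefault
listTranscriptsQueryParams.parse({ limit: 50, status: "completed" });
// => { limit: 50, status: "completed" }
listTranscriptsQueryParams.safeParse({ status: "done" }).success;
// => false - "done" is not in the status enum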
12934
+ var listTranscriptsResponse = zod3.object({
12935
+ page_details: zod3.object({
12936
+ limit: zod3.number().describe("The number of results this page is limited to"),
12937
+ result_count: zod3.number().describe("The actual number of results in the page"),
12938
+ current_url: zod3.string().describe("The URL used to retrieve the current page of transcripts"),
12939
+ prev_url: zod3.string().nullable().describe(
12940
+ "The URL to the next page of transcripts. The previous URL always points to a page with older transcripts."
12941
+ ),
12942
+ next_url: zod3.string().nullable().describe(
12943
+ "The URL to the next page of transcripts. The next URL always points to a page with newer transcripts."
12944
+ )
12945
+ }).describe(
12946
+ "Details of the transcript page. Transcripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts."
12947
+ ),
12948
+ transcripts: zod3.array(
12949
+ zod3.object({
12950
+ id: zod3.string().uuid().describe("The unique identifier for the transcript"),
12951
+ resource_url: zod3.string().describe("The URL to retrieve the transcript"),
12952
+ status: zod3.enum(["queued", "processing", "completed", "error"]).describe(
12953
+ "The status of your transcript. Possible values are queued, processing, completed, or error."
12954
+ ),
12955
+ created: zod3.string().datetime({}).describe("The date and time the transcript was created"),
12956
+ completed: zod3.string().datetime({}).optional().describe("The date and time the transcript was completed"),
12957
+ audio_url: zod3.string().describe("The URL to the audio file"),
12958
+ error: zod3.string().nullable().describe("Error message of why the transcript failed")
12959
+ })
12960
+ ).describe("An array of transcripts")
12961
+ }).describe(
12962
+ "A list of transcripts. Transcripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts."
12963
+ );
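A hypothetical pager over the list schema above: per the page_details descriptions, pages run newest to oldest, prev_url leads to older transcripts, and next_url to newer ones. fetchJson is an assumed injected helper, not part of this package:

async function* eachTranscript(firstPageUrl, fetchJson) {
  let url = firstPageUrl;
  while (url) {
    const page = listTranscriptsResponse.parse(await fetchJson(url));
    yield* page.transcripts;
    url = page.page_details.prev_url; // null once the oldest page is reached
  }
}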
12964
+ var getTranscriptParams = zod3.object({
12965
+ transcript_id: zod3.string().describe("ID of the transcript")
12966
+ });
12967
+ var getTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault = "auto";
12968
+ var getTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault = false;
12969
+ var getTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault = 0.3;
12970
+ var getTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault = false;
12971
+ var getTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault = true;
12972
+ var getTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
12973
+ var getTranscriptResponse = zod3.object({
12974
+ audio_channels: zod3.number().optional().describe(
12975
+ "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
12976
+ ),
12977
+ audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
12978
+ audio_end_at: zod3.number().nullish().describe(
12979
+ "The point in time, in milliseconds, in the file at which the transcription was terminated. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
12980
+ ),
12981
+ audio_start_from: zod3.number().nullish().describe(
12982
+ "The point in time, in milliseconds, in the file at which the transcription was started. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
12983
+ ),
12984
+ audio_url: zod3.string().describe("The URL of the media that was transcribed"),
12985
+ auto_chapters: zod3.boolean().nullish().describe(
12986
+ "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
12987
+ ),
12988
+ auto_highlights: zod3.boolean().describe(
12989
+ "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
12990
+ ),
12991
+ auto_highlights_result: zod3.object({
12992
+ status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
12993
+ results: zod3.array(
12994
+ zod3.object({
12995
+ count: zod3.number().describe("The total number of times the key phrase appears in the audio file"),
12996
+ rank: zod3.number().describe(
12997
+ "The total relevancy to the overall audio file of this key phrase - a greater number means more relevant"
12998
+ ),
12999
+ text: zod3.string().describe("The text itself of the key phrase"),
13000
+ timestamps: zod3.array(
13001
+ zod3.object({
13002
+ start: zod3.number().describe("The start time in milliseconds"),
13003
+ end: zod3.number().describe("The end time in milliseconds")
13004
+ }).describe("Timestamp containing a start and end property in milliseconds")
13005
+ ).describe("The timestamp of the of the key phrase")
13006
+ })
13007
+ ).describe("A temporally-sequential array of Key Phrases")
13008
+ }).describe(
13009
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
13010
+ ).or(zod3.null()).optional().describe(
13011
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
13012
+ ),
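// Sketch (hypothetical transcript value): ranking key phrases from the
// auto_highlights_result shape above by their relevancy rank:
// const phrases = (transcript.auto_highlights_result?.results ?? [])
//   .sort((a, b) => b.rank - a.rank)
//   .map((r) => `${r.text} (x${r.count})`);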
13013
+ chapters: zod3.array(
13014
+ zod3.object({
13015
+ gist: zod3.string().describe(
13016
+ "An ultra-short summary (just a few words) of the content spoken in the chapter"
13017
+ ),
13018
+ headline: zod3.string().describe("A single sentence summary of the content spoken during the chapter"),
13019
+ summary: zod3.string().describe("A one paragraph summary of the content spoken during the chapter"),
13020
+ start: zod3.number().describe("The starting time, in milliseconds, for the chapter"),
13021
+ end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
13022
+ }).describe("Chapter of the audio file")
13023
+ ).nullish().describe(
13024
+ "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
13025
+ ),
13026
+ confidence: zod3.number().nullish().describe(
13027
+ "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
12386
13028
  ),
12387
13029
  content_safety: zod3.boolean().nullish().describe(
12388
- "Whether [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation) is enabled, can be true or false"
13030
+ "Whether [Content Moderation](https://www.assemblyai.com/docs/content-moderation) is enabled, can be true or false"
12389
13031
  ),
12390
13032
  content_safety_labels: zod3.object({
12391
13033
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -12395,16 +13037,8 @@ var createTranscriptResponse = zod3.object({
12395
13037
  labels: zod3.array(
12396
13038
  zod3.object({
12397
13039
  label: zod3.string().describe("The label of the sensitive topic"),
12398
- confidence: zod3.number().min(
12399
- createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMin
12400
- ).max(
12401
- createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax
12402
- ).describe("The confidence score for the topic being discussed, from 0 to 1"),
12403
- severity: zod3.number().min(
12404
- createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin
12405
- ).max(
12406
- createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax
12407
- ).describe("How severely the topic is discussed in the section, from 0 to 1")
13040
+ confidence: zod3.number().describe("The confidence score for the topic being discussed, from 0 to 1"),
13041
+ severity: zod3.number().describe("How severely the topic is discussed in the section, from 0 to 1")
12408
13042
  })
12409
13043
  ).describe(
12410
13044
  "An array of safety labels, one per sensitive topic that was detected in the section"
@@ -12417,128 +13051,39 @@ var createTranscriptResponse = zod3.object({
12417
13051
  }).describe("Timestamp containing a start and end property in milliseconds")
12418
13052
  })
12419
13053
  ).describe("An array of results for the Content Moderation model"),
12420
- summary: zod3.record(
12421
- zod3.string(),
12422
- zod3.number().min(createTranscriptResponseContentSafetyLabelsSummaryMinOne).max(createTranscriptResponseContentSafetyLabelsSummaryMaxOne).describe(
12423
- 'A confidence score for the presence of the sensitive topic "topic" across the entire audio file'
12424
- )
12425
- ).describe(
13054
+ summary: zod3.record(zod3.string(), zod3.number()).describe(
12426
13055
  "A summary of the Content Moderation confidence results for the entire audio file"
12427
13056
  ),
12428
13057
  severity_score_summary: zod3.record(
12429
13058
  zod3.string(),
12430
13059
  zod3.object({
12431
- low: zod3.number().min(createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin).max(createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax),
12432
- medium: zod3.number().min(createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin).max(createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax),
12433
- high: zod3.number().min(createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin).max(createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax)
13060
+ low: zod3.number(),
13061
+ medium: zod3.number(),
13062
+ high: zod3.number()
12434
13063
  })
12435
13064
  ).describe(
12436
13065
  "A summary of the Content Moderation severity results for the entire audio file"
12437
13066
  )
12438
13067
  }).describe(
12439
- "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.\n"
13068
+ "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.\n"
12440
13069
  ).or(zod3.null()).optional().describe(
12441
- "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.\n"
12442
- ),
12443
- iab_categories: zod3.boolean().nullish().describe(
12444
- "Whether [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) is enabled, can be true or false"
12445
- ),
12446
- iab_categories_result: zod3.object({
12447
- status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
12448
- results: zod3.array(
12449
- zod3.object({
12450
- text: zod3.string().describe("The text in the transcript in which a detected topic occurs"),
12451
- labels: zod3.array(
12452
- zod3.object({
12453
- relevance: zod3.number().min(
12454
- createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin
12455
- ).max(
12456
- createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax
12457
- ).describe("How relevant the detected topic is of a detected topic"),
12458
- label: zod3.string().describe(
12459
- "The IAB taxonomical label for the label of the detected topic, where > denotes supertopic/subtopic relationship"
12460
- )
12461
- })
12462
- ).optional().describe("An array of detected topics in the text"),
12463
- timestamp: zod3.object({
12464
- start: zod3.number().describe("The start time in milliseconds"),
12465
- end: zod3.number().describe("The end time in milliseconds")
12466
- }).optional().describe("Timestamp containing a start and end property in milliseconds")
12467
- }).describe("The result of the topic detection model")
12468
- ).describe("An array of results for the Topic Detection model"),
12469
- summary: zod3.record(
12470
- zod3.string(),
12471
- zod3.number().min(createTranscriptResponseIabCategoriesResultSummaryMinOne).max(createTranscriptResponseIabCategoriesResultSummaryMaxOne)
12472
- ).describe("The overall relevance of topic to the entire audio file")
12473
- }).describe(
12474
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.\n"
12475
- ).or(zod3.null()).optional().describe(
12476
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.\n"
13070
+ "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.\n"
12477
13071
  ),
12478
13072
  custom_spelling: zod3.array(
12479
13073
  zod3.object({
12480
- from: zod3.array(zod3.string().describe("Word or phrase to replace")).describe("Words or phrases to replace"),
13074
+ from: zod3.array(zod3.string()).describe("Words or phrases to replace"),
12481
13075
  to: zod3.string().describe("Word to replace with")
12482
13076
  }).describe(
12483
13077
  "Object containing words or phrases to replace, and the word or phrase to replace with"
12484
13078
  )
12485
- ).nullish().describe("Customize how words are spelled and formatted using to and from values"),
12486
- keyterms_prompt: zod3.array(zod3.string()).optional().describe(
12487
- "Improve accuracy with up to 1000 domain-specific words or phrases (maximum 6 words per phrase).\n"
12488
- ),
12489
- prompt: zod3.string().optional().describe("This parameter does not currently have any functionality attached to it."),
12490
- auto_chapters: zod3.boolean().nullish().describe(
12491
- "Whether [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) is enabled, can be true or false"
12492
- ),
12493
- chapters: zod3.array(
12494
- zod3.object({
12495
- gist: zod3.string().describe(
12496
- "An ultra-short summary (just a few words) of the content spoken in the chapter"
12497
- ),
12498
- headline: zod3.string().describe("A single sentence summary of the content spoken during the chapter"),
12499
- summary: zod3.string().describe("A one paragraph summary of the content spoken during the chapter"),
12500
- start: zod3.number().describe("The starting time, in milliseconds, for the chapter"),
12501
- end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
12502
- }).describe("Chapter of the audio file")
12503
- ).nullish().describe("An array of temporally sequential chapters for the audio file"),
12504
- summarization: zod3.boolean().describe(
12505
- "Whether [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled, either true or false"
12506
- ),
12507
- summary_type: zod3.string().nullish().describe(
12508
- "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled"
12509
- ),
12510
- summary_model: zod3.string().nullish().describe(
12511
- "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled\n"
12512
- ),
12513
- summary: zod3.string().nullish().describe(
12514
- "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled"
12515
- ),
12516
- custom_topics: zod3.boolean().nullish().describe("Whether custom topics is enabled, either true or false"),
12517
- topics: zod3.array(zod3.string()).optional().describe("The list of custom topics provided if custom topics is enabled"),
12518
- sentiment_analysis: zod3.boolean().nullish().describe(
12519
- "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) is enabled, can be true or false"
12520
- ),
12521
- sentiment_analysis_results: zod3.array(
12522
- zod3.object({
12523
- text: zod3.string().describe("The transcript of the sentence"),
12524
- start: zod3.number().describe("The starting time, in milliseconds, of the sentence"),
12525
- end: zod3.number().describe("The ending time, in milliseconds, of the sentence"),
12526
- sentiment: zod3.enum(["POSITIVE", "NEUTRAL", "NEGATIVE"]),
12527
- confidence: zod3.number().min(createTranscriptResponseSentimentAnalysisResultsItemConfidenceMin).max(createTranscriptResponseSentimentAnalysisResultsItemConfidenceMax).describe(
12528
- "The confidence score for the detected sentiment of the sentence, from 0 to 1"
12529
- ),
12530
- channel: zod3.string().nullish().describe(
12531
- "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
12532
- ),
12533
- speaker: zod3.string().nullable().describe(
12534
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
12535
- )
12536
- }).describe("The result of the Sentiment Analysis model")
12537
13079
  ).nullish().describe(
12538
- "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) for more information.\n"
13080
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
12539
13081
  ),
12540
- entity_detection: zod3.boolean().nullish().describe(
12541
- "Whether [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection) is enabled, can be true or false"
13082
+ disfluencies: zod3.boolean().nullish().describe(
13083
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
13084
+ ),
13085
+ domain: zod3.string().nullish().describe(
13086
+ 'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
12542
13087
  ),
12543
13088
  entities: zod3.array(
12544
13089
  zod3.object({
@@ -12597,108 +13142,49 @@ var createTranscriptResponse = zod3.object({
12597
13142
  )
12598
13143
  }).describe("A detected entity")
12599
13144
  ).nullish().describe(
12600
- "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/models/entity-detection) for more information.\n"
12601
- ),
12602
- speech_threshold: zod3.number().min(createTranscriptResponseSpeechThresholdMin).max(createTranscriptResponseSpeechThresholdMax).nullish().describe(
12603
- "Defaults to null. Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n"
13145
+ "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
12604
13146
  ),
12605
- throttled: zod3.boolean().nullish().describe(
12606
- "True while a request is throttled and false when a request is no longer throttled"
13147
+ entity_detection: zod3.boolean().nullish().describe(
13148
+ "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
12607
13149
  ),
12608
13150
  error: zod3.string().optional().describe("Error message of why the transcript failed"),
12609
- language_model: zod3.string().describe("The language model that was used for the transcript"),
12610
- acoustic_model: zod3.string().describe("The acoustic model that was used for the transcript")
12611
- }).describe("A transcript object");
12612
- var listTranscriptsQueryLimitDefault = 10;
12613
- var listTranscriptsQueryLimitMax = 200;
12614
- var listTranscriptsQueryThrottledOnlyDefault = false;
12615
- var listTranscriptsQueryParams = zod3.object({
12616
- limit: zod3.number().min(1).max(listTranscriptsQueryLimitMax).default(listTranscriptsQueryLimitDefault).describe("Maximum amount of transcripts to retrieve"),
12617
- status: zod3.enum(["queued", "processing", "completed", "error"]).optional().describe("Filter by transcript status"),
12618
- created_on: zod3.string().date().optional().describe("Only get transcripts created on this date"),
12619
- before_id: zod3.string().uuid().optional().describe("Get transcripts that were created before this transcript ID"),
12620
- after_id: zod3.string().uuid().optional().describe("Get transcripts that were created after this transcript ID"),
12621
- throttled_only: zod3.boolean().optional().describe("Only get throttled transcripts, overrides the status filter")
12622
- });
12623
- var listTranscriptsResponseTranscriptsItemCreatedRegExp = new RegExp(
12624
- "^(?:(\\d{4}-\\d{2}-\\d{2})T(\\d{2}:\\d{2}:\\d{2}(?:\\.\\d+)?))$"
12625
- );
12626
- var listTranscriptsResponseTranscriptsItemCompletedRegExp = new RegExp(
12627
- "^(?:(\\d{4}-\\d{2}-\\d{2})T(\\d{2}:\\d{2}:\\d{2}(?:\\.\\d+)?))$"
12628
- );
12629
- var listTranscriptsResponse = zod3.object({
12630
- page_details: zod3.object({
12631
- limit: zod3.number().describe("The number of results this page is limited to"),
12632
- result_count: zod3.number().describe("The actual number of results in the page"),
12633
- current_url: zod3.string().describe("The URL used to retrieve the current page of transcripts"),
12634
- prev_url: zod3.string().nullable().describe(
12635
- "The URL to the next page of transcripts. The previous URL always points to a page with older transcripts."
12636
- ),
12637
- next_url: zod3.string().nullable().describe(
12638
- "The URL to the next page of transcripts. The next URL always points to a page with newer transcripts."
12639
- )
13151
+ filter_profanity: zod3.boolean().nullish().describe(
13152
+ "Whether [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) is enabled, either true or false"
13153
+ ),
13154
+ format_text: zod3.boolean().nullish().describe(
13155
+ "Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
13156
+ ),
13157
+ iab_categories: zod3.boolean().nullish().describe(
13158
+ "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
13159
+ ),
13160
+ iab_categories_result: zod3.object({
13161
+ status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
13162
+ results: zod3.array(
13163
+ zod3.object({
13164
+ text: zod3.string().describe("The text in the transcript in which a detected topic occurs"),
13165
+ labels: zod3.array(
13166
+ zod3.object({
13167
+ relevance: zod3.number().describe("How relevant the detected topic is of a detected topic"),
13168
+ label: zod3.string().describe(
13169
+ "The IAB taxonomical label for the label of the detected topic, where > denotes supertopic/subtopic relationship"
13170
+ )
13171
+ })
13172
+ ).optional().describe("An array of detected topics in the text"),
13173
+ timestamp: zod3.object({
13174
+ start: zod3.number().describe("The start time in milliseconds"),
13175
+ end: zod3.number().describe("The end time in milliseconds")
13176
+ }).optional().describe("Timestamp containing a start and end property in milliseconds")
13177
+ }).describe("The result of the topic detection model")
13178
+ ).describe("An array of results for the Topic Detection model"),
13179
+ summary: zod3.record(zod3.string(), zod3.number()).describe("The overall relevance of topic to the entire audio file")
12640
13180
  }).describe(
12641
- "Details of the transcript page. Transcripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts."
13181
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
13182
+ ).or(zod3.null()).optional().describe(
13183
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
12642
13184
  ),
12643
- transcripts: zod3.array(
12644
- zod3.object({
12645
- id: zod3.string().uuid().describe("The unique identifier for the transcript"),
12646
- resource_url: zod3.string().describe("The URL to retrieve the transcript"),
12647
- status: zod3.enum(["queued", "processing", "completed", "error"]).describe(
12648
- "The status of your transcript. Possible values are queued, processing, completed, or error."
12649
- ),
12650
- created: zod3.string().regex(listTranscriptsResponseTranscriptsItemCreatedRegExp).describe("The date and time the transcript was created"),
12651
- completed: zod3.string().regex(listTranscriptsResponseTranscriptsItemCompletedRegExp).nullable().describe("The date and time the transcript was completed"),
12652
- audio_url: zod3.string().describe("The URL to the audio file"),
12653
- error: zod3.string().nullable().describe("Error message of why the transcript failed")
12654
- })
12655
- ).describe("An array of transcripts")
12656
- }).describe(
12657
- "A list of transcripts. Transcripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts."
12658
- );
12659
- var getTranscriptParams = zod3.object({
12660
- transcript_id: zod3.string().describe("ID of the transcript")
12661
- });
12662
- var getTranscriptResponseLanguageConfidenceThresholdMin = 0;
12663
- var getTranscriptResponseLanguageConfidenceThresholdMax = 1;
12664
- var getTranscriptResponseLanguageConfidenceMin = 0;
12665
- var getTranscriptResponseLanguageConfidenceMax = 1;
12666
- var getTranscriptResponseSpeechModelDefault = null;
12667
- var getTranscriptResponseWordsItemConfidenceMin = 0;
12668
- var getTranscriptResponseWordsItemConfidenceMax = 1;
12669
- var getTranscriptResponseUtterancesItemConfidenceMin = 0;
12670
- var getTranscriptResponseUtterancesItemConfidenceMax = 1;
12671
- var getTranscriptResponseUtterancesItemWordsItemConfidenceMin = 0;
12672
- var getTranscriptResponseUtterancesItemWordsItemConfidenceMax = 1;
12673
- var getTranscriptResponseConfidenceMin = 0;
12674
- var getTranscriptResponseConfidenceMax = 1;
12675
- var getTranscriptResponseAutoHighlightsResultResultsItemRankMin = 0;
12676
- var getTranscriptResponseAutoHighlightsResultResultsItemRankMax = 1;
12677
- var getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMin = 0;
12678
- var getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax = 1;
12679
- var getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin = 0;
12680
- var getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax = 1;
12681
- var getTranscriptResponseContentSafetyLabelsSummaryMinOne = 0;
12682
- var getTranscriptResponseContentSafetyLabelsSummaryMaxOne = 1;
12683
- var getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin = 0;
12684
- var getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax = 1;
12685
- var getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin = 0;
12686
- var getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax = 1;
12687
- var getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin = 0;
12688
- var getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax = 1;
12689
- var getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin = 0;
12690
- var getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax = 1;
12691
- var getTranscriptResponseIabCategoriesResultSummaryMinOne = 0;
12692
- var getTranscriptResponseIabCategoriesResultSummaryMaxOne = 1;
12693
- var getTranscriptResponseSentimentAnalysisResultsItemConfidenceMin = 0;
12694
- var getTranscriptResponseSentimentAnalysisResultsItemConfidenceMax = 1;
12695
- var getTranscriptResponseSpeechThresholdMin = 0;
12696
- var getTranscriptResponseSpeechThresholdMax = 1;
12697
- var getTranscriptResponse = zod3.object({
12698
13185
  id: zod3.string().uuid().describe("The unique identifier of your transcript"),
12699
- audio_url: zod3.string().describe("The URL of the media that was transcribed"),
12700
- status: zod3.enum(["queued", "processing", "completed", "error"]).describe(
12701
- "The status of your transcript. Possible values are queued, processing, completed, or error."
13186
+ keyterms_prompt: zod3.array(zod3.string()).optional().describe(
13187
+ "Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3 Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.\n"
12702
13188
  ),
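// Sketch (hypothetical values): a keyterms_prompt list within the documented
// limits above - each phrase is 6 words or fewer:
// keyterms_prompt: ["myocardial infarction", "beta blocker", "HbA1c"]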
12703
13189
  language_code: zod3.enum([
12704
13190
  "en",
@@ -12803,138 +13289,175 @@ var getTranscriptResponse = zod3.object({
12803
13289
  "cy",
12804
13290
  "yi",
12805
13291
  "yo"
12806
- ]).describe(
12807
- "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n"
12808
- ).or(zod3.string()).optional().describe(
12809
- "The language of your audio file.\nPossible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n"
12810
- ),
12811
- language_detection: zod3.boolean().nullish().describe(
12812
- "Whether [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection) is enabled, either true or false"
12813
- ),
12814
- language_confidence_threshold: zod3.number().min(getTranscriptResponseLanguageConfidenceThresholdMin).max(getTranscriptResponseLanguageConfidenceThresholdMax).nullable().describe(
12815
- "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\n"
12816
- ),
12817
- language_confidence: zod3.number().min(getTranscriptResponseLanguageConfidenceMin).max(getTranscriptResponseLanguageConfidenceMax).nullable().describe(
12818
- "The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence)"
12819
- ),
12820
- speech_model: zod3.enum(["best", "slam-1", "universal"]).describe("The speech model to use for the transcription.").or(zod3.null()).describe(
12821
- "The speech model used for the transcription. When `null`, the default model is used."
12822
- ),
12823
- text: zod3.string().nullish().describe("The textual transcript of your media file"),
12824
- words: zod3.array(
12825
- zod3.object({
12826
- confidence: zod3.number().min(getTranscriptResponseWordsItemConfidenceMin).max(getTranscriptResponseWordsItemConfidenceMax).describe("The confidence score for the transcript of this word"),
12827
- start: zod3.number().describe("The starting time, in milliseconds, for the word"),
12828
- end: zod3.number().describe("The ending time, in milliseconds, for the word"),
12829
- text: zod3.string().describe("The text of the word"),
12830
- channel: zod3.string().nullish().describe(
12831
- "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
12832
- ),
12833
- speaker: zod3.string().nullable().describe(
12834
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
12835
- )
12836
- })
12837
- ).nullish().describe(
12838
- "An array of temporally-sequential word objects, one for each word in the transcript.\nSee [Speech recognition](https://www.assemblyai.com/docs/models/speech-recognition) for more information.\n"
13292
+ ]).optional().describe(
13293
+ "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
12839
13294
  ),
12840
- utterances: zod3.array(
12841
- zod3.object({
12842
- confidence: zod3.number().min(getTranscriptResponseUtterancesItemConfidenceMin).max(getTranscriptResponseUtterancesItemConfidenceMax).describe("The confidence score for the transcript of this utterance"),
12843
- start: zod3.number().describe("The starting time, in milliseconds, of the utterance in the audio file"),
12844
- end: zod3.number().describe("The ending time, in milliseconds, of the utterance in the audio file"),
12845
- text: zod3.string().describe("The text for this utterance"),
12846
- words: zod3.array(
12847
- zod3.object({
12848
- confidence: zod3.number().min(getTranscriptResponseUtterancesItemWordsItemConfidenceMin).max(getTranscriptResponseUtterancesItemWordsItemConfidenceMax).describe("The confidence score for the transcript of this word"),
12849
- start: zod3.number().describe("The starting time, in milliseconds, for the word"),
12850
- end: zod3.number().describe("The ending time, in milliseconds, for the word"),
12851
- text: zod3.string().describe("The text of the word"),
12852
- channel: zod3.string().nullish().describe(
12853
- "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
12854
- ),
12855
- speaker: zod3.string().nullable().describe(
12856
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
12857
- )
12858
- })
12859
- ).describe("The words in the utterance."),
12860
- channel: zod3.string().nullish().describe(
12861
- "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
12862
- ),
12863
- speaker: zod3.string().describe(
12864
- 'The speaker of this utterance, where each speaker is assigned a sequential capital letter - e.g. "A" for Speaker A, "B" for Speaker B, etc.'
12865
- )
12866
- })
13295
+ language_codes: zod3.array(
13296
+ zod3.enum([
13297
+ "en",
13298
+ "en_au",
13299
+ "en_uk",
13300
+ "en_us",
13301
+ "es",
13302
+ "fr",
13303
+ "de",
13304
+ "it",
13305
+ "pt",
13306
+ "nl",
13307
+ "af",
13308
+ "sq",
13309
+ "am",
13310
+ "ar",
13311
+ "hy",
13312
+ "as",
13313
+ "az",
13314
+ "ba",
13315
+ "eu",
13316
+ "be",
13317
+ "bn",
13318
+ "bs",
13319
+ "br",
13320
+ "bg",
13321
+ "my",
13322
+ "ca",
13323
+ "zh",
13324
+ "hr",
13325
+ "cs",
13326
+ "da",
13327
+ "et",
13328
+ "fo",
13329
+ "fi",
13330
+ "gl",
13331
+ "ka",
13332
+ "el",
13333
+ "gu",
13334
+ "ht",
13335
+ "ha",
13336
+ "haw",
13337
+ "he",
13338
+ "hi",
13339
+ "hu",
13340
+ "is",
13341
+ "id",
13342
+ "ja",
13343
+ "jw",
13344
+ "kn",
13345
+ "kk",
13346
+ "km",
13347
+ "ko",
13348
+ "lo",
13349
+ "la",
13350
+ "lv",
13351
+ "ln",
13352
+ "lt",
13353
+ "lb",
13354
+ "mk",
13355
+ "mg",
13356
+ "ms",
13357
+ "ml",
13358
+ "mt",
13359
+ "mi",
13360
+ "mr",
13361
+ "mn",
13362
+ "ne",
13363
+ "no",
13364
+ "nn",
13365
+ "oc",
13366
+ "pa",
13367
+ "ps",
13368
+ "fa",
13369
+ "pl",
13370
+ "ro",
13371
+ "ru",
13372
+ "sa",
13373
+ "sr",
13374
+ "sn",
13375
+ "sd",
13376
+ "si",
13377
+ "sk",
13378
+ "sl",
13379
+ "so",
13380
+ "su",
13381
+ "sw",
13382
+ "sv",
13383
+ "tl",
13384
+ "tg",
13385
+ "ta",
13386
+ "tt",
13387
+ "te",
13388
+ "th",
13389
+ "bo",
13390
+ "tr",
13391
+ "tk",
13392
+ "uk",
13393
+ "ur",
13394
+ "uz",
13395
+ "vi",
13396
+ "cy",
13397
+ "yi",
13398
+ "yo"
13399
+ ]).describe(
13400
+ "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
13401
+ )
12867
13402
  ).nullish().describe(
12868
- "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/speech-to-text/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/speech-to-text/speech-recognition#multichannel-transcription) for more information.\n"
12869
- ),
12870
- confidence: zod3.number().min(getTranscriptResponseConfidenceMin).max(getTranscriptResponseConfidenceMax).nullish().describe(
12871
- "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
13403
+ "The language codes of your audio file. Used for [Code switching](/docs/speech-to-text/pre-recorded-audio/code-switching)\nOne of the values specified must be `en`.\n"
12872
13404
  ),
12873
- audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
12874
- punctuate: zod3.boolean().nullish().describe("Whether Automatic Punctuation is enabled, either true or false"),
12875
- format_text: zod3.boolean().nullish().describe("Whether Text Formatting is enabled, either true or false"),
12876
- disfluencies: zod3.boolean().nullish().describe('Transcribe Filler Words, like "umm", in your media file; can be true or false'),
12877
- multichannel: zod3.boolean().nullish().describe(
12878
- "Whether [Multichannel transcription](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) was enabled in the transcription request, either true or false"
12879
- ),
12880
- audio_channels: zod3.number().optional().describe(
12881
- "The number of audio channels in the audio file. This is only present when multichannel is enabled."
13405
+ language_confidence: zod3.number().nullable().describe(
13406
+ "The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence). See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details."
12882
13407
  ),
12883
- webhook_url: zod3.string().nullish().describe(
12884
- "The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n"
12885
- ),
12886
- webhook_status_code: zod3.number().nullish().describe(
12887
- "The status code we received from your server when delivering the transcript completed or failed webhook request, if a webhook URL was provided"
13408
+ language_confidence_threshold: zod3.number().nullable().describe(
13409
+ "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nSee [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n"
12888
13410
  ),
12889
- webhook_auth: zod3.boolean().describe("Whether webhook authentication details were provided"),
12890
- webhook_auth_header_name: zod3.string().nullish().describe(
12891
- "The header name to be sent with the transcript completed or failed webhook requests"
13411
+ language_detection: zod3.boolean().nullish().describe(
13412
+ "Whether [Automatic language detection](/docs/pre-recorded-audio/automatic-language-detection) is enabled, either true or false"
12892
13413
  ),
12893
- speed_boost: zod3.boolean().nullish().describe("Whether speed boost is enabled"),
12894
- auto_highlights: zod3.boolean().describe("Whether Key Phrases is enabled, either true or false"),
12895
- auto_highlights_result: zod3.object({
12896
- status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
12897
- results: zod3.array(
12898
- zod3.object({
12899
- count: zod3.number().describe("The total number of times the key phrase appears in the audio file"),
12900
- rank: zod3.number().min(getTranscriptResponseAutoHighlightsResultResultsItemRankMin).max(getTranscriptResponseAutoHighlightsResultResultsItemRankMax).describe(
12901
- "The total relevancy to the overall audio file of this key phrase - a greater number means more relevant"
12902
- ),
12903
- text: zod3.string().describe("The text itself of the key phrase"),
12904
- timestamps: zod3.array(
12905
- zod3.object({
12906
- start: zod3.number().describe("The start time in milliseconds"),
12907
- end: zod3.number().describe("The end time in milliseconds")
12908
- }).describe("Timestamp containing a start and end property in milliseconds")
12909
- ).describe("The timestamp of the of the key phrase")
12910
- })
12911
- ).describe("A temporally-sequential array of Key Phrases")
12912
- }).describe(
12913
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.\n"
12914
- ).or(zod3.null()).optional().describe(
12915
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.\n"
13414
+ language_detection_options: zod3.object({
13415
+ expected_languages: zod3.array(zod3.string()).optional().describe(
13416
+ 'List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.'
13417
+ ),
13418
+ fallback_language: zod3.string().default(getTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault).describe(
13419
+ 'If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `"auto"` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n'
13420
+ ),
13421
+ code_switching: zod3.boolean().optional().describe(
13422
+ "Whether [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) should be detected.\n"
13423
+ ),
13424
+ code_switching_confidence_threshold: zod3.number().default(
13425
+ getTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault
13426
+ ).describe(
13427
+ "The confidence threshold for [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.\n"
13428
+ )
13429
+ }).optional().describe(
13430
+ "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
12916
13431
  ),
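// Sketch of a language_detection_options value consistent with the schema
// above (values hypothetical; per the defaults declared earlier,
// fallback_language defaults to "auto" and
// code_switching_confidence_threshold to 0.3):
// {
//   expected_languages: ["en", "es"],
//   fallback_language: "en",
//   code_switching: true,
//   code_switching_confidence_threshold: 0.3
// }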
12917
- audio_start_from: zod3.number().nullish().describe(
12918
- "The point in time, in milliseconds, in the file at which the transcription was started"
13432
+ multichannel: zod3.boolean().nullish().describe(
13433
+ "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
12919
13434
  ),
12920
- audio_end_at: zod3.number().nullish().describe(
12921
- "The point in time, in milliseconds, in the file at which the transcription was terminated"
13435
+ prompt: zod3.string().optional().describe(
13436
+ "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
12922
13437
  ),
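// Sketch (hypothetical value): a prompt supplying conversational context
// within the 1,500-word limit described above:
// prompt: "Quarterly earnings call for a fintech company; expect stock tickers and finance jargon."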
12923
- word_boost: zod3.array(zod3.string()).optional().describe("The list of custom vocabulary to boost transcription probability for"),
12924
- boost_param: zod3.string().nullish().describe("The word boost parameter value"),
12925
- filter_profanity: zod3.boolean().nullish().describe(
12926
- "Whether [Profanity Filtering](https://www.assemblyai.com/docs/models/speech-recognition#profanity-filtering) is enabled, either true or false"
13438
+ punctuate: zod3.boolean().nullish().describe(
13439
+ "Whether [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
12927
13440
  ),
12928
13441
  redact_pii: zod3.boolean().describe(
12929
- "Whether [PII Redaction](https://www.assemblyai.com/docs/models/pii-redaction) is enabled, either true or false"
13442
+ "Whether [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) is enabled, either true or false"
12930
13443
  ),
12931
13444
  redact_pii_audio: zod3.boolean().nullish().describe(
12932
- "Whether a redacted version of the audio file was generated,\neither true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.\n"
13445
+ "Whether a redacted version of the audio file was generated,\neither true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
13446
+ ),
13447
+ redact_pii_audio_options: zod3.object({
13448
+ return_redacted_no_speech_audio: zod3.boolean().optional().describe(
13449
+ "By default, audio redaction provides redacted audio URLs only when speech is detected. However, if your use-case specifically requires redacted audio files even for silent audio files without any dialogue, you can opt to receive these URLs by setting this parameter to `true`."
13450
+ ),
13451
+ override_audio_redaction_method: zod3.enum(["silence"]).optional().describe(
13452
+ "Specify the method used to redact audio. By default, redacted audio uses a beep sound. Set to `silence` to replace PII with silence instead of a beep."
13453
+ )
13454
+ }).optional().describe(
13455
+ "The options for PII-redacted audio, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
12933
13456
  ),
12934
13457
  redact_pii_audio_quality: zod3.enum(["mp3", "wav"]).describe(
12935
- "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details."
13458
+ "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details."
12936
13459
  ).or(zod3.null()).optional().describe(
12937
- "The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.\n"
13460
+ "The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
12938
13461
  ),
12939
13462
  redact_pii_policies: zod3.array(
12940
13463
  zod3.enum([
@@ -12984,19 +13507,307 @@ var getTranscriptResponse = zod3.object({
12984
13507
  "zodiac_sign"
12985
13508
  ]).describe("The type of PII to redact")
12986
13509
  ).nullish().describe(
12987
- "The list of PII Redaction policies that were enabled, if PII Redaction is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.\n"
13510
+ "The list of PII Redaction policies that were enabled, if PII Redaction is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more information.\n"
12988
13511
  ),
12989
13512
  redact_pii_sub: zod3.enum(["entity_name", "hash"]).optional().describe(
12990
- 'The replacement logic for detected PII, can be "entity_name" or "hash". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.'
13513
+ "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
13514
+ ),
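
Taken together, the `redact_pii*` fields above form one configuration surface. A hedged sketch of such a configuration, with illustrative policy names that must be drawn from the `redact_pii_policies` enum in this schema:

```js
// Sketch only: a PII redaction configuration assembled from the redact_pii*
// fields above. Policy names are illustrative placeholders and must come from
// the redact_pii_policies enum shown in this schema.
const piiConfig = {
  redact_pii: true,
  redact_pii_audio: true,
  redact_pii_audio_options: {
    return_redacted_no_speech_audio: true,      // also return URLs for silent files
    override_audio_redaction_method: "silence"  // silence instead of the default beep
  },
  redact_pii_audio_quality: "wav",              // "mp3" (default) or "wav"
  redact_pii_policies: ["person_name", "phone_number"],
  redact_pii_sub: "entity_name"                 // or "hash"
};
```
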
13515
+ sentiment_analysis: zod3.boolean().nullish().describe(
13516
+ "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
13517
+ ),
13518
+ sentiment_analysis_results: zod3.array(
13519
+ zod3.object({
13520
+ text: zod3.string().describe("The transcript of the sentence"),
13521
+ start: zod3.number().describe("The starting time, in milliseconds, of the sentence"),
13522
+ end: zod3.number().describe("The ending time, in milliseconds, of the sentence"),
13523
+ sentiment: zod3.enum(["POSITIVE", "NEUTRAL", "NEGATIVE"]),
13524
+ confidence: zod3.number().describe(
13525
+ "The confidence score for the detected sentiment of the sentence, from 0 to 1"
13526
+ ),
13527
+ channel: zod3.string().nullish().describe(
13528
+ "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13529
+ ),
13530
+ speaker: zod3.string().nullable().describe(
13531
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
13532
+ )
13533
+ }).describe("The result of the Sentiment Analysis model")
13534
+ ).nullish().describe(
13535
+ "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
12991
13536
  ),
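
A small sketch of consuming `sentiment_analysis_results` from a parsed transcript shaped like the schema above; the `transcript` argument is assumed to hold a completed response:

```js
// Sketch only: tallies sentiment_analysis_results from a parsed transcript
// object shaped like the schema above; no client code from this package is used.
function tallySentiment(transcript) {
  const counts = { POSITIVE: 0, NEUTRAL: 0, NEGATIVE: 0 };
  for (const r of transcript.sentiment_analysis_results ?? []) {
    counts[r.sentiment] += 1; // confidence is 0..1 per the describe() string
  }
  return counts;
}
```
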
12992
13537
  speaker_labels: zod3.boolean().nullish().describe(
12993
- "Whether [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, can be true or false"
13538
+ "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
12994
13539
  ),
12995
13540
  speakers_expected: zod3.number().nullish().describe(
12996
- "Tell the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details."
13541
+ "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
13542
+ ),
13543
+ speech_model_used: zod3.string().optional().describe(
13544
+ "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
13545
+ ),
13546
+ speech_models: zod3.array(
13547
+ zod3.string().describe(
13548
+ "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
13549
+ )
13550
+ ).nullish().describe(
13551
+ "List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.\n"
13552
+ ),
13553
+ speech_threshold: zod3.number().nullish().describe(
13554
+ "Defaults to null. Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.\n"
13555
+ ),
13556
+ speech_understanding: zod3.object({
13557
+ request: zod3.object({
13558
+ translation: zod3.object({
13559
+ target_languages: zod3.array(zod3.string()).describe(
13560
+ 'List of target language codes (e.g., `["es", "de"]`). See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for supported languages.'
13561
+ ),
13562
+ formal: zod3.boolean().default(getTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault).describe(
13563
+ "Use formal language style. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
13564
+ ),
13565
+ match_original_utterance: zod3.boolean().optional().describe(
13566
+ "When enabled with Speaker Labels, returns translated text in the utterances array. Each utterance will include a `translated_texts` key containing translations for each target language."
13567
+ )
13568
+ })
13569
+ }).describe(
13570
+ "Request body for [Translation](https://www.assemblyai.com/docs/speech-understanding/translation)."
13571
+ ).or(
13572
+ zod3.object({
13573
+ speaker_identification: zod3.object({
13574
+ speaker_type: zod3.enum(["role", "name"]).describe(
13575
+ "Type of speaker identification. See [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification) for details on each type."
13576
+ ),
13577
+ known_values: zod3.array(zod3.string()).optional().describe(
13578
+ 'Required if speaker_type is "role". Each value must be 35 characters or less.'
13579
+ ),
13580
+ speakers: zod3.array(
13581
+ zod3.object({
13582
+ role: zod3.string().optional().describe(
13583
+ 'The role of the speaker. Required when `speaker_type` is "role".'
13584
+ ),
13585
+ name: zod3.string().optional().describe(
13586
+ 'The name of the speaker. Required when `speaker_type` is "name".'
13587
+ ),
13588
+ description: zod3.string().optional().describe(
13589
+ "A description of the speaker to help the model identify them based on conversational context."
13590
+ )
13591
+ })
13592
+ ).optional().describe(
13593
+ "An array of speaker objects with metadata to improve identification accuracy. Each object should include a `role` or `name` (depending on `speaker_type`) and an optional `description` to help the model identify the speaker. You can also include any additional custom properties (e.g., `company`, `title`) to provide more context. Use this as an alternative to `known_values` when you want to provide additional context about each speaker."
13594
+ )
13595
+ })
13596
+ }).describe(
13597
+ "Request body for [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification)."
13598
+ )
13599
+ ).or(
13600
+ zod3.object({
13601
+ custom_formatting: zod3.object({
13602
+ date: zod3.string().optional().describe(
13603
+ 'Date format pattern (e.g., `"mm/dd/yyyy"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
13604
+ ),
13605
+ phone_number: zod3.string().optional().describe(
13606
+ 'Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
13607
+ ),
13608
+ email: zod3.string().optional().describe(
13609
+ 'Email format pattern (e.g., `"username@domain.com"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
13610
+ )
13611
+ })
13612
+ }).describe(
13613
+ "Request body for [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting)."
13614
+ )
13615
+ ).optional(),
13616
+ response: zod3.object({
13617
+ translation: zod3.object({
13618
+ status: zod3.string().optional()
13619
+ }).optional()
13620
+ }).or(
13621
+ zod3.object({
13622
+ speaker_identification: zod3.object({
13623
+ mapping: zod3.record(zod3.string(), zod3.string()).optional().describe(
13624
+ 'A mapping of the original generic speaker labels (e.g., "A", "B") to the identified speaker names or roles.'
13625
+ ),
13626
+ status: zod3.string().optional()
13627
+ }).optional()
13628
+ })
13629
+ ).or(
13630
+ zod3.object({
13631
+ custom_formatting: zod3.object({
13632
+ mapping: zod3.record(zod3.string(), zod3.string()).optional(),
13633
+ formatted_text: zod3.string().optional()
13634
+ }).optional()
13635
+ })
13636
+ ).optional()
13637
+ }).optional().describe(
13638
+ "Speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.\n"
13639
+ ),
13640
+ status: zod3.enum(["queued", "processing", "completed", "error"]).describe(
13641
+ "The status of your transcript. Possible values are queued, processing, completed, or error."
13642
+ ),
13643
+ summarization: zod3.boolean().describe(
13644
+ "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
13645
+ ),
13646
+ summary: zod3.string().nullish().describe(
13647
+ "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
13648
+ ),
13649
+ summary_model: zod3.string().nullish().describe(
13650
+ "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
13651
+ ),
13652
+ summary_type: zod3.string().nullish().describe(
13653
+ "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
13654
+ ),
13655
+ remove_audio_tags: zod3.enum(["all"]).describe(
13656
+ "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
13657
+ ).or(zod3.null()).optional().describe(
13658
+ "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
13659
+ ),
13660
+ temperature: zod3.number().nullish().describe(
13661
+ "The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
13662
+ ),
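
A minimal sketch combining the Universal-3 Pro-specific request fields defined above (`prompt`, `temperature`, `remove_audio_tags`); the model identifier mirrors the transcription-model map earlier in this file, and the values are illustrative:

```js
// Sketch only: Universal-3 Pro-specific fields from the schema above.
const u3ProOptions = {
  speech_models: ["universal-3-pro"], // priority-ordered model routing list
  prompt: "Quarterly earnings call for a semiconductor company.", // up to 1,500 words of context
  temperature: 0.2,        // illustrative; semantics per the describe() string above
  remove_audio_tags: "all" // strip audio event tags from the transcript text
};
```
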
13663
+ text: zod3.string().nullish().describe("The textual transcript of your media file"),
13664
+ throttled: zod3.boolean().nullish().describe(
13665
+ "True while a request is throttled and false when a request is no longer throttled"
13666
+ ),
13667
+ utterances: zod3.array(
13668
+ zod3.object({
13669
+ confidence: zod3.number().describe("The confidence score for the transcript of this utterance"),
13670
+ start: zod3.number().describe("The starting time, in milliseconds, of the utterance in the audio file"),
13671
+ end: zod3.number().describe("The ending time, in milliseconds, of the utterance in the audio file"),
13672
+ text: zod3.string().describe("The text for this utterance"),
13673
+ words: zod3.array(
13674
+ zod3.object({
13675
+ confidence: zod3.number().describe("The confidence score for the transcript of this word"),
13676
+ start: zod3.number().describe("The starting time, in milliseconds, for the word"),
13677
+ end: zod3.number().describe("The ending time, in milliseconds, for the word"),
13678
+ text: zod3.string().describe("The text of the word"),
13679
+ channel: zod3.string().nullish().describe(
13680
+ "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13681
+ ),
13682
+ speaker: zod3.string().nullable().describe(
13683
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
13684
+ )
13685
+ })
13686
+ ).describe("The words in the utterance."),
13687
+ channel: zod3.string().nullish().describe(
13688
+ "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13689
+ ),
13690
+ speaker: zod3.string().describe(
13691
+ 'The speaker of this utterance, where each speaker is assigned a sequential capital letter - e.g. "A" for Speaker A, "B" for Speaker B, etc.'
13692
+ ),
13693
+ translated_texts: zod3.record(zod3.string(), zod3.string()).optional().describe(
13694
+ 'Translations keyed by language code (e.g., `{"es": "Texto traducido", "de": "\xDCbersetzter Text"}`). Only present when `match_original_utterance` is enabled with translation.'
13695
+ )
13696
+ })
13697
+ ).nullish().describe(
13698
+ "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
13699
+ ),
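
A sketch of walking the `utterances` array of a transcript shaped like the schema above, including the per-language `translated_texts` that appear when `match_original_utterance` is enabled with translation:

```js
// Sketch only: `transcript` is assumed to be a parsed, completed response.
function printUtterances(transcript) {
  for (const u of transcript.utterances ?? []) {
    console.log(`${u.speaker}: ${u.text}`); // speaker is "A", "B", ... per the schema
    for (const [lang, text] of Object.entries(u.translated_texts ?? {})) {
      console.log(`  [${lang}] ${text}`);   // keyed by target language code, e.g. "es"
    }
  }
}
```
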
13700
+ webhook_auth: zod3.boolean().describe(
13701
+ "Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
13702
+ ),
13703
+ webhook_auth_header_name: zod3.string().nullish().describe(
13704
+ "The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests"
13705
+ ),
13706
+ webhook_status_code: zod3.number().nullish().describe(
13707
+ "The status code we received from your server when delivering the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) request, if a webhook URL was provided"
13708
+ ),
13709
+ webhook_url: zod3.string().nullish().describe(
13710
+ "The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.\n"
13711
+ ),
13712
+ words: zod3.array(
13713
+ zod3.object({
13714
+ confidence: zod3.number().describe("The confidence score for the transcript of this word"),
13715
+ start: zod3.number().describe("The starting time, in milliseconds, for the word"),
13716
+ end: zod3.number().describe("The ending time, in milliseconds, for the word"),
13717
+ text: zod3.string().describe("The text of the word"),
13718
+ channel: zod3.string().nullish().describe(
13719
+ "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13720
+ ),
13721
+ speaker: zod3.string().nullable().describe(
13722
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
13723
+ )
13724
+ })
13725
+ ).nullish().describe(
13726
+ "An array of temporally-sequential word objects, one for each word in the transcript.\n"
13727
+ ),
13728
+ acoustic_model: zod3.string().describe("This parameter does not currently have any functionality attached to it."),
13729
+ custom_topics: zod3.boolean().nullish().describe("This parameter does not currently have any functionality attached to it."),
13730
+ language_model: zod3.string().describe("This parameter does not currently have any functionality attached to it."),
13731
+ speech_model: zod3.string().describe(
13732
+ "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
13733
+ ).or(zod3.null()).describe(
13734
+ "This parameter has been replaced with the `speech_models` parameter, learn more about the `speech_models` parameter [here](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model).\n"
13735
+ ),
13736
+ speed_boost: zod3.boolean().nullish().describe("This parameter does not currently have any functionality attached to it."),
13737
+ topics: zod3.array(zod3.string()).optional().describe("This parameter does not currently have any functionality attached to it."),
13738
+ translated_texts: zod3.object({
13739
+ language_code: zod3.string().optional().describe("Translated text for this language code")
13740
+ }).optional().describe(
13741
+ "Translated text keyed by language code. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
13742
+ )
13743
+ }).describe("A transcript object");
13744
+ var deleteTranscriptParams = zod3.object({
13745
+ transcript_id: zod3.string().describe("ID of the transcript")
13746
+ });
13747
+ var deleteTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault = "auto";
13748
+ var deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault = false;
13749
+ var deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault = 0.3;
13750
+ var deleteTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault = false;
13751
+ var deleteTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault = true;
13752
+ var deleteTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
13753
+ var deleteTranscriptResponse = zod3.object({
13754
+ audio_channels: zod3.number().optional().describe(
13755
+ "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
13756
+ ),
13757
+ audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
13758
+ audio_end_at: zod3.number().nullish().describe(
13759
+ "The point in time, in milliseconds, in the file at which the transcription was terminated. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
13760
+ ),
13761
+ audio_start_from: zod3.number().nullish().describe(
13762
+ "The point in time, in milliseconds, in the file at which the transcription was started. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
13763
+ ),
13764
+ audio_url: zod3.string().describe("The URL of the media that was transcribed"),
13765
+ auto_chapters: zod3.boolean().nullish().describe(
13766
+ "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
13767
+ ),
13768
+ auto_highlights: zod3.boolean().describe(
13769
+ "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
13770
+ ),
13771
+ auto_highlights_result: zod3.object({
13772
+ status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
13773
+ results: zod3.array(
13774
+ zod3.object({
13775
+ count: zod3.number().describe("The total number of times the key phrase appears in the audio file"),
13776
+ rank: zod3.number().describe(
13777
+ "The total relevancy to the overall audio file of this key phrase - a greater number means more relevant"
13778
+ ),
13779
+ text: zod3.string().describe("The text itself of the key phrase"),
13780
+ timestamps: zod3.array(
13781
+ zod3.object({
13782
+ start: zod3.number().describe("The start time in milliseconds"),
13783
+ end: zod3.number().describe("The end time in milliseconds")
13784
+ }).describe("Timestamp containing a start and end property in milliseconds")
13785
+ ).describe("The timestamp of the of the key phrase")
13786
+ })
13787
+ ).describe("A temporally-sequential array of Key Phrases")
13788
+ }).describe(
13789
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
13790
+ ).or(zod3.null()).optional().describe(
13791
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
13792
+ ),
13793
+ chapters: zod3.array(
13794
+ zod3.object({
13795
+ gist: zod3.string().describe(
13796
+ "An ultra-short summary (just a few words) of the content spoken in the chapter"
13797
+ ),
13798
+ headline: zod3.string().describe("A single sentence summary of the content spoken during the chapter"),
13799
+ summary: zod3.string().describe("A one paragraph summary of the content spoken during the chapter"),
13800
+ start: zod3.number().describe("The starting time, in milliseconds, for the chapter"),
13801
+ end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
13802
+ }).describe("Chapter of the audio file")
13803
+ ).nullish().describe(
13804
+ "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
13805
+ ),
13806
+ confidence: zod3.number().nullish().describe(
13807
+ "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
12997
13808
  ),
12998
13809
  content_safety: zod3.boolean().nullish().describe(
12999
- "Whether [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation) is enabled, can be true or false"
13810
+ "Whether [Content Moderation](https://www.assemblyai.com/docs/content-moderation) is enabled, can be true or false"
13000
13811
  ),
13001
13812
  content_safety_labels: zod3.object({
13002
13813
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -13006,12 +13817,8 @@ var getTranscriptResponse = zod3.object({
13006
13817
  labels: zod3.array(
13007
13818
  zod3.object({
13008
13819
  label: zod3.string().describe("The label of the sensitive topic"),
13009
- confidence: zod3.number().min(
13010
- getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMin
13011
- ).max(
13012
- getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax
13013
- ).describe("The confidence score for the topic being discussed, from 0 to 1"),
13014
- severity: zod3.number().min(getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin).max(getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax).describe("How severely the topic is discussed in the section, from 0 to 1")
13820
+ confidence: zod3.number().describe("The confidence score for the topic being discussed, from 0 to 1"),
13821
+ severity: zod3.number().describe("How severely the topic is discussed in the section, from 0 to 1")
13015
13822
  })
13016
13823
  ).describe(
13017
13824
  "An array of safety labels, one per sensitive topic that was detected in the section"
@@ -13024,128 +13831,39 @@ var getTranscriptResponse = zod3.object({
13024
13831
  }).describe("Timestamp containing a start and end property in milliseconds")
13025
13832
  })
13026
13833
  ).describe("An array of results for the Content Moderation model"),
13027
- summary: zod3.record(
13028
- zod3.string(),
13029
- zod3.number().min(getTranscriptResponseContentSafetyLabelsSummaryMinOne).max(getTranscriptResponseContentSafetyLabelsSummaryMaxOne).describe(
13030
- 'A confidence score for the presence of the sensitive topic "topic" across the entire audio file'
13031
- )
13032
- ).describe(
13834
+ summary: zod3.record(zod3.string(), zod3.number()).describe(
13033
13835
  "A summary of the Content Moderation confidence results for the entire audio file"
13034
13836
  ),
13035
13837
  severity_score_summary: zod3.record(
13036
13838
  zod3.string(),
13037
13839
  zod3.object({
13038
- low: zod3.number().min(getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin).max(getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax),
13039
- medium: zod3.number().min(getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin).max(getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax),
13040
- high: zod3.number().min(getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin).max(getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax)
13840
+ low: zod3.number(),
13841
+ medium: zod3.number(),
13842
+ high: zod3.number()
13041
13843
  })
13042
13844
  ).describe(
13043
13845
  "A summary of the Content Moderation severity results for the entire audio file"
13044
13846
  )
13045
13847
  }).describe(
13046
- "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.\n"
13047
- ).or(zod3.null()).optional().describe(
13048
- "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.\n"
13049
- ),
13050
- iab_categories: zod3.boolean().nullish().describe(
13051
- "Whether [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) is enabled, can be true or false"
13052
- ),
13053
- iab_categories_result: zod3.object({
13054
- status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
13055
- results: zod3.array(
13056
- zod3.object({
13057
- text: zod3.string().describe("The text in the transcript in which a detected topic occurs"),
13058
- labels: zod3.array(
13059
- zod3.object({
13060
- relevance: zod3.number().min(
13061
- getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin
13062
- ).max(
13063
- getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax
13064
- ).describe("How relevant the detected topic is of a detected topic"),
13065
- label: zod3.string().describe(
13066
- "The IAB taxonomical label for the label of the detected topic, where > denotes supertopic/subtopic relationship"
13067
- )
13068
- })
13069
- ).optional().describe("An array of detected topics in the text"),
13070
- timestamp: zod3.object({
13071
- start: zod3.number().describe("The start time in milliseconds"),
13072
- end: zod3.number().describe("The end time in milliseconds")
13073
- }).optional().describe("Timestamp containing a start and end property in milliseconds")
13074
- }).describe("The result of the topic detection model")
13075
- ).describe("An array of results for the Topic Detection model"),
13076
- summary: zod3.record(
13077
- zod3.string(),
13078
- zod3.number().min(getTranscriptResponseIabCategoriesResultSummaryMinOne).max(getTranscriptResponseIabCategoriesResultSummaryMaxOne)
13079
- ).describe("The overall relevance of topic to the entire audio file")
13080
- }).describe(
13081
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.\n"
13848
+ "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.\n"
13082
13849
  ).or(zod3.null()).optional().describe(
13083
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.\n"
13850
+ "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.\n"
13084
13851
  ),
13085
13852
  custom_spelling: zod3.array(
13086
13853
  zod3.object({
13087
- from: zod3.array(zod3.string().describe("Word or phrase to replace")).describe("Words or phrases to replace"),
13854
+ from: zod3.array(zod3.string()).describe("Words or phrases to replace"),
13088
13855
  to: zod3.string().describe("Word to replace with")
13089
13856
  }).describe(
13090
13857
  "Object containing words or phrases to replace, and the word or phrase to replace with"
13091
13858
  )
13092
- ).nullish().describe("Customize how words are spelled and formatted using to and from values"),
13093
- keyterms_prompt: zod3.array(zod3.string()).optional().describe(
13094
- "Improve accuracy with up to 1000 domain-specific words or phrases (maximum 6 words per phrase).\n"
13095
- ),
13096
- prompt: zod3.string().optional().describe("This parameter does not currently have any functionality attached to it."),
13097
- auto_chapters: zod3.boolean().nullish().describe(
13098
- "Whether [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) is enabled, can be true or false"
13099
- ),
13100
- chapters: zod3.array(
13101
- zod3.object({
13102
- gist: zod3.string().describe(
13103
- "An ultra-short summary (just a few words) of the content spoken in the chapter"
13104
- ),
13105
- headline: zod3.string().describe("A single sentence summary of the content spoken during the chapter"),
13106
- summary: zod3.string().describe("A one paragraph summary of the content spoken during the chapter"),
13107
- start: zod3.number().describe("The starting time, in milliseconds, for the chapter"),
13108
- end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
13109
- }).describe("Chapter of the audio file")
13110
- ).nullish().describe("An array of temporally sequential chapters for the audio file"),
13111
- summarization: zod3.boolean().describe(
13112
- "Whether [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled, either true or false"
13113
- ),
13114
- summary_type: zod3.string().nullish().describe(
13115
- "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled"
13116
- ),
13117
- summary_model: zod3.string().nullish().describe(
13118
- "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled\n"
13119
- ),
13120
- summary: zod3.string().nullish().describe(
13121
- "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled"
13122
- ),
13123
- custom_topics: zod3.boolean().nullish().describe("Whether custom topics is enabled, either true or false"),
13124
- topics: zod3.array(zod3.string()).optional().describe("The list of custom topics provided if custom topics is enabled"),
13125
- sentiment_analysis: zod3.boolean().nullish().describe(
13126
- "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) is enabled, can be true or false"
13127
- ),
13128
- sentiment_analysis_results: zod3.array(
13129
- zod3.object({
13130
- text: zod3.string().describe("The transcript of the sentence"),
13131
- start: zod3.number().describe("The starting time, in milliseconds, of the sentence"),
13132
- end: zod3.number().describe("The ending time, in milliseconds, of the sentence"),
13133
- sentiment: zod3.enum(["POSITIVE", "NEUTRAL", "NEGATIVE"]),
13134
- confidence: zod3.number().min(getTranscriptResponseSentimentAnalysisResultsItemConfidenceMin).max(getTranscriptResponseSentimentAnalysisResultsItemConfidenceMax).describe(
13135
- "The confidence score for the detected sentiment of the sentence, from 0 to 1"
13136
- ),
13137
- channel: zod3.string().nullish().describe(
13138
- "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13139
- ),
13140
- speaker: zod3.string().nullable().describe(
13141
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
13142
- )
13143
- }).describe("The result of the Sentiment Analysis model")
13144
13859
  ).nullish().describe(
13145
- "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) for more information.\n"
13860
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
13146
13861
  ),
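
A minimal sketch of a `custom_spelling` value matching the from/to shape above; the replacements are illustrative:

```js
// Sketch only: each entry maps phrases to replace onto their replacement.
const customSpelling = [
  { from: ["de kooning", "dekooning"], to: "de Kooning" },
  { from: ["sql"], to: "SQL" }
];
```
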
13147
- entity_detection: zod3.boolean().nullish().describe(
13148
- "Whether [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection) is enabled, can be true or false"
13862
+ disfluencies: zod3.boolean().nullish().describe(
13863
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
13864
+ ),
13865
+ domain: zod3.string().nullish().describe(
13866
+ 'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
13149
13867
  ),
13150
13868
  entities: zod3.array(
13151
13869
  zod3.object({
@@ -13204,61 +13922,49 @@ var getTranscriptResponse = zod3.object({
13204
13922
  )
13205
13923
  }).describe("A detected entity")
13206
13924
  ).nullish().describe(
13207
- "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/models/entity-detection) for more information.\n"
13208
- ),
13209
- speech_threshold: zod3.number().min(getTranscriptResponseSpeechThresholdMin).max(getTranscriptResponseSpeechThresholdMax).nullish().describe(
13210
- "Defaults to null. Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n"
13925
+ "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
13211
13926
  ),
13212
- throttled: zod3.boolean().nullish().describe(
13213
- "True while a request is throttled and false when a request is no longer throttled"
13927
+ entity_detection: zod3.boolean().nullish().describe(
13928
+ "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
13214
13929
  ),
13215
13930
  error: zod3.string().optional().describe("Error message of why the transcript failed"),
13216
- language_model: zod3.string().describe("The language model that was used for the transcript"),
13217
- acoustic_model: zod3.string().describe("The acoustic model that was used for the transcript")
13218
- }).describe("A transcript object");
13219
- var deleteTranscriptParams = zod3.object({
13220
- transcript_id: zod3.string().describe("ID of the transcript")
13221
- });
13222
- var deleteTranscriptResponseLanguageConfidenceThresholdMin = 0;
13223
- var deleteTranscriptResponseLanguageConfidenceThresholdMax = 1;
13224
- var deleteTranscriptResponseLanguageConfidenceMin = 0;
13225
- var deleteTranscriptResponseLanguageConfidenceMax = 1;
13226
- var deleteTranscriptResponseSpeechModelDefault = null;
13227
- var deleteTranscriptResponseWordsItemConfidenceMin = 0;
13228
- var deleteTranscriptResponseWordsItemConfidenceMax = 1;
13229
- var deleteTranscriptResponseUtterancesItemConfidenceMin = 0;
13230
- var deleteTranscriptResponseUtterancesItemConfidenceMax = 1;
13231
- var deleteTranscriptResponseUtterancesItemWordsItemConfidenceMin = 0;
13232
- var deleteTranscriptResponseUtterancesItemWordsItemConfidenceMax = 1;
13233
- var deleteTranscriptResponseConfidenceMin = 0;
13234
- var deleteTranscriptResponseConfidenceMax = 1;
13235
- var deleteTranscriptResponseAutoHighlightsResultResultsItemRankMin = 0;
13236
- var deleteTranscriptResponseAutoHighlightsResultResultsItemRankMax = 1;
13237
- var deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMin = 0;
13238
- var deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax = 1;
13239
- var deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin = 0;
13240
- var deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax = 1;
13241
- var deleteTranscriptResponseContentSafetyLabelsSummaryMinOne = 0;
13242
- var deleteTranscriptResponseContentSafetyLabelsSummaryMaxOne = 1;
13243
- var deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin = 0;
13244
- var deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax = 1;
13245
- var deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin = 0;
13246
- var deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax = 1;
13247
- var deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin = 0;
13248
- var deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax = 1;
13249
- var deleteTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin = 0;
13250
- var deleteTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax = 1;
13251
- var deleteTranscriptResponseIabCategoriesResultSummaryMinOne = 0;
13252
- var deleteTranscriptResponseIabCategoriesResultSummaryMaxOne = 1;
13253
- var deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMin = 0;
13254
- var deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMax = 1;
13255
- var deleteTranscriptResponseSpeechThresholdMin = 0;
13256
- var deleteTranscriptResponseSpeechThresholdMax = 1;
13257
- var deleteTranscriptResponse = zod3.object({
13931
+ filter_profanity: zod3.boolean().nullish().describe(
13932
+ "Whether [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) is enabled, either true or false"
13933
+ ),
13934
+ format_text: zod3.boolean().nullish().describe(
13935
+ "Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
13936
+ ),
13937
+ iab_categories: zod3.boolean().nullish().describe(
13938
+ "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
13939
+ ),
13940
+ iab_categories_result: zod3.object({
13941
+ status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
13942
+ results: zod3.array(
13943
+ zod3.object({
13944
+ text: zod3.string().describe("The text in the transcript in which a detected topic occurs"),
13945
+ labels: zod3.array(
13946
+ zod3.object({
13947
+ relevance: zod3.number().describe("How relevant the detected topic is, from 0 to 1"),
13948
+ label: zod3.string().describe(
13949
+ "The IAB taxonomical label for the label of the detected topic, where > denotes supertopic/subtopic relationship"
13950
+ )
13951
+ })
13952
+ ).optional().describe("An array of detected topics in the text"),
13953
+ timestamp: zod3.object({
13954
+ start: zod3.number().describe("The start time in milliseconds"),
13955
+ end: zod3.number().describe("The end time in milliseconds")
13956
+ }).optional().describe("Timestamp containing a start and end property in milliseconds")
13957
+ }).describe("The result of the topic detection model")
13958
+ ).describe("An array of results for the Topic Detection model"),
13959
+ summary: zod3.record(zod3.string(), zod3.number()).describe("The overall relevance of topic to the entire audio file")
13960
+ }).describe(
13961
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
13962
+ ).or(zod3.null()).optional().describe(
13963
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
13964
+ ),
13258
13965
  id: zod3.string().uuid().describe("The unique identifier of your transcript"),
13259
- audio_url: zod3.string().describe("The URL of the media that was transcribed"),
13260
- status: zod3.enum(["queued", "processing", "completed", "error"]).describe(
13261
- "The status of your transcript. Possible values are queued, processing, completed, or error."
13966
+ keyterms_prompt: zod3.array(zod3.string()).optional().describe(
13967
+ "Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3 Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.\n"
13262
13968
  ),
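
A minimal sketch of a `keyterms_prompt` list within the limits stated above; the terms are illustrative:

```js
// Sketch only: max 6 words per phrase; up to 200 terms on Universal-2,
// 1000 on Universal-3 Pro, per the describe() string above.
const keytermsPrompt = ["atrial fibrillation", "metoprolol", "prior authorization"];
```
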
13263
13969
  language_code: zod3.enum([
13264
13970
  "en",
@@ -13363,138 +14069,175 @@ var deleteTranscriptResponse = zod3.object({
13363
14069
  "cy",
13364
14070
  "yi",
13365
14071
  "yo"
13366
- ]).describe(
13367
- "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n"
13368
- ).or(zod3.string()).optional().describe(
13369
- "The language of your audio file.\nPossible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n"
13370
- ),
13371
- language_detection: zod3.boolean().nullish().describe(
13372
- "Whether [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection) is enabled, either true or false"
13373
- ),
13374
- language_confidence_threshold: zod3.number().min(deleteTranscriptResponseLanguageConfidenceThresholdMin).max(deleteTranscriptResponseLanguageConfidenceThresholdMax).nullable().describe(
13375
- "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\n"
13376
- ),
13377
- language_confidence: zod3.number().min(deleteTranscriptResponseLanguageConfidenceMin).max(deleteTranscriptResponseLanguageConfidenceMax).nullable().describe(
13378
- "The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence)"
13379
- ),
13380
- speech_model: zod3.enum(["best", "slam-1", "universal"]).describe("The speech model to use for the transcription.").or(zod3.null()).describe(
13381
- "The speech model used for the transcription. When `null`, the default model is used."
13382
- ),
13383
- text: zod3.string().nullish().describe("The textual transcript of your media file"),
13384
- words: zod3.array(
13385
- zod3.object({
13386
- confidence: zod3.number().min(deleteTranscriptResponseWordsItemConfidenceMin).max(deleteTranscriptResponseWordsItemConfidenceMax).describe("The confidence score for the transcript of this word"),
13387
- start: zod3.number().describe("The starting time, in milliseconds, for the word"),
13388
- end: zod3.number().describe("The ending time, in milliseconds, for the word"),
13389
- text: zod3.string().describe("The text of the word"),
13390
- channel: zod3.string().nullish().describe(
13391
- "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13392
- ),
13393
- speaker: zod3.string().nullable().describe(
13394
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
13395
- )
13396
- })
13397
- ).nullish().describe(
13398
- "An array of temporally-sequential word objects, one for each word in the transcript.\nSee [Speech recognition](https://www.assemblyai.com/docs/models/speech-recognition) for more information.\n"
14072
+ ]).optional().describe(
14073
+ "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
13399
14074
  ),
13400
- utterances: zod3.array(
13401
- zod3.object({
13402
- confidence: zod3.number().min(deleteTranscriptResponseUtterancesItemConfidenceMin).max(deleteTranscriptResponseUtterancesItemConfidenceMax).describe("The confidence score for the transcript of this utterance"),
13403
- start: zod3.number().describe("The starting time, in milliseconds, of the utterance in the audio file"),
13404
- end: zod3.number().describe("The ending time, in milliseconds, of the utterance in the audio file"),
13405
- text: zod3.string().describe("The text for this utterance"),
13406
- words: zod3.array(
13407
- zod3.object({
13408
- confidence: zod3.number().min(deleteTranscriptResponseUtterancesItemWordsItemConfidenceMin).max(deleteTranscriptResponseUtterancesItemWordsItemConfidenceMax).describe("The confidence score for the transcript of this word"),
13409
- start: zod3.number().describe("The starting time, in milliseconds, for the word"),
13410
- end: zod3.number().describe("The ending time, in milliseconds, for the word"),
13411
- text: zod3.string().describe("The text of the word"),
13412
- channel: zod3.string().nullish().describe(
13413
- "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13414
- ),
13415
- speaker: zod3.string().nullable().describe(
13416
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
13417
- )
13418
- })
13419
- ).describe("The words in the utterance."),
13420
- channel: zod3.string().nullish().describe(
13421
- "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13422
- ),
13423
- speaker: zod3.string().describe(
13424
- 'The speaker of this utterance, where each speaker is assigned a sequential capital letter - e.g. "A" for Speaker A, "B" for Speaker B, etc.'
13425
- )
13426
- })
14075
+ language_codes: zod3.array(
14076
+ zod3.enum([
14077
+ "en",
14078
+ "en_au",
14079
+ "en_uk",
14080
+ "en_us",
14081
+ "es",
14082
+ "fr",
14083
+ "de",
14084
+ "it",
14085
+ "pt",
14086
+ "nl",
14087
+ "af",
14088
+ "sq",
14089
+ "am",
14090
+ "ar",
14091
+ "hy",
14092
+ "as",
14093
+ "az",
14094
+ "ba",
14095
+ "eu",
14096
+ "be",
14097
+ "bn",
14098
+ "bs",
14099
+ "br",
14100
+ "bg",
14101
+ "my",
14102
+ "ca",
14103
+ "zh",
14104
+ "hr",
14105
+ "cs",
14106
+ "da",
14107
+ "et",
14108
+ "fo",
14109
+ "fi",
14110
+ "gl",
14111
+ "ka",
14112
+ "el",
14113
+ "gu",
14114
+ "ht",
14115
+ "ha",
14116
+ "haw",
14117
+ "he",
14118
+ "hi",
14119
+ "hu",
14120
+ "is",
14121
+ "id",
14122
+ "ja",
14123
+ "jw",
14124
+ "kn",
14125
+ "kk",
14126
+ "km",
14127
+ "ko",
14128
+ "lo",
14129
+ "la",
14130
+ "lv",
14131
+ "ln",
14132
+ "lt",
14133
+ "lb",
14134
+ "mk",
14135
+ "mg",
14136
+ "ms",
14137
+ "ml",
14138
+ "mt",
14139
+ "mi",
14140
+ "mr",
14141
+ "mn",
14142
+ "ne",
14143
+ "no",
14144
+ "nn",
14145
+ "oc",
14146
+ "pa",
14147
+ "ps",
14148
+ "fa",
14149
+ "pl",
14150
+ "ro",
14151
+ "ru",
14152
+ "sa",
14153
+ "sr",
14154
+ "sn",
14155
+ "sd",
14156
+ "si",
14157
+ "sk",
14158
+ "sl",
14159
+ "so",
14160
+ "su",
14161
+ "sw",
14162
+ "sv",
14163
+ "tl",
14164
+ "tg",
14165
+ "ta",
14166
+ "tt",
14167
+ "te",
14168
+ "th",
14169
+ "bo",
14170
+ "tr",
14171
+ "tk",
14172
+ "uk",
14173
+ "ur",
14174
+ "uz",
14175
+ "vi",
14176
+ "cy",
14177
+ "yi",
14178
+ "yo"
14179
+ ]).describe(
14180
+ "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
14181
+ )
13427
14182
  ).nullish().describe(
13428
- "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/speech-to-text/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/speech-to-text/speech-recognition#multichannel-transcription) for more information.\n"
13429
- ),
13430
- confidence: zod3.number().min(deleteTranscriptResponseConfidenceMin).max(deleteTranscriptResponseConfidenceMax).nullish().describe(
13431
- "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
13432
- ),
13433
- audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
13434
- punctuate: zod3.boolean().nullish().describe("Whether Automatic Punctuation is enabled, either true or false"),
13435
- format_text: zod3.boolean().nullish().describe("Whether Text Formatting is enabled, either true or false"),
13436
- disfluencies: zod3.boolean().nullish().describe('Transcribe Filler Words, like "umm", in your media file; can be true or false'),
13437
- multichannel: zod3.boolean().nullish().describe(
13438
- "Whether [Multichannel transcription](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) was enabled in the transcription request, either true or false"
13439
- ),
13440
- audio_channels: zod3.number().optional().describe(
13441
- "The number of audio channels in the audio file. This is only present when multichannel is enabled."
14183
+ "The language codes of your audio file. Used for [Code switching](/docs/speech-to-text/pre-recorded-audio/code-switching)\nOne of the values specified must be `en`.\n"
13442
14184
  ),
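
A hedged sketch of a request enabling code switching with an explicit `language_codes` list, per the constraint above that one value must be `en`; values are illustrative:

```js
// Sketch only: field shapes follow the schema above.
const codeSwitchingRequest = {
  language_codes: ["en", "es"], // one of the values must be "en"
  language_detection_options: {
    code_switching: true,
    code_switching_confidence_threshold: 0.3 // the schema default defined earlier in this file
  }
};
```
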
13443
- webhook_url: zod3.string().nullish().describe(
13444
- "The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n"
14185
+ language_confidence: zod3.number().nullable().describe(
14186
+ "The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence). See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details."
13445
14187
  ),
13446
- webhook_status_code: zod3.number().nullish().describe(
13447
- "The status code we received from your server when delivering the transcript completed or failed webhook request, if a webhook URL was provided"
14188
+ language_confidence_threshold: zod3.number().nullable().describe(
14189
+ "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nSee [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n"
13448
14190
  ),
13449
- webhook_auth: zod3.boolean().describe("Whether webhook authentication details were provided"),
13450
- webhook_auth_header_name: zod3.string().nullish().describe(
13451
- "The header name to be sent with the transcript completed or failed webhook requests"
14191
+ language_detection: zod3.boolean().nullish().describe(
14192
+ "Whether [Automatic language detection](/docs/pre-recorded-audio/automatic-language-detection) is enabled, either true or false"
13452
14193
  ),
13453
- speed_boost: zod3.boolean().nullish().describe("Whether speed boost is enabled"),
13454
- auto_highlights: zod3.boolean().describe("Whether Key Phrases is enabled, either true or false"),
13455
- auto_highlights_result: zod3.object({
13456
- status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
13457
- results: zod3.array(
13458
- zod3.object({
13459
- count: zod3.number().describe("The total number of times the key phrase appears in the audio file"),
13460
- rank: zod3.number().min(deleteTranscriptResponseAutoHighlightsResultResultsItemRankMin).max(deleteTranscriptResponseAutoHighlightsResultResultsItemRankMax).describe(
13461
- "The total relevancy to the overall audio file of this key phrase - a greater number means more relevant"
13462
- ),
13463
- text: zod3.string().describe("The text itself of the key phrase"),
13464
- timestamps: zod3.array(
13465
- zod3.object({
13466
- start: zod3.number().describe("The start time in milliseconds"),
13467
- end: zod3.number().describe("The end time in milliseconds")
13468
- }).describe("Timestamp containing a start and end property in milliseconds")
13469
- ).describe("The timestamp of the of the key phrase")
13470
- })
13471
- ).describe("A temporally-sequential array of Key Phrases")
13472
- }).describe(
13473
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.\n"
13474
- ).or(zod3.null()).optional().describe(
13475
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.\n"
14194
+ language_detection_options: zod3.object({
14195
+ expected_languages: zod3.array(zod3.string()).optional().describe(
14196
+ 'List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.'
14197
+ ),
14198
+ fallback_language: zod3.string().default(deleteTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault).describe(
14199
+ 'If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n'
14200
+ ),
14201
+ code_switching: zod3.boolean().optional().describe(
14202
+ "Whether [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) should be detected.\n"
14203
+ ),
14204
+ code_switching_confidence_threshold: zod3.number().default(
14205
+ deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault
14206
+ ).describe(
14207
+ "The confidence threshold for [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.\n"
14208
+ )
14209
+ }).optional().describe(
14210
+ "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
13476
14211
  ),
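For reference, the language-detection fields introduced above combine into a request payload like the following minimal sketch. Field names and defaults come from the schema itself; the endpoint, auth header, and audio URL are assumptions based on the public AssemblyAI REST API that these generated schemas mirror.

// Hypothetical transcript request exercising the new language detection options.
const request = {
  audio_url: "https://example.com/recording.mp3", // placeholder audio file
  language_detection: true,
  language_detection_options: {
    expected_languages: ["en", "es"], // defaults to ["all"] when unspecified
    fallback_language: "en",          // used when the detected language is not expected
    code_switching: true,             // detect mid-audio language switches
    code_switching_confidence_threshold: 0.5
  },
  language_confidence_threshold: 0.7  // errors if detection confidence falls below this
};
const res = await fetch("https://api.assemblyai.com/v2/transcript", {
  method: "POST",
  headers: { authorization: process.env.ASSEMBLYAI_API_KEY, "content-type": "application/json" },
  body: JSON.stringify(request)
});
console.log((await res.json()).id);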
13477
- audio_start_from: zod3.number().nullish().describe(
13478
- "The point in time, in milliseconds, in the file at which the transcription was started"
14212
+ multichannel: zod3.boolean().nullish().describe(
14213
+ "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
13479
14214
  ),
13480
- audio_end_at: zod3.number().nullish().describe(
13481
- "The point in time, in milliseconds, in the file at which the transcription was terminated"
14215
+ prompt: zod3.string().optional().describe(
14216
+ "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
13482
14217
  ),
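The new `prompt` parameter pairs with the `temperature` field defined further down in this schema. Below is a hedged sketch of a Universal-3 Pro request; the model slug and payload shape are inferred from the descriptions in this diff, not from separate documentation.

// Prompting is described as Universal-3 Pro only, so the model is pinned here.
const promptedRequest = {
  audio_url: "https://example.com/clinic-visit.mp3", // placeholder
  speech_models: ["universal-3-pro"],                // assumed model slug
  prompt: "A clinical visit; prefer medical spellings such as 'metoprolol'.",
  temperature: 0.2                                   // lower values are more conservative
};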
13483
- word_boost: zod3.array(zod3.string()).optional().describe("The list of custom vocabulary to boost transcription probability for"),
13484
- boost_param: zod3.string().nullish().describe("The word boost parameter value"),
13485
- filter_profanity: zod3.boolean().nullish().describe(
13486
- "Whether [Profanity Filtering](https://www.assemblyai.com/docs/models/speech-recognition#profanity-filtering) is enabled, either true or false"
14218
+ punctuate: zod3.boolean().nullish().describe(
14219
+ "Whether [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
13487
14220
  ),
13488
14221
  redact_pii: zod3.boolean().describe(
13489
- "Whether [PII Redaction](https://www.assemblyai.com/docs/models/pii-redaction) is enabled, either true or false"
14222
+ "Whether [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) is enabled, either true or false"
13490
14223
  ),
13491
14224
  redact_pii_audio: zod3.boolean().nullish().describe(
13492
- "Whether a redacted version of the audio file was generated,\neither true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.\n"
14225
+ "Whether a redacted version of the audio file was generated,\neither true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
14226
+ ),
14227
+ redact_pii_audio_options: zod3.object({
14228
+ return_redacted_no_speech_audio: zod3.boolean().optional().describe(
14229
+ "By default, audio redaction provides redacted audio URLs only when speech is detected. However, if your use-case specifically requires redacted audio files even for silent audio files without any dialogue, you can opt to receive these URLs by setting this parameter to `true`."
14230
+ ),
14231
+ override_audio_redaction_method: zod3.enum(["silence"]).optional().describe(
14232
+ "Specify the method used to redact audio. By default, redacted audio uses a beep sound. Set to `silence` to replace PII with silence instead of a beep."
14233
+ )
14234
+ }).optional().describe(
14235
+ "The options for PII-redacted audio, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
13493
14236
  ),
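The new `redact_pii_audio_options` object slots into the existing PII fields like so. This is an illustrative payload only; every value below is an example drawn from the enum values and descriptions in this schema.

// Request a silence-redacted WAV, including URLs for speech-free files.
const piiRequest = {
  audio_url: "https://example.com/support-call.wav", // placeholder
  redact_pii: true,
  redact_pii_audio: true,
  redact_pii_audio_options: {
    return_redacted_no_speech_audio: true,     // also return URLs for silent audio
    override_audio_redaction_method: "silence" // silence instead of the default beep
  },
  redact_pii_audio_quality: "wav",             // mp3 (default) or wav
  redact_pii_policies: ["person_name", "phone_number"],
  redact_pii_sub: "entity_name"
};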
13494
14237
  redact_pii_audio_quality: zod3.enum(["mp3", "wav"]).describe(
13495
- "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details."
14238
+ "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details."
13496
14239
  ).or(zod3.null()).optional().describe(
13497
- "The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.\n"
14240
+ "The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
13498
14241
  ),
13499
14242
  redact_pii_policies: zod3.array(
13500
14243
  zod3.enum([
@@ -13544,271 +14287,256 @@ var deleteTranscriptResponse = zod3.object({
13544
14287
  "zodiac_sign"
13545
14288
  ]).describe("The type of PII to redact")
13546
14289
  ).nullish().describe(
13547
- "The list of PII Redaction policies that were enabled, if PII Redaction is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.\n"
14290
+ "The list of PII Redaction policies that were enabled, if PII Redaction is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more information.\n"
13548
14291
  ),
13549
14292
  redact_pii_sub: zod3.enum(["entity_name", "hash"]).optional().describe(
13550
- 'The replacement logic for detected PII, can be "entity_name" or "hash". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.'
14293
+ "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
14294
+ ),
14295
+ sentiment_analysis: zod3.boolean().nullish().describe(
14296
+ "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
14297
+ ),
14298
+ sentiment_analysis_results: zod3.array(
14299
+ zod3.object({
14300
+ text: zod3.string().describe("The transcript of the sentence"),
14301
+ start: zod3.number().describe("The starting time, in milliseconds, of the sentence"),
14302
+ end: zod3.number().describe("The ending time, in milliseconds, of the sentence"),
14303
+ sentiment: zod3.enum(["POSITIVE", "NEUTRAL", "NEGATIVE"]),
14304
+ confidence: zod3.number().describe(
14305
+ "The confidence score for the detected sentiment of the sentence, from 0 to 1"
14306
+ ),
14307
+ channel: zod3.string().nullish().describe(
14308
+ "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14309
+ ),
14310
+ speaker: zod3.string().nullable().describe(
14311
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14312
+ )
14313
+ }).describe("The result of the Sentiment Analysis model")
14314
+ ).nullish().describe(
14315
+ "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
13551
14316
  ),
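Consuming the sentiment results is straightforward; the transcript below is a hand-written fixture shaped like this schema, not real API output.

// Flag high-confidence negative sentences, attributing them to a speaker when present.
const transcript = {
  sentiment_analysis: true,
  sentiment_analysis_results: [
    { text: "This is unacceptable.", start: 1200, end: 2400,
      sentiment: "NEGATIVE", confidence: 0.91, channel: "1", speaker: "A" }
  ]
};
for (const s of transcript.sentiment_analysis_results ?? []) {
  if (s.sentiment === "NEGATIVE" && s.confidence > 0.8) {
    console.log(`${s.speaker ?? "unknown"} @ ${s.start}ms: ${s.text}`);
  }
}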
13552
14317
  speaker_labels: zod3.boolean().nullish().describe(
13553
- "Whether [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, can be true or false"
14318
+ "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
13554
14319
  ),
13555
14320
  speakers_expected: zod3.number().nullish().describe(
13556
- "Tell the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details."
14321
+ "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
13557
14322
  ),
13558
- content_safety: zod3.boolean().nullish().describe(
13559
- "Whether [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation) is enabled, can be true or false"
14323
+ speech_model_used: zod3.string().optional().describe(
14324
+ "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
13560
14325
  ),
13561
- content_safety_labels: zod3.object({
13562
- status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
13563
- results: zod3.array(
13564
- zod3.object({
13565
- text: zod3.string().describe("The transcript of the section flagged by the Content Moderation model"),
13566
- labels: zod3.array(
13567
- zod3.object({
13568
- label: zod3.string().describe("The label of the sensitive topic"),
13569
- confidence: zod3.number().min(
13570
- deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMin
13571
- ).max(
13572
- deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax
13573
- ).describe("The confidence score for the topic being discussed, from 0 to 1"),
13574
- severity: zod3.number().min(
13575
- deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin
13576
- ).max(
13577
- deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax
13578
- ).describe("How severely the topic is discussed in the section, from 0 to 1")
13579
- })
13580
- ).describe(
13581
- "An array of safety labels, one per sensitive topic that was detected in the section"
14326
+ speech_models: zod3.array(
14327
+ zod3.string().describe(
14328
+ "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
14329
+ )
14330
+ ).nullish().describe(
14331
+ "List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.\n"
14332
+ ),
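Together, `speech_models` (request) and `speech_model_used` (response) implement the fallback routing described above. A sketch follows; the model slugs are assumptions taken from the model names mentioned elsewhere in this diff.

// Priority-ordered list: the service routes to the first available model.
const routedRequest = {
  audio_url: "https://example.com/meeting.mp3",     // placeholder
  speech_models: ["universal-3-pro", "universal-2"] // assumed slugs
};
// Illustrative response fragment showing which model actually ran:
const completedFragment = { speech_model_used: "universal-2" };
console.log(`Transcribed with ${completedFragment.speech_model_used}`);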
14333
+ speech_threshold: zod3.number().nullish().describe(
14334
+ "Defaults to null. Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.\n"
14335
+ ),
14336
+ speech_understanding: zod3.object({
14337
+ request: zod3.object({
14338
+ translation: zod3.object({
14339
+ target_languages: zod3.array(zod3.string()).describe(
14340
+ 'List of target language codes (e.g., `["es", "de"]`). See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for supported languages.'
13582
14341
  ),
13583
- sentences_idx_start: zod3.number().describe("The sentence index at which the section begins"),
13584
- sentences_idx_end: zod3.number().describe("The sentence index at which the section ends"),
13585
- timestamp: zod3.object({
13586
- start: zod3.number().describe("The start time in milliseconds"),
13587
- end: zod3.number().describe("The end time in milliseconds")
13588
- }).describe("Timestamp containing a start and end property in milliseconds")
14342
+ formal: zod3.boolean().default(deleteTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault).describe(
14343
+ "Use formal language style. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
14344
+ ),
14345
+ match_original_utterance: zod3.boolean().optional().describe(
14346
+ "When enabled with Speaker Labels, returns translated text in the utterances array. Each utterance will include a `translated_texts` key containing translations for each target language."
14347
+ )
13589
14348
  })
13590
- ).describe("An array of results for the Content Moderation model"),
13591
- summary: zod3.record(
13592
- zod3.string(),
13593
- zod3.number().min(deleteTranscriptResponseContentSafetyLabelsSummaryMinOne).max(deleteTranscriptResponseContentSafetyLabelsSummaryMaxOne).describe(
13594
- 'A confidence score for the presence of the sensitive topic "topic" across the entire audio file'
14349
+ }).describe(
14350
+ "Request body for [Translation](https://www.assemblyai.com/docs/speech-understanding/translation)."
14351
+ ).or(
14352
+ zod3.object({
14353
+ speaker_identification: zod3.object({
14354
+ speaker_type: zod3.enum(["role", "name"]).describe(
14355
+ "Type of speaker identification. See [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification) for details on each type."
14356
+ ),
14357
+ known_values: zod3.array(zod3.string()).optional().describe(
14358
+ 'Required if speaker_type is "role". Each value must be 35 characters or less.'
14359
+ ),
14360
+ speakers: zod3.array(
14361
+ zod3.object({
14362
+ role: zod3.string().optional().describe(
14363
+ 'The role of the speaker. Required when `speaker_type` is "role".'
14364
+ ),
14365
+ name: zod3.string().optional().describe(
14366
+ 'The name of the speaker. Required when `speaker_type` is "name".'
14367
+ ),
14368
+ description: zod3.string().optional().describe(
14369
+ "A description of the speaker to help the model identify them based on conversational context."
14370
+ )
14371
+ })
14372
+ ).optional().describe(
14373
+ "An array of speaker objects with metadata to improve identification accuracy. Each object should include a `role` or `name` (depending on `speaker_type`) and an optional `description` to help the model identify the speaker. You can also include any additional custom properties (e.g., `company`, `title`) to provide more context. Use this as an alternative to `known_values` when you want to provide additional context about each speaker."
14374
+ )
14375
+ })
14376
+ }).describe(
14377
+ "Request body for [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification)."
13595
14378
  )
13596
- ).describe(
13597
- "A summary of the Content Moderation confidence results for the entire audio file"
13598
- ),
13599
- severity_score_summary: zod3.record(
13600
- zod3.string(),
14379
+ ).or(
14380
+ zod3.object({
14381
+ custom_formatting: zod3.object({
14382
+ date: zod3.string().optional().describe(
14383
+ 'Date format pattern (e.g., `"mm/dd/yyyy"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
14384
+ ),
14385
+ phone_number: zod3.string().optional().describe(
14386
+ 'Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
14387
+ ),
14388
+ email: zod3.string().optional().describe(
14389
+ 'Email format pattern (e.g., `"username@domain.com"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
14390
+ )
14391
+ })
14392
+ }).describe(
14393
+ "Request body for [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting)."
14394
+ )
14395
+ ).optional(),
14396
+ response: zod3.object({
14397
+ translation: zod3.object({
14398
+ status: zod3.string().optional()
14399
+ }).optional()
14400
+ }).or(
13601
14401
  zod3.object({
13602
- low: zod3.number().min(deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin).max(deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax),
13603
- medium: zod3.number().min(deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin).max(deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax),
13604
- high: zod3.number().min(deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin).max(deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax)
14402
+ speaker_identification: zod3.object({
14403
+ mapping: zod3.record(zod3.string(), zod3.string()).optional().describe(
14404
+ 'A mapping of the original generic speaker labels (e.g., "A", "B") to the identified speaker names or roles.'
14405
+ ),
14406
+ status: zod3.string().optional()
14407
+ }).optional()
13605
14408
  })
13606
- ).describe(
13607
- "A summary of the Content Moderation severity results for the entire audio file"
13608
- )
13609
- }).describe(
13610
- "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.\n"
13611
- ).or(zod3.null()).optional().describe(
13612
- "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.\n"
13613
- ),
13614
- iab_categories: zod3.boolean().nullish().describe(
13615
- "Whether [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) is enabled, can be true or false"
13616
- ),
13617
- iab_categories_result: zod3.object({
13618
- status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
13619
- results: zod3.array(
14409
+ ).or(
13620
14410
  zod3.object({
13621
- text: zod3.string().describe("The text in the transcript in which a detected topic occurs"),
13622
- labels: zod3.array(
13623
- zod3.object({
13624
- relevance: zod3.number().min(
13625
- deleteTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin
13626
- ).max(
13627
- deleteTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax
13628
- ).describe("How relevant the detected topic is of a detected topic"),
13629
- label: zod3.string().describe(
13630
- "The IAB taxonomical label for the label of the detected topic, where > denotes supertopic/subtopic relationship"
13631
- )
13632
- })
13633
- ).optional().describe("An array of detected topics in the text"),
13634
- timestamp: zod3.object({
13635
- start: zod3.number().describe("The start time in milliseconds"),
13636
- end: zod3.number().describe("The end time in milliseconds")
13637
- }).optional().describe("Timestamp containing a start and end property in milliseconds")
13638
- }).describe("The result of the topic detection model")
13639
- ).describe("An array of results for the Topic Detection model"),
13640
- summary: zod3.record(
13641
- zod3.string(),
13642
- zod3.number().min(deleteTranscriptResponseIabCategoriesResultSummaryMinOne).max(deleteTranscriptResponseIabCategoriesResultSummaryMaxOne)
13643
- ).describe("The overall relevance of topic to the entire audio file")
13644
- }).describe(
13645
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.\n"
13646
- ).or(zod3.null()).optional().describe(
13647
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.\n"
13648
- ),
13649
- custom_spelling: zod3.array(
13650
- zod3.object({
13651
- from: zod3.array(zod3.string().describe("Word or phrase to replace")).describe("Words or phrases to replace"),
13652
- to: zod3.string().describe("Word to replace with")
13653
- }).describe(
13654
- "Object containing words or phrases to replace, and the word or phrase to replace with"
13655
- )
13656
- ).nullish().describe("Customize how words are spelled and formatted using to and from values"),
13657
- keyterms_prompt: zod3.array(zod3.string()).optional().describe(
13658
- "Improve accuracy with up to 1000 domain-specific words or phrases (maximum 6 words per phrase).\n"
14411
+ custom_formatting: zod3.object({
14412
+ mapping: zod3.record(zod3.string(), zod3.string()).optional(),
14413
+ formatted_text: zod3.string().optional()
14414
+ }).optional()
14415
+ })
14416
+ ).optional()
14417
+ }).optional().describe(
14418
+ "Speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.\n"
13659
14419
  ),
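The `speech_understanding.request` union appears to take one task object per request. Here are two hedged variants built purely from the field names in the schema above:

// Variant 1: translation, with per-utterance translations enabled.
const translationRequest = {
  audio_url: "https://example.com/interview.mp3", // placeholder
  speaker_labels: true,
  speech_understanding: {
    request: {
      translation: {
        target_languages: ["es", "de"],
        formal: true,
        match_original_utterance: true // adds translated_texts to each utterance
      }
    }
  }
};
// Variant 2: speaker identification by name, with free-form speaker context.
const speakerIdRequest = {
  audio_url: "https://example.com/interview.mp3",
  speaker_labels: true,
  speech_understanding: {
    request: {
      speaker_identification: {
        speaker_type: "name",
        speakers: [{ name: "Ada", description: "Host; asks most of the questions" }]
      }
    }
  }
};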
13660
- prompt: zod3.string().optional().describe("This parameter does not currently have any functionality attached to it."),
13661
- auto_chapters: zod3.boolean().nullish().describe(
13662
- "Whether [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) is enabled, can be true or false"
14420
+ status: zod3.enum(["queued", "processing", "completed", "error"]).describe(
14421
+ "The status of your transcript. Possible values are queued, processing, completed, or error."
13663
14422
  ),
13664
- chapters: zod3.array(
13665
- zod3.object({
13666
- gist: zod3.string().describe(
13667
- "An ultra-short summary (just a few words) of the content spoken in the chapter"
13668
- ),
13669
- headline: zod3.string().describe("A single sentence summary of the content spoken during the chapter"),
13670
- summary: zod3.string().describe("A one paragraph summary of the content spoken during the chapter"),
13671
- start: zod3.number().describe("The starting time, in milliseconds, for the chapter"),
13672
- end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
13673
- }).describe("Chapter of the audio file")
13674
- ).nullish().describe("An array of temporally sequential chapters for the audio file"),
13675
14423
  summarization: zod3.boolean().describe(
13676
- "Whether [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled, either true or false"
14424
+ "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
13677
14425
  ),
13678
- summary_type: zod3.string().nullish().describe(
13679
- "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled"
14426
+ summary: zod3.string().nullish().describe(
14427
+ "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
13680
14428
  ),
13681
14429
  summary_model: zod3.string().nullish().describe(
13682
- "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled\n"
14430
+ "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
13683
14431
  ),
13684
- summary: zod3.string().nullish().describe(
13685
- "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled"
14432
+ summary_type: zod3.string().nullish().describe(
14433
+ "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
13686
14434
  ),
13687
- custom_topics: zod3.boolean().nullish().describe("Whether custom topics is enabled, either true or false"),
13688
- topics: zod3.array(zod3.string()).optional().describe("The list of custom topics provided if custom topics is enabled"),
13689
- sentiment_analysis: zod3.boolean().nullish().describe(
13690
- "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) is enabled, can be true or false"
14435
+ remove_audio_tags: zod3.enum(["all"]).describe(
14436
+ "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
14437
+ ).or(zod3.null()).optional().describe(
14438
+ "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
13691
14439
  ),
13692
- sentiment_analysis_results: zod3.array(
14440
+ temperature: zod3.number().nullish().describe(
14441
+ "The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
14442
+ ),
14443
+ text: zod3.string().nullish().describe("The textual transcript of your media file"),
14444
+ throttled: zod3.boolean().nullish().describe(
14445
+ "True while a request is throttled and false when a request is no longer throttled"
14446
+ ),
14447
+ utterances: zod3.array(
13693
14448
  zod3.object({
13694
- text: zod3.string().describe("The transcript of the sentence"),
13695
- start: zod3.number().describe("The starting time, in milliseconds, of the sentence"),
13696
- end: zod3.number().describe("The ending time, in milliseconds, of the sentence"),
13697
- sentiment: zod3.enum(["POSITIVE", "NEUTRAL", "NEGATIVE"]),
13698
- confidence: zod3.number().min(deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMin).max(deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMax).describe(
13699
- "The confidence score for the detected sentiment of the sentence, from 0 to 1"
13700
- ),
14449
+ confidence: zod3.number().describe("The confidence score for the transcript of this utterance"),
14450
+ start: zod3.number().describe("The starting time, in milliseconds, of the utterance in the audio file"),
14451
+ end: zod3.number().describe("The ending time, in milliseconds, of the utterance in the audio file"),
14452
+ text: zod3.string().describe("The text for this utterance"),
14453
+ words: zod3.array(
14454
+ zod3.object({
14455
+ confidence: zod3.number().describe("The confidence score for the transcript of this word"),
14456
+ start: zod3.number().describe("The starting time, in milliseconds, for the word"),
14457
+ end: zod3.number().describe("The ending time, in milliseconds, for the word"),
14458
+ text: zod3.string().describe("The text of the word"),
14459
+ channel: zod3.string().nullish().describe(
14460
+ "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14461
+ ),
14462
+ speaker: zod3.string().nullable().describe(
14463
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14464
+ )
14465
+ })
14466
+ ).describe("The words in the utterance."),
13701
14467
  channel: zod3.string().nullish().describe(
13702
14468
  "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13703
14469
  ),
13704
- speaker: zod3.string().nullable().describe(
13705
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
14470
+ speaker: zod3.string().describe(
14471
+ 'The speaker of this utterance, where each speaker is assigned a sequential capital letter - e.g. "A" for Speaker A, "B" for Speaker B, etc.'
14472
+ ),
14473
+ translated_texts: zod3.record(zod3.string(), zod3.string()).optional().describe(
14474
+ 'Translations keyed by language code (e.g., `{"es": "Texto traducido", "de": "\xDCbersetzter Text"}`). Only present when `match_original_utterance` is enabled with translation.'
13706
14475
  )
13707
- }).describe("The result of the Sentiment Analysis model")
14476
+ })
13708
14477
  ).nullish().describe(
13709
- "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) for more information.\n"
14478
+ "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
13710
14479
  ),
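When `match_original_utterance` is enabled, each utterance carries its translations inline. A fixture-based sketch of reading them back:

// Prefer the Spanish translation when present, falling back to the original text.
const utterance = {
  confidence: 0.97, start: 0, end: 4200,
  text: "Welcome back to the show.",
  words: [],
  speaker: "A",
  translated_texts: { es: "Bienvenidos de nuevo al programa." }
};
console.log(`${utterance.speaker}: ${utterance.translated_texts?.es ?? utterance.text}`);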
13711
- entity_detection: zod3.boolean().nullish().describe(
13712
- "Whether [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection) is enabled, can be true or false"
14480
+ webhook_auth: zod3.boolean().describe(
14481
+ "Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
13713
14482
  ),
13714
- entities: zod3.array(
14483
+ webhook_auth_header_name: zod3.string().nullish().describe(
14484
+ "The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests"
14485
+ ),
14486
+ webhook_status_code: zod3.number().nullish().describe(
14487
+ "The status code we received from your server when delivering the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) request, if a webhook URL was provided"
14488
+ ),
14489
+ webhook_url: zod3.string().nullish().describe(
14490
+ "The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.\n"
14491
+ ),
14492
+ words: zod3.array(
13715
14493
  zod3.object({
13716
- entity_type: zod3.enum([
13717
- "account_number",
13718
- "banking_information",
13719
- "blood_type",
13720
- "credit_card_cvv",
13721
- "credit_card_expiration",
13722
- "credit_card_number",
13723
- "date",
13724
- "date_interval",
13725
- "date_of_birth",
13726
- "drivers_license",
13727
- "drug",
13728
- "duration",
13729
- "email_address",
13730
- "event",
13731
- "filename",
13732
- "gender_sexuality",
13733
- "healthcare_number",
13734
- "injury",
13735
- "ip_address",
13736
- "language",
13737
- "location",
13738
- "marital_status",
13739
- "medical_condition",
13740
- "medical_process",
13741
- "money_amount",
13742
- "nationality",
13743
- "number_sequence",
13744
- "occupation",
13745
- "organization",
13746
- "passport_number",
13747
- "password",
13748
- "person_age",
13749
- "person_name",
13750
- "phone_number",
13751
- "physical_attribute",
13752
- "political_affiliation",
13753
- "religion",
13754
- "statistics",
13755
- "time",
13756
- "url",
13757
- "us_social_security_number",
13758
- "username",
13759
- "vehicle_id",
13760
- "zodiac_sign"
13761
- ]).describe("The type of entity for the detected entity"),
13762
- text: zod3.string().describe("The text for the detected entity"),
13763
- start: zod3.number().describe(
13764
- "The starting time, in milliseconds, at which the detected entity appears in the audio file"
14494
+ confidence: zod3.number().describe("The confidence score for the transcript of this word"),
14495
+ start: zod3.number().describe("The starting time, in milliseconds, for the word"),
14496
+ end: zod3.number().describe("The ending time, in milliseconds, for the word"),
14497
+ text: zod3.string().describe("The text of the word"),
14498
+ channel: zod3.string().nullish().describe(
14499
+ "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13765
14500
  ),
13766
- end: zod3.number().describe(
13767
- "The ending time, in milliseconds, for the detected entity in the audio file"
14501
+ speaker: zod3.string().nullable().describe(
14502
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
13768
14503
  )
13769
- }).describe("A detected entity")
14504
+ })
13770
14505
  ).nullish().describe(
13771
- "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/models/entity-detection) for more information.\n"
13772
- ),
13773
- speech_threshold: zod3.number().min(deleteTranscriptResponseSpeechThresholdMin).max(deleteTranscriptResponseSpeechThresholdMax).nullish().describe(
13774
- "Defaults to null. Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n"
13775
- ),
13776
- throttled: zod3.boolean().nullish().describe(
13777
- "True while a request is throttled and false when a request is no longer throttled"
13778
- ),
13779
- error: zod3.string().optional().describe("Error message of why the transcript failed"),
13780
- language_model: zod3.string().describe("The language model that was used for the transcript"),
13781
- acoustic_model: zod3.string().describe("The acoustic model that was used for the transcript")
14506
+ "An array of temporally-sequential word objects, one for each word in the transcript.\n"
14507
+ ),
14508
+ acoustic_model: zod3.string().describe("This parameter does not currently have any functionality attached to it."),
14509
+ custom_topics: zod3.boolean().nullish().describe("This parameter does not currently have any functionality attached to it."),
14510
+ language_model: zod3.string().describe("This parameter does not currently have any functionality attached to it."),
14511
+ speech_model: zod3.string().describe(
14512
+ "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
14513
+ ).or(zod3.null()).describe(
14514
+ "This parameter has been replaced with the `speech_models` parameter, learn more about the `speech_models` parameter [here](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model).\n"
14515
+ ),
14516
+ speed_boost: zod3.boolean().nullish().describe("This parameter does not currently have any functionality attached to it."),
14517
+ topics: zod3.array(zod3.string()).optional().describe("This parameter does not currently have any functionality attached to it."),
14518
+ translated_texts: zod3.object({
14519
+ language_code: zod3.string().optional().describe("Translated text for this language code")
14520
+ }).optional().describe(
14521
+ "Translated text keyed by language code. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
14522
+ )
13782
14523
  }).describe("A transcript object");
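A minimal polling sketch around the transcript object defined above. The endpoint, auth header, and `error` field are assumptions based on the public AssemblyAI REST API that this generated schema mirrors.

// Poll until the transcript leaves the queued/processing states.
async function waitForTranscript(id, apiKey) {
  for (;;) {
    const res = await fetch(`https://api.assemblyai.com/v2/transcript/${id}`, {
      headers: { authorization: apiKey }
    });
    const transcript = await res.json();
    if (transcript.status === "completed") return transcript; // queued | processing | completed | error
    if (transcript.status === "error") throw new Error(transcript.error);
    await new Promise((resolve) => setTimeout(resolve, 3000));
  }
}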
13783
- var getSubtitlesParams = zod3.object({
13784
- transcript_id: zod3.string().describe("ID of the transcript"),
13785
- subtitle_format: zod3.enum(["srt", "vtt"]).describe("The format of the captions")
13786
- });
13787
- var getSubtitlesQueryParams = zod3.object({
13788
- chars_per_caption: zod3.number().optional().describe("The maximum number of characters per caption")
13789
- });
13790
14524
  var getTranscriptSentencesParams = zod3.object({
13791
14525
  transcript_id: zod3.string().describe("ID of the transcript")
13792
14526
  });
13793
- var getTranscriptSentencesResponseConfidenceMin = 0;
13794
- var getTranscriptSentencesResponseConfidenceMax = 1;
13795
- var getTranscriptSentencesResponseSentencesItemConfidenceMin = 0;
13796
- var getTranscriptSentencesResponseSentencesItemConfidenceMax = 1;
13797
- var getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMin = 0;
13798
- var getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMax = 1;
13799
14527
  var getTranscriptSentencesResponse = zod3.object({
13800
14528
  id: zod3.string().uuid().describe("The unique identifier for the transcript"),
13801
- confidence: zod3.number().min(getTranscriptSentencesResponseConfidenceMin).max(getTranscriptSentencesResponseConfidenceMax).describe("The confidence score for the transcript"),
14529
+ confidence: zod3.number().describe("The confidence score for the transcript"),
13802
14530
  audio_duration: zod3.number().describe("The duration of the audio file in seconds"),
13803
14531
  sentences: zod3.array(
13804
14532
  zod3.object({
13805
14533
  text: zod3.string().describe("The transcript of the sentence"),
13806
14534
  start: zod3.number().describe("The starting time, in milliseconds, for the sentence"),
13807
14535
  end: zod3.number().describe("The ending time, in milliseconds, for the sentence"),
13808
- confidence: zod3.number().min(getTranscriptSentencesResponseSentencesItemConfidenceMin).max(getTranscriptSentencesResponseSentencesItemConfidenceMax).describe("The confidence score for the transcript of this sentence"),
14536
+ confidence: zod3.number().describe("The confidence score for the transcript of this sentence"),
13809
14537
  words: zod3.array(
13810
14538
  zod3.object({
13811
- confidence: zod3.number().min(getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMin).max(getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMax).describe("The confidence score for the transcript of this word"),
14539
+ confidence: zod3.number().describe("The confidence score for the transcript of this word"),
13812
14540
  start: zod3.number().describe("The starting time, in milliseconds, for the word"),
13813
14541
  end: zod3.number().describe("The ending time, in milliseconds, for the word"),
13814
14542
  text: zod3.string().describe("The text of the word"),
@@ -13816,7 +14544,7 @@ var getTranscriptSentencesResponse = zod3.object({
13816
14544
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13817
14545
  ),
13818
14546
  speaker: zod3.string().nullable().describe(
13819
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
14547
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
13820
14548
  )
13821
14549
  })
13822
14550
  ).describe("An array of words in the sentence"),
@@ -13824,7 +14552,7 @@ var getTranscriptSentencesResponse = zod3.object({
13824
14552
  "The channel of the sentence. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13825
14553
  ),
13826
14554
  speaker: zod3.string().nullable().describe(
13827
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
14555
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
13828
14556
  )
13829
14557
  })
13830
14558
  ).describe("An array of sentences in the transcript")
@@ -13832,25 +14560,19 @@ var getTranscriptSentencesResponse = zod3.object({
13832
14560
  var getTranscriptParagraphsParams = zod3.object({
13833
14561
  transcript_id: zod3.string().describe("ID of the transcript")
13834
14562
  });
13835
- var getTranscriptParagraphsResponseConfidenceMin = 0;
13836
- var getTranscriptParagraphsResponseConfidenceMax = 1;
13837
- var getTranscriptParagraphsResponseParagraphsItemConfidenceMin = 0;
13838
- var getTranscriptParagraphsResponseParagraphsItemConfidenceMax = 1;
13839
- var getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMin = 0;
13840
- var getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMax = 1;
13841
14563
  var getTranscriptParagraphsResponse = zod3.object({
13842
14564
  id: zod3.string().uuid().describe("The unique identifier of your transcript"),
13843
- confidence: zod3.number().min(getTranscriptParagraphsResponseConfidenceMin).max(getTranscriptParagraphsResponseConfidenceMax).describe("The confidence score for the transcript"),
14565
+ confidence: zod3.number().describe("The confidence score for the transcript"),
13844
14566
  audio_duration: zod3.number().describe("The duration of the audio file in seconds"),
13845
14567
  paragraphs: zod3.array(
13846
14568
  zod3.object({
13847
14569
  text: zod3.string().describe("The transcript of the paragraph"),
13848
14570
  start: zod3.number().describe("The starting time, in milliseconds, of the paragraph"),
13849
14571
  end: zod3.number().describe("The ending time, in milliseconds, of the paragraph"),
13850
- confidence: zod3.number().min(getTranscriptParagraphsResponseParagraphsItemConfidenceMin).max(getTranscriptParagraphsResponseParagraphsItemConfidenceMax).describe("The confidence score for the transcript of this paragraph"),
14572
+ confidence: zod3.number().describe("The confidence score for the transcript of this paragraph"),
13851
14573
  words: zod3.array(
13852
14574
  zod3.object({
13853
- confidence: zod3.number().min(getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMin).max(getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMax).describe("The confidence score for the transcript of this word"),
14575
+ confidence: zod3.number().describe("The confidence score for the transcript of this word"),
13854
14576
  start: zod3.number().describe("The starting time, in milliseconds, for the word"),
13855
14577
  end: zod3.number().describe("The ending time, in milliseconds, for the word"),
13856
14578
  text: zod3.string().describe("The text of the word"),
@@ -13858,13 +14580,28 @@ var getTranscriptParagraphsResponse = zod3.object({
13858
14580
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13859
14581
  ),
13860
14582
  speaker: zod3.string().nullable().describe(
13861
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
14583
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
13862
14584
  )
13863
14585
  })
13864
14586
  ).describe("An array of words in the paragraph")
13865
14587
  })
13866
14588
  ).describe("An array of paragraphs in the transcript")
13867
14589
  });
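Fetching the views validated by the two schemas above, assuming the public /sentences and /paragraphs endpoints:

// Sentence- and paragraph-level views of a finished transcript.
const transcriptId = "your-transcript-id"; // placeholder
const headers = { authorization: process.env.ASSEMBLYAI_API_KEY };
const base = `https://api.assemblyai.com/v2/transcript/${transcriptId}`;
const { sentences } = await (await fetch(`${base}/sentences`, { headers })).json();
const { paragraphs } = await (await fetch(`${base}/paragraphs`, { headers })).json();
console.log(sentences.length, paragraphs.length);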
14590
+ var getSubtitlesParams = zod3.object({
14591
+ transcript_id: zod3.string().describe("ID of the transcript"),
14592
+ subtitle_format: zod3.enum(["srt", "vtt"]).describe("The format of the captions")
14593
+ });
14594
+ var getSubtitlesQueryParams = zod3.object({
14595
+ chars_per_caption: zod3.number().optional().describe("The maximum number of characters per caption")
14596
+ });
14597
+ var getSubtitlesResponse = zod3.object({});
14598
+ var getRedactedAudioParams = zod3.object({
14599
+ transcript_id: zod3.string().describe("ID of the transcript")
14600
+ });
14601
+ var getRedactedAudioResponse = zod3.object({
14602
+ status: zod3.enum(["redacted_audio_ready"]).describe("The status of the redacted audio"),
14603
+ redacted_audio_url: zod3.string().describe("The URL of the redacted audio file")
14604
+ });
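A usage sketch for the relocated subtitle and redacted-audio schemas. The paths are assumptions based on the public AssemblyAI API; getSubtitlesResponse is presumably empty because the subtitle endpoints return plain text rather than JSON.

// Captions come back as text; redacted audio as a small JSON status object.
const authHeaders = { authorization: process.env.ASSEMBLYAI_API_KEY };
const transcriptBase = "https://api.assemblyai.com/v2/transcript/your-transcript-id"; // placeholder id
const srt = await (await fetch(`${transcriptBase}/srt?chars_per_caption=32`, { headers: authHeaders })).text();
const redacted = await (await fetch(`${transcriptBase}/redacted-audio`, { headers: authHeaders })).json();
if (redacted.status === "redacted_audio_ready") console.log(redacted.redacted_audio_url);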
13868
14605
  var wordSearchParams = zod3.object({
13869
14606
  transcript_id: zod3.string().describe("ID of the transcript")
13870
14607
  });
@@ -13881,7 +14618,7 @@ var wordSearchResponse = zod3.object({
13881
14618
  text: zod3.string().describe("The matched word"),
13882
14619
  count: zod3.number().describe("The total amount of times the word is in the transcript"),
13883
14620
  timestamps: zod3.array(
13884
- zod3.array(zod3.number().describe("Timestamp in milliseconds")).describe(
14621
+ zod3.array(zod3.number()).describe(
13885
14622
  "An array of timestamps structured as [`start_time`, `end_time`] in milliseconds"
13886
14623
  )
13887
14624
  ).describe("An array of timestamps"),
@@ -13891,205 +14628,6 @@ var wordSearchResponse = zod3.object({
13891
14628
  })
13892
14629
  ).describe("The matches of the search")
13893
14630
  });
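A word-search sketch matching wordSearchResponse above; the query-string format is an assumption based on the public AssemblyAI word-search endpoint.

// Search a finished transcript for specific words and print their first hits.
const searchBase = "https://api.assemblyai.com/v2/transcript/your-transcript-id"; // placeholder id
const searchUrl = `${searchBase}/word-search?words=${encodeURIComponent("refund,cancel")}`;
const { matches } = await (await fetch(searchUrl, {
  headers: { authorization: process.env.ASSEMBLYAI_API_KEY }
})).json();
for (const m of matches) {
  console.log(m.text, m.count, m.timestamps[0]); // each timestamp is [start_ms, end_ms]
}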
13894
- var getRedactedAudioParams = zod3.object({
13895
- transcript_id: zod3.string().describe("ID of the transcript")
13896
- });
13897
- var getRedactedAudioResponse = zod3.object({
13898
- status: zod3.enum(["redacted_audio_ready"]).describe("The status of the redacted audio"),
13899
- redacted_audio_url: zod3.string().describe("The URL of the redacted audio file")
13900
- });
13901
- var createTemporaryTokenBodyExpiresInMin = 60;
13902
- var createTemporaryTokenBody = zod3.object({
13903
- expires_in: zod3.number().min(createTemporaryTokenBodyExpiresInMin).describe("The amount of time until the token expires in seconds")
13904
- });
13905
- var createTemporaryTokenResponse = zod3.object({
13906
- token: zod3.string().describe("The temporary authentication token for Streaming Speech-to-Text")
13907
- });
13908
- var lemurTaskBodyFinalModelDefault = "default";
13909
- var lemurTaskBodyMaxOutputSizeDefault = 2e3;
13910
- var lemurTaskBodyTemperatureDefault = 0;
13911
- var lemurTaskBodyTemperatureMin = 0;
13912
- var lemurTaskBodyTemperatureMax = 1;
13913
- var lemurTaskBody = zod3.object({
13914
- prompt: zod3.string().describe(
13915
- "Your text to prompt the model to produce a desired output, including any context you want to pass into the model."
13916
- )
13917
- }).and(
13918
- zod3.object({
13919
- transcript_ids: zod3.array(zod3.string().uuid()).optional().describe(
13920
- "A list of completed transcripts with text. Up to a maximum of 100 hours of audio.\nUse either transcript_ids or input_text as input into LeMUR.\n"
13921
- ),
13922
- input_text: zod3.string().optional().describe(
13923
- "Custom formatted transcript data. Maximum size is the context limit of the selected model.\nUse either transcript_ids or input_text as input into LeMUR.\n"
13924
- ),
13925
- context: zod3.string().or(zod3.record(zod3.string(), zod3.any())).optional().describe("Context to provide the model. This can be a string or a free-form JSON value."),
13926
- final_model: zod3.enum([
13927
- "anthropic/claude-3-5-sonnet",
13928
- "anthropic/claude-3-opus",
13929
- "anthropic/claude-3-haiku"
13930
- ]).describe("The model that is used for the final prompt after compression is performed.\n").or(zod3.string()).describe("The model that is used for the final prompt after compression is performed.\n"),
13931
- max_output_size: zod3.number().default(lemurTaskBodyMaxOutputSizeDefault).describe("Max output size in tokens."),
13932
- temperature: zod3.number().min(lemurTaskBodyTemperatureMin).max(lemurTaskBodyTemperatureMax).optional().describe(
13933
- "The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n"
13934
- )
13935
- })
13936
- );
13937
- var lemurTaskResponseUsageInputTokensMin = 0;
13938
- var lemurTaskResponseUsageOutputTokensMin = 0;
13939
- var lemurTaskResponse = zod3.object({
13940
- response: zod3.string().describe("The response generated by LeMUR.")
13941
- }).and(
13942
- zod3.object({
13943
- request_id: zod3.string().uuid().describe("The ID of the LeMUR request"),
13944
- usage: zod3.object({
13945
- input_tokens: zod3.number().min(lemurTaskResponseUsageInputTokensMin).describe("The number of input tokens used by the model"),
13946
- output_tokens: zod3.number().min(lemurTaskResponseUsageOutputTokensMin).describe("The number of output tokens generated by the model")
13947
- }).describe("The usage numbers for the LeMUR request")
13948
- })
13949
- );
13950
- var lemurSummaryBodyFinalModelDefault = "default";
13951
- var lemurSummaryBodyMaxOutputSizeDefault = 2e3;
13952
- var lemurSummaryBodyTemperatureDefault = 0;
13953
- var lemurSummaryBodyTemperatureMin = 0;
13954
- var lemurSummaryBodyTemperatureMax = 1;
13955
- var lemurSummaryBody = zod3.object({
13956
- transcript_ids: zod3.array(zod3.string().uuid()).optional().describe(
13957
- "A list of completed transcripts with text. Up to a maximum of 100 hours of audio.\nUse either transcript_ids or input_text as input into LeMUR.\n"
13958
- ),
13959
- input_text: zod3.string().optional().describe(
13960
- "Custom formatted transcript data. Maximum size is the context limit of the selected model.\nUse either transcript_ids or input_text as input into LeMUR.\n"
13961
- ),
13962
- context: zod3.string().or(zod3.record(zod3.string(), zod3.any())).optional().describe("Context to provide the model. This can be a string or a free-form JSON value."),
13963
- final_model: zod3.enum(["anthropic/claude-3-5-sonnet", "anthropic/claude-3-opus", "anthropic/claude-3-haiku"]).describe("The model that is used for the final prompt after compression is performed.\n").or(zod3.string()).describe("The model that is used for the final prompt after compression is performed.\n"),
13964
- max_output_size: zod3.number().default(lemurSummaryBodyMaxOutputSizeDefault).describe("Max output size in tokens."),
13965
- temperature: zod3.number().min(lemurSummaryBodyTemperatureMin).max(lemurSummaryBodyTemperatureMax).optional().describe(
13966
- "The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n"
13967
- )
13968
- }).and(
13969
- zod3.object({
13970
- answer_format: zod3.string().optional().describe(
13971
- 'How you want the summary to be returned. This can be any text. Examples: "TLDR", "bullet points"\n'
13972
- )
13973
- })
13974
- );
13975
- var lemurSummaryResponseUsageInputTokensMin = 0;
13976
- var lemurSummaryResponseUsageOutputTokensMin = 0;
13977
- var lemurSummaryResponse = zod3.object({
13978
- response: zod3.string().describe("The response generated by LeMUR.")
13979
- }).and(
13980
- zod3.object({
13981
- request_id: zod3.string().uuid().describe("The ID of the LeMUR request"),
13982
- usage: zod3.object({
13983
- input_tokens: zod3.number().min(lemurSummaryResponseUsageInputTokensMin).describe("The number of input tokens used by the model"),
13984
- output_tokens: zod3.number().min(lemurSummaryResponseUsageOutputTokensMin).describe("The number of output tokens generated by the model")
13985
- }).describe("The usage numbers for the LeMUR request")
13986
- })
13987
- );
13988
- var lemurQuestionAnswerBodyFinalModelDefault = "default";
13989
- var lemurQuestionAnswerBodyMaxOutputSizeDefault = 2e3;
13990
- var lemurQuestionAnswerBodyTemperatureDefault = 0;
13991
- var lemurQuestionAnswerBodyTemperatureMin = 0;
13992
- var lemurQuestionAnswerBodyTemperatureMax = 1;
13993
- var lemurQuestionAnswerBody = zod3.object({
13994
- transcript_ids: zod3.array(zod3.string().uuid()).optional().describe(
13995
- "A list of completed transcripts with text. Up to a maximum of 100 hours of audio.\nUse either transcript_ids or input_text as input into LeMUR.\n"
13996
- ),
13997
- input_text: zod3.string().optional().describe(
13998
- "Custom formatted transcript data. Maximum size is the context limit of the selected model.\nUse either transcript_ids or input_text as input into LeMUR.\n"
13999
- ),
14000
- context: zod3.string().or(zod3.record(zod3.string(), zod3.any())).optional().describe("Context to provide the model. This can be a string or a free-form JSON value."),
14001
- final_model: zod3.enum(["anthropic/claude-3-5-sonnet", "anthropic/claude-3-opus", "anthropic/claude-3-haiku"]).describe("The model that is used for the final prompt after compression is performed.\n").or(zod3.string()).describe("The model that is used for the final prompt after compression is performed.\n"),
14002
- max_output_size: zod3.number().default(lemurQuestionAnswerBodyMaxOutputSizeDefault).describe("Max output size in tokens."),
14003
- temperature: zod3.number().min(lemurQuestionAnswerBodyTemperatureMin).max(lemurQuestionAnswerBodyTemperatureMax).optional().describe(
14004
- "The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n"
14005
- )
14006
- }).and(
14007
- zod3.object({
14008
- questions: zod3.array(
14009
- zod3.object({
14010
- question: zod3.string().describe(
14011
- "The question you wish to ask. For more complex questions use default model."
14012
- ),
14013
- context: zod3.string().or(zod3.record(zod3.string(), zod3.any())).optional().describe(
14014
- "Any context about the transcripts you wish to provide. This can be a string or any object."
14015
- ),
14016
- answer_format: zod3.string().optional().describe(
14017
- `How you want the answer to be returned. This can be any text. Can't be used with answer_options. Examples: "short sentence", "bullet points"
14018
- `
14019
- ),
14020
- answer_options: zod3.array(zod3.string()).optional().describe(
14021
- `What discrete options to return. Useful for precise responses. Can't be used with answer_format. Example: ["Yes", "No"]
14022
- `
14023
- )
14024
- })
14025
- ).describe("A list of questions to ask")
14026
- })
14027
- );
14028
- var lemurQuestionAnswerResponseUsageInputTokensMin = 0;
14029
- var lemurQuestionAnswerResponseUsageOutputTokensMin = 0;
14030
- var lemurQuestionAnswerResponse = zod3.object({
14031
- request_id: zod3.string().uuid().describe("The ID of the LeMUR request"),
14032
- usage: zod3.object({
14033
- input_tokens: zod3.number().min(lemurQuestionAnswerResponseUsageInputTokensMin).describe("The number of input tokens used by the model"),
14034
- output_tokens: zod3.number().min(lemurQuestionAnswerResponseUsageOutputTokensMin).describe("The number of output tokens generated by the model")
14035
- }).describe("The usage numbers for the LeMUR request")
14036
- }).and(
14037
- zod3.object({
14038
- response: zod3.array(
14039
- zod3.object({
14040
- question: zod3.string().describe("The question for LeMUR to answer"),
14041
- answer: zod3.string().describe("The answer generated by LeMUR")
14042
- }).describe("An answer generated by LeMUR and its question")
14043
- ).describe("The answers generated by LeMUR and their questions")
14044
- })
14045
- );
14046
- var getLemurResponseParams = zod3.object({
14047
- request_id: zod3.string().describe(
14048
- "The ID of the LeMUR request you previously made.\nThis would be found in the response of the original request.\n"
14049
- )
14050
- });
14051
- var getLemurResponseResponseUsageInputTokensMin = 0;
14052
- var getLemurResponseResponseUsageOutputTokensMin = 0;
14053
- var getLemurResponseResponseUsageInputTokensMinOne = 0;
14054
- var getLemurResponseResponseUsageOutputTokensMinOne = 0;
14055
- var getLemurResponseResponse = zod3.object({
14056
- response: zod3.string().describe("The response generated by LeMUR.")
14057
- }).and(
14058
- zod3.object({
14059
- request_id: zod3.string().uuid().describe("The ID of the LeMUR request"),
14060
- usage: zod3.object({
14061
- input_tokens: zod3.number().min(getLemurResponseResponseUsageInputTokensMin).describe("The number of input tokens used by the model"),
14062
- output_tokens: zod3.number().min(getLemurResponseResponseUsageOutputTokensMin).describe("The number of output tokens generated by the model")
14063
- }).describe("The usage numbers for the LeMUR request")
14064
- })
14065
- ).or(
14066
- zod3.object({
14067
- request_id: zod3.string().uuid().describe("The ID of the LeMUR request"),
14068
- usage: zod3.object({
14069
- input_tokens: zod3.number().min(getLemurResponseResponseUsageInputTokensMinOne).describe("The number of input tokens used by the model"),
14070
- output_tokens: zod3.number().min(getLemurResponseResponseUsageOutputTokensMinOne).describe("The number of output tokens generated by the model")
14071
- }).describe("The usage numbers for the LeMUR request")
14072
- }).and(
14073
- zod3.object({
14074
- response: zod3.array(
14075
- zod3.object({
14076
- question: zod3.string().describe("The question for LeMUR to answer"),
14077
- answer: zod3.string().describe("The answer generated by LeMUR")
14078
- }).describe("An answer generated by LeMUR and its question")
14079
- ).describe("The answers generated by LeMUR and their questions")
14080
- })
14081
- )
14082
- );
14083
- var purgeLemurRequestDataParams = zod3.object({
14084
- request_id: zod3.string().describe(
14085
- "The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request."
14086
- )
14087
- });
14088
- var purgeLemurRequestDataResponse = zod3.object({
14089
- request_id: zod3.string().uuid().describe("The ID of the deletion request of the LeMUR request"),
14090
- request_id_to_purge: zod3.string().uuid().describe("The ID of the LeMUR request to purge the data for"),
14091
- deleted: zod3.boolean().describe("Whether the request data was deleted")
14092
- });
14093
14631
 
14094
14632
  // src/generated/assemblyai/streaming-types.zod.ts
14095
14633
  import { z as zod4 } from "zod";
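
The hunk above deletes the package's LeMUR validators wholesale: the question-answer request and response schemas (lemurQuestionAnswerBody, lemurQuestionAnswerResponse) together with their default/min/max constants, the polling schemas (getLemurResponseParams, getLemurResponseResponse), and the purge schemas (purgeLemurRequestDataParams, purgeLemurRequestDataResponse). For readers tracking what validation behavior disappears in 0.8.5, here is a minimal, self-contained sketch that reproduces the core shape of the removed question-answer request schema; the variable name and the sample payload are illustrative only and were never part of the package's public exports.

import { z } from "zod";

// Reconstruction of the removed lemurQuestionAnswerBody shape: a base
// request object intersected with a `questions` array. The original
// .describe() metadata and named constants are folded into literals.
const questionAnswerBody = z
  .object({
    transcript_ids: z.array(z.string().uuid()).optional(),
    input_text: z.string().optional(),
    context: z.string().or(z.record(z.string(), z.any())).optional(),
    final_model: z
      .enum([
        "anthropic/claude-3-5-sonnet",
        "anthropic/claude-3-opus",
        "anthropic/claude-3-haiku",
      ])
      .or(z.string()),
    max_output_size: z.number().default(2000),
    temperature: z.number().min(0).max(1).optional(),
  })
  .and(
    z.object({
      questions: z.array(
        z.object({
          question: z.string(),
          context: z.string().or(z.record(z.string(), z.any())).optional(),
          // Documented as mutually exclusive with each other, though the
          // schema itself never enforced that:
          answer_format: z.string().optional(),
          answer_options: z.array(z.string()).optional(),
        })
      ),
    })
  );

// A payload of this shape validated in 0.8.3 and has no replacement in
// 0.8.5 (the transcript ID is a made-up example):
questionAnswerBody.parse({
  transcript_ids: ["9ea68fd3-f953-42c1-9742-976c447fb463"],
  final_model: "anthropic/claude-3-5-sonnet",
  questions: [
    { question: "Was the caller satisfied?", answer_options: ["Yes", "No"] },
  ],
});
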
@@ -14104,25 +14642,37 @@ var streamingTranscriberParams = zod4.object({
14104
14642
  enableExtraSessionInformation: zod4.boolean().optional().describe(
14105
14643
  "Set to true to receive the SessionInformation message before the session ends. Defaults to false."
14106
14644
  ),
14645
+ domain: zod4.string().optional().describe(
14646
+ 'Enable domain-specific transcription models to improve accuracy for specialized terminology. Set to `"medical-v1"` to enable [Medical Mode](https://www.assemblyai.com/docs/streaming/medical-mode) for improved accuracy of medical terms such as medications, procedures, conditions, and dosages. Supported languages: English (`en`), Spanish (`es`), German (`de`), French (`fr`). If used with an unsupported language, the parameter is ignored and a warning is returned.'
14647
+ ),
14107
14648
  endOfTurnConfidenceThreshold: zod4.number().optional().describe("From SDK v3"),
14108
14649
  minEndOfTurnSilenceWhenConfident: zod4.number().optional().describe("From SDK v3"),
14650
+ minTurnSilence: zod4.number().optional().describe("From SDK v3"),
14109
14651
  maxTurnSilence: zod4.number().optional().describe("From SDK v3"),
14110
14652
  vadThreshold: zod4.number().optional().describe("From SDK v3"),
14111
14653
  formatTurns: zod4.boolean().optional().describe("From SDK v3"),
14112
14654
  filterProfanity: zod4.boolean().optional().describe("From SDK v3"),
14113
14655
  keyterms: zod4.array(zod4.string()).optional().describe("From SDK v3"),
14114
14656
  keytermsPrompt: zod4.array(zod4.string()).optional().describe("From SDK v3"),
14115
- speechModel: zod4.enum(["universal-streaming-english", "universal-streaming-multilingual"]).optional().describe("From SDK v3"),
14657
+ prompt: zod4.string().optional().describe("From SDK v3"),
14658
+ speechModel: zod4.enum(["universal-streaming-english", "universal-streaming-multilingual"]).describe("From SDK v3"),
14116
14659
  languageDetection: zod4.boolean().optional().describe("From SDK v3"),
14117
- inactivityTimeout: zod4.number().optional().describe("From SDK v3")
14660
+ inactivityTimeout: zod4.number().optional().describe("From SDK v3"),
14661
+ speakerLabels: zod4.boolean().optional().describe("From SDK v3"),
14662
+ maxSpeakers: zod4.number().optional().describe("From SDK v3"),
14663
+ llmGateway: zod4.unknown().optional().describe("From SDK v3")
14118
14664
  });
14119
14665
  var streamingUpdateConfigParams = zod4.object({
14120
14666
  end_utterance_silence_threshold: zod4.number().min(0).max(2e4).optional().describe("The duration threshold in milliseconds"),
14121
14667
  end_of_turn_confidence_threshold: zod4.number().optional().describe("From SDK v3"),
14122
14668
  min_end_of_turn_silence_when_confident: zod4.number().optional().describe("From SDK v3"),
14669
+ min_turn_silence: zod4.number().optional().describe("From SDK v3"),
14123
14670
  max_turn_silence: zod4.number().optional().describe("From SDK v3"),
14124
14671
  vad_threshold: zod4.number().optional().describe("From SDK v3"),
14125
- format_turns: zod4.boolean().optional().describe("From SDK v3")
14672
+ format_turns: zod4.boolean().optional().describe("From SDK v3"),
14673
+ keyterms_prompt: zod4.array(zod4.string()).optional().describe("From SDK v3"),
14674
+ prompt: zod4.string().optional().describe("From SDK v3"),
14675
+ filter_profanity: zod4.boolean().optional().describe("From SDK v3")
14126
14676
  });
14127
14677
 
14128
14678
  // src/generated/gladia/api/gladiaControlAPI.zod.ts
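
Beyond adding fields, the hunk above changes a contract: streamingTranscriberParams gains domain (the Medical Mode switch documented inline), minTurnSilence, prompt, speakerLabels, maxSpeakers, and llmGateway, while speechModel loses its .optional() and becomes required; streamingUpdateConfigParams likewise accepts min_turn_silence, keyterms_prompt, prompt, and filter_profanity mid-session. The sketch below shows what the tightened schema means for callers, using a pared-down copy limited to the fields this hunk touches; the variable name and sample values are illustrative, not package exports.

import { z } from "zod";

// Subset of the 0.8.5 streamingTranscriberParams shape.
const transcriberParams = z.object({
  // New: domain-specific models. Per the inline docs, "medical-v1"
  // enables Medical Mode for en/es/de/fr and is ignored (with a
  // warning) for other languages.
  domain: z.string().optional(),
  minTurnSilence: z.number().optional(), // new, alongside maxTurnSilence
  prompt: z.string().optional(),         // new
  speakerLabels: z.boolean().optional(), // new
  maxSpeakers: z.number().optional(),    // new
  llmGateway: z.unknown().optional(),    // new
  // .optional() removed in 0.8.5: omitting speechModel now fails parsing.
  speechModel: z.enum([
    "universal-streaming-english",
    "universal-streaming-multilingual",
  ]),
});

// Accepted: the required model is present, Medical Mode and speaker
// labels are enabled.
transcriberParams.parse({
  speechModel: "universal-streaming-english",
  domain: "medical-v1",
  speakerLabels: true,
  maxSpeakers: 2,
});

// Would have parsed under 0.8.3 but throws a ZodError in 0.8.5,
// because speechModel is no longer optional:
// transcriberParams.parse({ domain: "medical-v1" });
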
@@ -40145,7 +40695,6 @@ export {
40145
40695
  createOpenAIWhisperAdapter,
40146
40696
  createSonioxAdapter,
40147
40697
  createSpeechmaticsAdapter,
40148
- createTemporaryToken,
40149
40698
  createTranscript,
40150
40699
  createTranscription,
40151
40700
  createVoiceRouter,
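
The hunk above drops createTemporaryToken from the module's export list, a breaking change for ESM consumers: a named import of a missing binding fails when the module graph is linked, not at call time. A brief illustration, assuming the package is consumed under its published name (whether a replacement exists is not visible in this diff):

// 0.8.3: this named import resolved.
//   import { createTemporaryToken } from "voice-router-dev";
// 0.8.5: dist/index.mjs no longer provides the binding, so the same
// import now throws at module-link time. Sibling exports from this
// hunk remain available:
import { createTranscript, createVoiceRouter } from "voice-router-dev";
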