voice-router-dev 0.8.9 → 0.9.0

package/dist/index.mjs CHANGED
@@ -754,60 +754,60 @@ var SonioxLanguage = {
  // src/generated/soniox/models.ts
  var SonioxModels = [
  { id: "stt-rt-v4", name: "Speech-to-Text Real-time v4", mode: "real_time" },
- { id: "stt-rt-v3", name: "Speech-to-Text Real-time v3", mode: "real_time" },
  { id: "stt-async-v4", name: "Speech-to-Text Async v4", mode: "async" },
- { id: "stt-async-v3", name: "Speech-to-Text Async v3", mode: "async" },
- { id: "stt-rt-preview", name: "Speech-to-Text Real-time Preview", mode: "real_time", aliasOf: "stt-rt-v3" },
- { id: "stt-async-preview", name: "Speech-to-Text Async Preview", mode: "async", aliasOf: "stt-async-v3" },
- { id: "stt-rt-v3-preview", name: "Speech-to-Text Real-time v3 Preview", mode: "real_time", aliasOf: "stt-rt-v3" },
- { id: "stt-rt-preview-v2", name: "Speech-to-Text Real-time Preview v2", mode: "real_time", aliasOf: "stt-rt-v3" },
- { id: "stt-async-preview-v1", name: "Speech-to-Text Async Preview v1", mode: "async", aliasOf: "stt-async-v3" }
+ { id: "stt-rt-preview", name: "Speech-to-Text Real-time Preview", mode: "real_time", aliasOf: "stt-rt-v4" },
+ { id: "stt-async-preview", name: "Speech-to-Text Async Preview", mode: "async", aliasOf: "stt-async-v4" },
+ { id: "stt-rt-v3-preview", name: "Speech-to-Text Real-time v3 Preview", mode: "real_time", aliasOf: "stt-rt-v4" },
+ { id: "stt-rt-preview-v2", name: "Speech-to-Text Real-time Preview v2", mode: "real_time", aliasOf: "stt-rt-v4" },
+ { id: "stt-async-preview-v1", name: "Speech-to-Text Async Preview v1", mode: "async", aliasOf: "stt-async-v4" },
+ { id: "stt-rt-v3", name: "Speech-to-Text Real-time v3", mode: "real_time", aliasOf: "stt-rt-v4" },
+ { id: "stt-async-v3", name: "Speech-to-Text Async v3", mode: "async", aliasOf: "stt-async-v4" }
  ];
  var SonioxModelCodes = [
  "stt-rt-v4",
- "stt-rt-v3",
  "stt-async-v4",
- "stt-async-v3",
  "stt-rt-preview",
  "stt-async-preview",
  "stt-rt-v3-preview",
  "stt-rt-preview-v2",
- "stt-async-preview-v1"
+ "stt-async-preview-v1",
+ "stt-rt-v3",
+ "stt-async-v3"
  ];
  var SonioxModelLabels = {
  "stt-rt-v4": "Speech-to-Text Real-time v4",
- "stt-rt-v3": "Speech-to-Text Real-time v3",
  "stt-async-v4": "Speech-to-Text Async v4",
- "stt-async-v3": "Speech-to-Text Async v3",
  "stt-rt-preview": "Speech-to-Text Real-time Preview",
  "stt-async-preview": "Speech-to-Text Async Preview",
  "stt-rt-v3-preview": "Speech-to-Text Real-time v3 Preview",
  "stt-rt-preview-v2": "Speech-to-Text Real-time Preview v2",
- "stt-async-preview-v1": "Speech-to-Text Async Preview v1"
+ "stt-async-preview-v1": "Speech-to-Text Async Preview v1",
+ "stt-rt-v3": "Speech-to-Text Real-time v3",
+ "stt-async-v3": "Speech-to-Text Async v3"
  };
  var SonioxModel = {
  stt_rt_v4: "stt-rt-v4",
- stt_rt_v3: "stt-rt-v3",
  stt_async_v4: "stt-async-v4",
- stt_async_v3: "stt-async-v3",
  stt_rt_preview: "stt-rt-preview",
  stt_async_preview: "stt-async-preview",
  stt_rt_v3_preview: "stt-rt-v3-preview",
  stt_rt_preview_v2: "stt-rt-preview-v2",
- stt_async_preview_v1: "stt-async-preview-v1"
+ stt_async_preview_v1: "stt-async-preview-v1",
+ stt_rt_v3: "stt-rt-v3",
+ stt_async_v3: "stt-async-v3"
  };
  var SonioxRealtimeModel = {
  stt_rt_v4: "stt-rt-v4",
- stt_rt_v3: "stt-rt-v3",
  stt_rt_preview: "stt-rt-preview",
  stt_rt_v3_preview: "stt-rt-v3-preview",
- stt_rt_preview_v2: "stt-rt-preview-v2"
+ stt_rt_preview_v2: "stt-rt-preview-v2",
+ stt_rt_v3: "stt-rt-v3"
  };
  var SonioxAsyncModel = {
  stt_async_v4: "stt-async-v4",
- stt_async_v3: "stt-async-v3",
  stt_async_preview: "stt-async-preview",
- stt_async_preview_v1: "stt-async-preview-v1"
+ stt_async_preview_v1: "stt-async-preview-v1",
+ stt_async_v3: "stt-async-v3"
  };
 
  // src/generated/speechmatics/languages.ts
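The hunk above is the headline change in 0.9.0: the Soniox v3 model ids survive as entries, but they and every preview id now carry `aliasOf` pointers at the v4 models instead of v3. Below is a minimal resolution sketch, assuming `SonioxModels` is exported as it appears in `dist/index.mjs`; the `resolveSonioxModel` helper is hypothetical and not part of the package.

```ts
import { SonioxModels } from "voice-router-dev";

// Hypothetical helper: follow aliasOf links until a canonical model is reached.
function resolveSonioxModel(id: string) {
  const seen = new Set<string>();
  let entry = SonioxModels.find((m) => m.id === id);
  while (entry && "aliasOf" in entry && entry.aliasOf && !seen.has(entry.id)) {
    seen.add(entry.id); // guard against accidental alias cycles
    const target = entry.aliasOf;
    entry = SonioxModels.find((m) => m.id === target);
  }
  return entry;
}

resolveSonioxModel("stt-rt-v3")?.id; // "stt-rt-v4" under 0.9.0; "stt-rt-v3" under 0.8.9
```

Under 0.8.9 this lookup stopped at the canonical v3 entries; under 0.9.0 every preview and v3 id funnels into `stt-rt-v4` or `stt-async-v4`.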
@@ -3782,17 +3782,17 @@ var SummaryTypesEnum = {
  concise: "concise"
  };
 
- // src/generated/gladia/schema/transcriptMessageType.ts
- var TranscriptMessageType = {
- transcript: "transcript"
- };
-
  // src/generated/gladia/schema/transcriptionControllerListV2KindItem.ts
  var TranscriptionControllerListV2KindItem = {
  "pre-recorded": "pre-recorded",
  live: "live"
  };
 
+ // src/generated/gladia/schema/transcriptMessageType.ts
+ var TranscriptMessageType = {
+ transcript: "transcript"
+ };
+
  // src/generated/gladia/schema/translationMessageType.ts
  var TranslationMessageType = {
  translation: "translation"
@@ -4064,7 +4064,7 @@ var WebhookTranscriptionSuccessPayloadEvent = {
 
  // src/generated/gladia/api/gladiaControlAPI.ts
  var preRecordedControllerInitPreRecordedJobV2 = (initTranscriptionRequest, options) => {
- return axios.post(`/v2/pre-recorded`, initTranscriptionRequest, options);
+ return axios.post("/v2/pre-recorded", initTranscriptionRequest, options);
  };
  var preRecordedControllerGetPreRecordedJobV2 = (id, options) => {
  return axios.get(`/v2/pre-recorded/${id}`, options);
@@ -4079,13 +4079,13 @@ var preRecordedControllerGetAudioV2 = (id, options) => {
  });
  };
  var transcriptionControllerListV2 = (params, options) => {
- return axios.get(`/v2/transcription`, {
+ return axios.get("/v2/transcription", {
  ...options,
  params: { ...params, ...options?.params }
  });
  };
  var streamingControllerInitStreamingSessionV2 = (streamingRequest, params, options) => {
- return axios.post(`/v2/live`, streamingRequest, {
+ return axios.post("/v2/live", streamingRequest, {
  ...options,
  params: { ...params, ...options?.params }
  });
@@ -4351,7 +4351,6 @@ var GladiaAdapter = class extends BaseAdapter {
  sentiment: result?.sentiment_analysis || void 0,
  audioToLlm: result?.audio_to_llm || void 0,
  chapters: result?.chapterization || void 0,
- speakerReidentification: result?.speaker_reidentification || void 0,
  structuredData: result?.structured_data_extraction || void 0,
  customMetadata: response.custom_metadata || void 0
  },
@@ -5413,17 +5412,17 @@ var PiiPolicy = {
  zodiac_sign: "zodiac_sign"
  };
 
+ // src/generated/assemblyai/schema/redactedAudioStatus.ts
+ var RedactedAudioStatus = {
+ redacted_audio_ready: "redacted_audio_ready"
+ };
+
  // src/generated/assemblyai/schema/redactPiiAudioQuality.ts
  var RedactPiiAudioQuality = {
  mp3: "mp3",
  wav: "wav"
  };
 
- // src/generated/assemblyai/schema/redactedAudioStatus.ts
- var RedactedAudioStatus = {
- redacted_audio_ready: "redacted_audio_ready"
- };
-
  // src/generated/assemblyai/schema/sentiment.ts
  var Sentiment = {
  POSITIVE: "POSITIVE",
@@ -5487,10 +5486,10 @@ var TranscriptRemoveAudioTags = {
 
  // src/generated/assemblyai/api/assemblyAIAPI.ts
  var createTranscript = (transcriptParams, options) => {
- return axios2.post(`/v2/transcript`, transcriptParams, options);
+ return axios2.post("/v2/transcript", transcriptParams, options);
  };
  var listTranscripts = (params, options) => {
- return axios2.get(`/v2/transcript`, {
+ return axios2.get("/v2/transcript", {
  ...options,
  params: { ...params, ...options?.params }
  });
@@ -5900,22 +5899,22 @@ var AssemblyAIAdapter = class extends BaseAdapter {
  normalizeResponse(response) {
  let status;
  switch (response.status) {
- case "queued":
+ case TranscriptStatus.queued:
  status = "queued";
  break;
- case "processing":
+ case TranscriptStatus.processing:
  status = "processing";
  break;
- case "completed":
+ case TranscriptStatus.completed:
  status = "completed";
  break;
- case "error":
+ case TranscriptStatus.error:
  status = "error";
  break;
  default:
  status = "queued";
  }
- if (response.status === "error") {
+ if (response.status === TranscriptStatus.error) {
  return {
  success: false,
  provider: this.name,
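The status switch above now compares against the generated `TranscriptStatus` constants rather than bare string literals. A short sketch of the pattern; the exact shape of `TranscriptStatus` is an assumption, inferred from the other generated `var X = { key: "value" }` const maps in this bundle.

```ts
// Assumed shape of the generated constant map.
const TranscriptStatus = {
  queued: "queued",
  processing: "processing",
  completed: "completed",
  error: "error",
} as const;

type TranscriptStatus = (typeof TranscriptStatus)[keyof typeof TranscriptStatus];

// Comparing against the constants means a renamed status in a regenerated
// schema surfaces as a compile error instead of a silent fall-through to the
// default branch.
function isTerminal(status: TranscriptStatus): boolean {
  return status === TranscriptStatus.completed || status === TranscriptStatus.error;
}
```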
@@ -7627,13 +7626,13 @@ var TextNormalizationKind = {
 
  // src/generated/azure/api/speechServicesAPIVersion32.ts
  var transcriptionsList = (params, options) => {
- return axios4.get(`/transcriptions`, {
+ return axios4.get("/transcriptions", {
  ...options,
  params: { ...params, ...options?.params }
  });
  };
  var transcriptionsCreate = (transcription, options) => {
- return axios4.post(`/transcriptions`, transcription, options);
+ return axios4.post("/transcriptions", transcription, options);
  };
  var transcriptionsGet = (id, options) => {
  return axios4.get(`/transcriptions/${id}`, options);
@@ -7648,13 +7647,13 @@ var transcriptionsListFiles = (id, params, options) => {
  });
  };
  var webHooksList = (params, options) => {
- return axios4.get(`/webhooks`, {
+ return axios4.get("/webhooks", {
  ...options,
  params: { ...params, ...options?.params }
  });
  };
  var webHooksCreate = (webHook, options) => {
- return axios4.post(`/webhooks`, webHook, options);
+ return axios4.post("/webhooks", webHook, options);
  };
  var webHooksDelete = (id, options) => {
  return axios4.delete(`/webhooks/${id}`, options);
@@ -7782,7 +7781,7 @@ var AzureSTTAdapter = class extends BaseAdapter {
  this.getAxiosConfig()
  );
  const files = filesResponse.data?.values || [];
- const resultFile = files.find((file) => file.kind === "Transcription");
+ const resultFile = files.find((file) => file.kind === FileKind.Transcription);
  if (!resultFile?.links?.contentUrl) {
  return {
  success: false,
@@ -8006,15 +8005,20 @@ var AzureSTTAdapter = class extends BaseAdapter {
  return properties;
  }
  /**
- * Normalize Azure status to unified status
+ * Normalize Azure status to unified status using generated AzureStatus constants
  */
  normalizeStatus(status) {
- const statusStr = status?.toString().toLowerCase() || "";
- if (statusStr.includes("succeeded")) return "completed";
- if (statusStr.includes("running")) return "processing";
- if (statusStr.includes("notstarted")) return "queued";
- if (statusStr.includes("failed")) return "error";
- return "queued";
+ switch (status) {
+ case Status.Succeeded:
+ return "completed";
+ case Status.Running:
+ return "processing";
+ case Status.Failed:
+ return "error";
+ case Status.NotStarted:
+ default:
+ return "queued";
+ }
  }
  /**
  * Normalize Azure transcription response to unified format
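`normalizeStatus` drops the lowercase-and-substring matching in favor of an exact switch over the generated `Status` constants, with `NotStarted` and anything unrecognized both landing on "queued". The same mapping as a lookup table; the `Status` values here are assumed from Azure Speech's NotStarted/Running/Succeeded/Failed job lifecycle.

```ts
type UnifiedStatus = "queued" | "processing" | "completed" | "error";

// Assumed Azure status strings; the generated Status constants should agree.
const azureToUnified: Record<string, UnifiedStatus> = {
  NotStarted: "queued",
  Running: "processing",
  Succeeded: "completed",
  Failed: "error",
};

// Unknown or missing statuses fall back to "queued", like the default branch above.
const normalizeStatus = (status: string): UnifiedStatus =>
  azureToUnified[status] ?? "queued";
```

One behavioral difference worth noting: the old code lowercased its input, so a lowercase "succeeded" would still have matched; the new switch is case-exact.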
@@ -8134,30 +8138,30 @@ function getAzureOpenAIRealtimeUrl(endpoint, deployment, apiVersion = "2024-10-0
  import axios6 from "axios";
  var createTranscription = (createTranscriptionRequest, options) => {
  const formData = new FormData();
- formData.append(`file`, createTranscriptionRequest.file);
- formData.append(`model`, createTranscriptionRequest.model);
+ formData.append("file", createTranscriptionRequest.file);
+ formData.append("model", createTranscriptionRequest.model);
  if (createTranscriptionRequest.language !== void 0) {
- formData.append(`language`, createTranscriptionRequest.language);
+ formData.append("language", createTranscriptionRequest.language);
  }
  if (createTranscriptionRequest.prompt !== void 0) {
- formData.append(`prompt`, createTranscriptionRequest.prompt);
+ formData.append("prompt", createTranscriptionRequest.prompt);
  }
  if (createTranscriptionRequest.response_format !== void 0) {
- formData.append(`response_format`, createTranscriptionRequest.response_format);
+ formData.append("response_format", createTranscriptionRequest.response_format);
  }
  if (createTranscriptionRequest.temperature !== void 0) {
- formData.append(`temperature`, createTranscriptionRequest.temperature.toString());
+ formData.append("temperature", createTranscriptionRequest.temperature.toString());
  }
  if (createTranscriptionRequest.include !== void 0) {
- createTranscriptionRequest.include.forEach((value) => formData.append(`include`, value));
+ createTranscriptionRequest.include.forEach((value) => formData.append("include", value));
  }
  if (createTranscriptionRequest.timestamp_granularities !== void 0) {
  createTranscriptionRequest.timestamp_granularities.forEach(
- (value) => formData.append(`timestamp_granularities`, value)
+ (value) => formData.append("timestamp_granularities", value)
  );
  }
  if (createTranscriptionRequest.stream !== void 0 && createTranscriptionRequest.stream !== null) {
- formData.append(`stream`, createTranscriptionRequest.stream.toString());
+ formData.append("stream", createTranscriptionRequest.stream.toString());
  }
  if (createTranscriptionRequest.chunking_strategy !== void 0 && createTranscriptionRequest.chunking_strategy !== null) {
  formData.append(
@@ -8167,15 +8171,15 @@ var createTranscription = (createTranscriptionRequest, options) => {
  }
  if (createTranscriptionRequest.known_speaker_names !== void 0) {
  createTranscriptionRequest.known_speaker_names.forEach(
- (value) => formData.append(`known_speaker_names`, value)
+ (value) => formData.append("known_speaker_names", value)
  );
  }
  if (createTranscriptionRequest.known_speaker_references !== void 0) {
  createTranscriptionRequest.known_speaker_references.forEach(
- (value) => formData.append(`known_speaker_references`, value)
+ (value) => formData.append("known_speaker_references", value)
  );
  }
- return axios6.post(`/audio/transcriptions`, formData, options);
+ return axios6.post("/audio/transcriptions", formData, options);
  };
 
  // src/generated/openai/schema/createTranscriptionRequestTimestampGranularitiesItem.ts
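The generated OpenAI client builds its multipart body field by field, appending optional parameters only when they are defined and stringifying numbers and booleans. A hedged usage sketch, assuming `createTranscription` is exported and that the axios instance's base URL and auth come in through the options argument (Node 18+ for `Blob`):

```ts
import { readFile } from "node:fs/promises";

const audio = new Blob([await readFile("meeting.wav")], { type: "audio/wav" });

const response = await createTranscription(
  {
    file: audio,    // appended to the FormData as the "file" field
    model: "whisper-1",
    language: "en", // optional: appended only because it is defined
    temperature: 0, // numbers are stringified before appending
  },
  {
    baseURL: "https://api.openai.com/v1", // assumed to be wired up here
    headers: { Authorization: `Bearer ${process.env.OPENAI_API_KEY}` },
  }
);
console.log(response.data.text);
```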
@@ -8264,7 +8268,7 @@ var OpenAIWhisperAdapter = class extends BaseAdapter {
  const request = {
  ...options?.openai,
  file: audioData,
- // Generated type expects Blob
+ // Buffer/Blob both accepted at runtime; generated type expects Blob
  model
  };
  if (options?.language) {
@@ -8284,7 +8288,11 @@ var OpenAIWhisperAdapter = class extends BaseAdapter {
  request.response_format = OpenAIResponseFormat.json;
  }
  const response = await createTranscription(request, this.getAxiosConfig());
- return this.normalizeResponse(response.data, model, isDiarization);
+ return this.normalizeResponse(
+ response.data,
+ model,
+ isDiarization
+ );
  } catch (error) {
  return this.createErrorResponse(error);
  }
@@ -9622,8 +9630,23 @@ var SonioxAdapter = class extends BaseAdapter {
  terms: options.customVocabulary
  };
  }
+ if (options?.webhookUrl) {
+ requestBody.webhook_url = options.webhookUrl;
+ }
  const response = await this.client.post("/transcriptions", requestBody);
  const transcriptionId = response.data.id;
+ if (options?.webhookUrl) {
+ return {
+ success: true,
+ provider: this.name,
+ data: {
+ id: transcriptionId,
+ text: "",
+ status: "queued"
+ },
+ raw: response.data
+ };
+ }
  return await this.pollForCompletion(transcriptionId);
  } catch (error) {
  return this.createErrorResponse(error);
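New in 0.9.0: when `options.webhookUrl` is set, the Soniox adapter forwards it as `webhook_url` and returns a queued stub immediately instead of entering `pollForCompletion`. A caller-side sketch; the `SonioxAdapter` export name, constructor options, and `transcribe` method name are assumptions, while the `webhookUrl` option and the queued stub come straight from the hunk above.

```ts
import { SonioxAdapter } from "voice-router-dev"; // assumed export name

const adapter = new SonioxAdapter({ apiKey: process.env.SONIOX_API_KEY! }); // assumed shape

const result = await adapter.transcribe("https://example.com/call.wav", {
  webhookUrl: "https://example.com/hooks/soniox", // per the diff above
});

if (result.success && result.data.status === "queued") {
  // No transcript yet: persist result.data.id and finish the job when
  // Soniox calls the webhook, rather than blocking on polling.
  console.log("queued transcription", result.data.id);
}
```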
@@ -9968,7 +9991,7 @@ var SonioxAdapter = class extends BaseAdapter {
  start: token.start_ms ? token.start_ms / 1e3 : 0,
  end: token.end_ms ? token.end_ms / 1e3 : 0,
  confidence: token.confidence,
- speaker: token.speaker
+ speaker: token.speaker ?? void 0
  }));
  return buildUtterancesFromWords(words);
  }
@@ -9976,29 +9999,24 @@ var SonioxAdapter = class extends BaseAdapter {
  * Normalize Soniox response to unified format
  */
  normalizeResponse(response) {
- const text = response.text || (response.tokens ? response.tokens.filter((t) => t.is_final !== false).map((t) => t.text).join("") : "");
- const words = response.tokens ? response.tokens.filter(
- (t) => t.is_final !== false && t.start_ms !== void 0 && t.end_ms !== void 0
- ).map((token) => ({
+ const { text, tokens } = response;
+ const words = tokens.map((token) => ({
  word: token.text,
  start: token.start_ms / 1e3,
  end: token.end_ms / 1e3,
  confidence: token.confidence,
- speaker: token.speaker
- })) : [];
+ speaker: token.speaker ?? void 0
+ }));
  const speakerSet = /* @__PURE__ */ new Set();
- if (response.tokens) {
- response.tokens.forEach((t) => {
- if (t.speaker) speakerSet.add(t.speaker);
- });
+ for (const token of tokens) {
+ if (token.speaker) speakerSet.add(token.speaker);
  }
  const speakers = speakerSet.size > 0 ? Array.from(speakerSet).map((id) => ({
  id,
  label: `Speaker ${id}`
  })) : void 0;
- const tokens = response.tokens ? response.tokens.filter((t) => t.is_final !== false) : [];
  const utterances = tokens.length > 0 ? this.buildUtterancesFromTokens(tokens) : [];
- const language = response.tokens?.find((t) => t.language)?.language;
+ const language = tokens.find((t) => t.language)?.language ?? void 0;
  return {
  success: true,
  provider: this.name,
@@ -10172,11 +10190,29 @@ var ElevenLabsAdapter = class extends BaseAdapter {
  }
  }
  }
+ if (options?.webhookUrl) {
+ if (!formData.has("webhook")) {
+ formData.append("webhook", "true");
+ }
+ }
  const response = await this.client.post("/v1/speech-to-text", formData, {
  headers: {
  "Content-Type": "multipart/form-data"
  }
  });
+ if (options?.webhookUrl) {
+ const transcriptionId = response.data.transcription_id || response.data.id || `elevenlabs_${Date.now()}`;
+ return {
+ success: true,
+ provider: this.name,
+ data: {
+ id: transcriptionId,
+ text: "",
+ status: "queued"
+ },
+ raw: response.data
+ };
+ }
  return this.normalizeResponse(response.data);
  } catch (error) {
  return this.createErrorResponse(error);
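The ElevenLabs adapter gets the same webhook treatment, with one wrinkle: the acknowledgement may carry `transcription_id`, `id`, or neither, in which case the adapter synthesizes a local `elevenlabs_${Date.now()}` id. A self-contained sketch of that fallback, mirroring the line above; the `ElevenLabsAck` type name is illustrative only.

```ts
type ElevenLabsAck = { transcription_id?: string; id?: string };

// Mirrors the adapter's fallback chain. A synthesized elevenlabs_* id is local
// to this process and cannot be correlated with the eventual webhook payload,
// so callers should treat it as "no usable job id" rather than persist it.
function webhookJobId(ack: ElevenLabsAck): string {
  return ack.transcription_id || ack.id || `elevenlabs_${Date.now()}`;
}

webhookJobId({ transcription_id: "abc123" }); // "abc123"
webhookJobId({});                             // e.g. "elevenlabs_1730000000000"
```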
@@ -10440,7 +10476,7 @@ var ElevenLabsAdapter = class extends BaseAdapter {
  * - Multi-channel: `MultichannelSpeechToTextResponseModel` with `transcripts[]`
  */
  normalizeResponse(response) {
- const chunks = response.transcripts ? response.transcripts : [response];
+ const chunks = "transcripts" in response ? response.transcripts : [response];
  const text = chunks.map((c) => c.text).join(" ");
  const words = [];
  const speakerSet = /* @__PURE__ */ new Set();
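The move from truthiness to `"transcripts" in response` is mostly a TypeScript change: the `in` operator narrows the single-channel/multichannel response union, so each branch type-checks without casts. A trimmed sketch of the pattern; the two interfaces abbreviate the response models named in the doc comment above, not the full generated types.

```ts
interface SingleChannelResponse {
  text: string;
  transcription_id?: string;
}
interface MultichannelResponse {
  transcripts: SingleChannelResponse[];
}
type SpeechToTextResponse = SingleChannelResponse | MultichannelResponse;

function chunksOf(response: SpeechToTextResponse): SingleChannelResponse[] {
  // "transcripts" in response narrows the union on both sides of the ternary;
  // plain truthiness on response.transcripts would not compile against the union.
  return "transcripts" in response ? response.transcripts : [response];
}
```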
@@ -10490,7 +10526,7 @@ var ElevenLabsAdapter = class extends BaseAdapter {
  }
  }
  }
- const transcriptionId = response.transcription_id || chunks[0]?.transcription_id || `elevenlabs_${Date.now()}`;
+ const transcriptionId = ("transcription_id" in response ? response.transcription_id : response.transcription_id) || chunks[0]?.transcription_id || `elevenlabs_${Date.now()}`;
  return {
  success: true,
  provider: this.name,
@@ -10834,9 +10870,7 @@ var listenTranscribeQueryParams = zod.object({
  ),
  dictation: zod.boolean().optional().describe("Dictation mode for controlling formatting with dictated speech"),
  encoding: zod.enum(["linear16", "flac", "mulaw", "amr-nb", "amr-wb", "opus", "speex", "g729"]).optional().describe("Specify the expected encoding of your submitted audio"),
- filler_words: zod.boolean().optional().describe(
- 'Filler Words can help transcribe interruptions in your audio, like "uh" and "um"'
- ),
+ filler_words: zod.boolean().optional().describe('Filler Words can help transcribe interruptions in your audio, like "uh" and "um"'),
  keyterm: zod.array(zod.string()).optional().describe(
  "Key term prompting can boost or suppress specialized terminology and brands. Only compatible with Nova-3"
  ),
@@ -11540,6 +11574,7 @@ __export(assemblyAIAPI_zod_exports, {
  createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault: () => createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault,
  createTranscriptBodySpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault: () => createTranscriptBodySpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault,
  createTranscriptBodySummarizationDefault: () => createTranscriptBodySummarizationDefault,
+ createTranscriptBodyTemperatureDefault: () => createTranscriptBodyTemperatureDefault,
  createTranscriptResponse: () => createTranscriptResponse,
  createTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault: () => createTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault,
  createTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault: () => createTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault,
@@ -11609,6 +11644,7 @@ var createTranscriptBodySpeakerOptionsMinSpeakersExpectedDefault = 1;
  var createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault = true;
  var createTranscriptBodySpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
  var createTranscriptBodySummarizationDefault = false;
+ var createTranscriptBodyTemperatureDefault = 0;
  var createTranscriptBodyCustomTopicsDefault = false;
  var createTranscriptBody = zod3.object({
  audio_end_at: zod3.number().optional().describe(
@@ -11618,10 +11654,10 @@ var createTranscriptBody = zod3.object({
  "The point in time, in milliseconds, to begin transcribing in your media file. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
  ),
  auto_chapters: zod3.boolean().optional().describe(
- "Enable [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Enable [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  auto_highlights: zod3.boolean().optional().describe(
- "Enable [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases), either true or false"
+ "Enable [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights), either true or false"
  ),
  content_safety: zod3.boolean().optional().describe(
  "Enable [Content Moderation](https://www.assemblyai.com/docs/content-moderation), can be true or false"
@@ -11637,16 +11673,16 @@ var createTranscriptBody = zod3.object({
  "Object containing words or phrases to replace, and the word or phrase to replace with"
  )
  ).optional().describe(
- "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/correct-spelling-of-terms) for more details."
  ),
  disfluencies: zod3.boolean().optional().describe(
- 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/include-filler-words), like "umm", in your media file; can be true or false'
  ),
  domain: zod3.string().nullish().describe(
  'Enable domain-specific transcription models to improve accuracy for specialized terminology. Set to `"medical-v1"` to enable [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) for improved accuracy of medical terms such as medications, procedures, conditions, and dosages.\n\nSupported languages: English (`en`), Spanish (`es`), German (`de`), French (`fr`). If used with an unsupported language, the parameter is ignored and a warning is returned.\n'
  ),
  entity_detection: zod3.boolean().optional().describe(
- "Enable [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection), can be true or false"
+ "Enable [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript), can be true or false"
  ),
  filter_profanity: zod3.boolean().optional().describe(
  "Filter profanity from the transcribed text, can be true or false. See [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) for more details."
@@ -11655,7 +11691,7 @@ var createTranscriptBody = zod3.object({
  "Enable [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio), can be true or false"
  ),
  iab_categories: zod3.boolean().optional().describe(
- "Enable [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection), can be true or false"
+ "Enable [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics), can be true or false"
  ),
  keyterms_prompt: zod3.array(zod3.string()).optional().describe(
  "Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3 Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.\n"
@@ -11903,7 +11939,7 @@ var createTranscriptBody = zod3.object({
  "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
  ),
  multichannel: zod3.boolean().optional().describe(
- "Enable [Multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) transcription, can be true or false."
+ "Enable [Multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) transcription, can be true or false."
  ),
  prompt: zod3.string().optional().describe(
  "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
@@ -11986,23 +12022,23 @@ var createTranscriptBody = zod3.object({
  "The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
  ),
  sentiment_analysis: zod3.boolean().optional().describe(
- "Enable [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis), can be true or false"
+ "Enable [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech), can be true or false"
  ),
  speaker_labels: zod3.boolean().optional().describe(
- "Enable [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization), can be true or false"
+ "Enable [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers), can be true or false"
  ),
  speaker_options: zod3.object({
  min_speakers_expected: zod3.number().default(createTranscriptBodySpeakerOptionsMinSpeakersExpectedDefault).describe(
- "The minimum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details."
+ "The minimum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-a-range-of-possible-speakers) for more details."
  ),
  max_speakers_expected: zod3.number().optional().describe(
- "<Warning>Setting this parameter too high may hurt model accuracy</Warning>\nThe maximum number of speakers expected in the audio file. The default depends on audio duration: no limit for 0-2 minutes, 10 for 2-10 minutes, and 30 for 10+ minutes. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details.\n"
+ "<Warning>Setting this parameter too high may hurt model accuracy</Warning>\nThe maximum number of speakers expected in the audio file. The default depends on audio duration: no limit for 0-2 minutes, 10 for 2-10 minutes, and 30 for 10+ minutes. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-a-range-of-possible-speakers) for more details.\n"
  )
  }).optional().describe(
- "Specify options for [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers). Use this to set a range of possible speakers."
+ "Specify options for [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-a-range-of-possible-speakers). Use this to set a range of possible speakers."
  ),
  speakers_expected: zod3.number().nullish().describe(
- "Tells the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
+ "Tells the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-number-of-speakers-expected) for more details."
  ),
  speech_models: zod3.array(
  zod3.string().describe(
@@ -12078,7 +12114,7 @@ var createTranscriptBody = zod3.object({
  "Enable speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.\n"
  ),
  summarization: zod3.boolean().optional().describe(
- "Enable [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Enable [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  summary_model: zod3.enum(["informative", "conversational", "catchy"]).optional().describe("The model to summarize the transcript"),
  summary_type: zod3.enum(["bullets", "bullets_verbose", "gist", "headline", "paragraph"]).optional().describe("The type of summary"),
@@ -12087,6 +12123,9 @@ var createTranscriptBody = zod3.object({
  ).or(zod3.null()).optional().describe(
  'Remove [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) from the transcript text. Set to `"all"` to remove all audio tags.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n'
  ),
+ temperature: zod3.number().optional().describe(
+ "Control the amount of randomness injected into the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
+ ),
  webhook_auth_header_name: zod3.string().nullish().describe(
  "The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests"
  ),
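0.9.0 adds an optional `temperature` to the AssemblyAI request schema, with `createTranscriptBodyTemperatureDefault = 0` exported alongside it and a matching nullish field on the response schema further down. A quick validation sketch, assuming `createTranscriptBody` is exported like the other zod schemas in this module:

```ts
// .shape exposes a zod object's per-field schemas, so the new field can be
// exercised in isolation without assembling a full, valid request body.
createTranscriptBody.shape.temperature.parse(0.2);       // => 0.2
createTranscriptBody.shape.temperature.parse(undefined); // => undefined (optional)
createTranscriptBody.shape.temperature.parse("hot");     // throws ZodError
```

Per the description text, the parameter is only honored by the Universal-3 Pro model.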
@@ -12108,7 +12147,7 @@ var createTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault =
  var createTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
  var createTranscriptResponse = zod3.object({
  audio_channels: zod3.number().optional().describe(
- "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
+ "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) is enabled."
  ),
  audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
  audio_end_at: zod3.number().nullish().describe(
@@ -12119,10 +12158,10 @@ var createTranscriptResponse = zod3.object({
  ),
  audio_url: zod3.string().describe("The URL of the media that was transcribed"),
  auto_chapters: zod3.boolean().nullish().describe(
- "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  auto_highlights: zod3.boolean().describe(
- "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
+ "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) is enabled, either true or false"
  ),
  auto_highlights_result: zod3.object({
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -12142,9 +12181,9 @@ var createTranscriptResponse = zod3.object({
  })
  ).describe("A temporally-sequential array of Key Phrases")
  }).describe(
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
  ).or(zod3.null()).optional().describe(
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
  ),
  chapters: zod3.array(
  zod3.object({
@@ -12157,7 +12196,7 @@ var createTranscriptResponse = zod3.object({
  end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
  }).describe("Chapter of the audio file")
  ).nullish().describe(
- "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
+ "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for more information."
  ),
  confidence: zod3.number().nullish().describe(
  "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
@@ -12213,10 +12252,10 @@ var createTranscriptResponse = zod3.object({
  "Object containing words or phrases to replace, and the word or phrase to replace with"
  )
  ).nullish().describe(
- "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/correct-spelling-of-terms) for more details."
  ),
  disfluencies: zod3.boolean().nullish().describe(
- 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/include-filler-words), like "umm", in your media file; can be true or false'
  ),
  domain: zod3.string().nullish().describe(
  'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
@@ -12278,10 +12317,10 @@ var createTranscriptResponse = zod3.object({
  )
  }).describe("A detected entity")
  ).nullish().describe(
- "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
+ "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) for more information.\n"
  ),
  entity_detection: zod3.boolean().nullish().describe(
- "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
+ "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) is enabled, can be true or false"
  ),
  error: zod3.string().optional().describe("Error message of why the transcript failed"),
  filter_profanity: zod3.boolean().nullish().describe(
@@ -12291,7 +12330,7 @@ var createTranscriptResponse = zod3.object({
  "Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
  ),
  iab_categories: zod3.boolean().nullish().describe(
- "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
+ "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) is enabled, can be true or false"
  ),
  iab_categories_result: zod3.object({
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -12314,9 +12353,9 @@ var createTranscriptResponse = zod3.object({
  ).describe("An array of results for the Topic Detection model"),
  summary: zod3.record(zod3.string(), zod3.number()).describe("The overall relevance of topic to the entire audio file")
  }).describe(
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
  ).or(zod3.null()).optional().describe(
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
  ),
  id: zod3.string().uuid().describe("The unique identifier of your transcript"),
  keyterms_prompt: zod3.array(zod3.string()).optional().describe(
@@ -12566,7 +12605,7 @@ var createTranscriptResponse = zod3.object({
  "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
  ),
  multichannel: zod3.boolean().nullish().describe(
- "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
+ "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) was enabled in the transcription request, either true or false"
  ),
  prompt: zod3.string().optional().describe(
  "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
@@ -12649,7 +12688,7 @@ var createTranscriptResponse = zod3.object({
  "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
  ),
  sentiment_analysis: zod3.boolean().nullish().describe(
- "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
+ "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) is enabled, can be true or false"
  ),
  sentiment_analysis_results: zod3.array(
  zod3.object({
@@ -12664,17 +12703,17 @@ var createTranscriptResponse = zod3.object({
  "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
  ),
  speaker: zod3.string().nullable().describe(
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
  )
  }).describe("The result of the Sentiment Analysis model")
  ).nullish().describe(
- "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
+ "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) for more information.\n"
  ),
  speaker_labels: zod3.boolean().nullish().describe(
- "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
+ "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, can be true or false"
  ),
  speakers_expected: zod3.number().nullish().describe(
- "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
+ "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-number-of-speakers-expected) for more details."
  ),
  speech_model_used: zod3.string().optional().describe(
  "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
@@ -12777,22 +12816,25 @@ var createTranscriptResponse = zod3.object({
  "The status of your transcript. Possible values are queued, processing, completed, or error."
  ),
  summarization: zod3.boolean().describe(
- "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  summary: zod3.string().nullish().describe(
- "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
+ "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
  ),
  summary_model: zod3.string().nullish().describe(
- "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
+ "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n"
  ),
  summary_type: zod3.string().nullish().describe(
- "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
+ "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
  ),
  remove_audio_tags: zod3.enum(["all"]).describe(
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
  ).or(zod3.null()).optional().describe(
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
  ),
+ temperature: zod3.number().nullish().describe(
+ "The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
+ ),
  text: zod3.string().nullish().describe("The textual transcript of your media file"),
  throttled: zod3.boolean().nullish().describe(
  "True while a request is throttled and false when a request is no longer throttled"
@@ -12813,7 +12855,7 @@ var createTranscriptResponse = zod3.object({
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
  ),
  speaker: zod3.string().nullable().describe(
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
  )
  })
  ).describe("The words in the utterance."),
@@ -12828,7 +12870,7 @@ var createTranscriptResponse = zod3.object({
  )
  })
  ).nullish().describe(
- "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
+ "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) for more information.\n"
  ),
  webhook_auth: zod3.boolean().describe(
  "Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
@@ -12852,7 +12894,7 @@ var createTranscriptResponse = zod3.object({
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
  ),
  speaker: zod3.string().nullable().describe(
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
  )
  })
  ).nullish().describe(
@@ -12925,7 +12967,7 @@ var getTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault = tr
  var getTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
  var getTranscriptResponse = zod3.object({
  audio_channels: zod3.number().optional().describe(
- "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
+ "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) is enabled."
  ),
  audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
  audio_end_at: zod3.number().nullish().describe(
@@ -12936,10 +12978,10 @@ var getTranscriptResponse = zod3.object({
  ),
  audio_url: zod3.string().describe("The URL of the media that was transcribed"),
  auto_chapters: zod3.boolean().nullish().describe(
- "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  auto_highlights: zod3.boolean().describe(
- "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
+ "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) is enabled, either true or false"
  ),
  auto_highlights_result: zod3.object({
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -12959,9 +13001,9 @@ var getTranscriptResponse = zod3.object({
  })
  ).describe("A temporally-sequential array of Key Phrases")
  }).describe(
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
  ).or(zod3.null()).optional().describe(
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
12965
13007
  ),
12966
13008
  chapters: zod3.array(
12967
13009
  zod3.object({
@@ -12974,7 +13016,7 @@ var getTranscriptResponse = zod3.object({
12974
13016
  end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
12975
13017
  }).describe("Chapter of the audio file")
12976
13018
  ).nullish().describe(
12977
- "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
13019
+ "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for more information."
12978
13020
  ),
12979
13021
  confidence: zod3.number().nullish().describe(
12980
13022
  "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
@@ -13030,10 +13072,10 @@ var getTranscriptResponse = zod3.object({
13030
13072
  "Object containing words or phrases to replace, and the word or phrase to replace with"
13031
13073
  )
13032
13074
  ).nullish().describe(
13033
- "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
13075
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/correct-spelling-of-terms) for more details."
13034
13076
  ),
13035
13077
  disfluencies: zod3.boolean().nullish().describe(
13036
- 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
13078
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/include-filler-words), like "umm", in your media file; can be true or false'
13037
13079
  ),
13038
13080
  domain: zod3.string().nullish().describe(
13039
13081
  'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
@@ -13095,10 +13137,10 @@ var getTranscriptResponse = zod3.object({
13095
13137
  )
13096
13138
  }).describe("A detected entity")
13097
13139
  ).nullish().describe(
13098
- "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
13140
+ "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) for more information.\n"
13099
13141
  ),
13100
13142
  entity_detection: zod3.boolean().nullish().describe(
13101
- "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
13143
+ "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) is enabled, can be true or false"
13102
13144
  ),
13103
13145
  error: zod3.string().optional().describe("Error message of why the transcript failed"),
13104
13146
  filter_profanity: zod3.boolean().nullish().describe(
@@ -13108,7 +13150,7 @@ var getTranscriptResponse = zod3.object({
13108
13150
  "Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
13109
13151
  ),
13110
13152
  iab_categories: zod3.boolean().nullish().describe(
13111
- "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
13153
+ "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) is enabled, can be true or false"
13112
13154
  ),
13113
13155
  iab_categories_result: zod3.object({
13114
13156
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -13131,9 +13173,9 @@ var getTranscriptResponse = zod3.object({
13131
13173
  ).describe("An array of results for the Topic Detection model"),
13132
13174
  summary: zod3.record(zod3.string(), zod3.number()).describe("The overall relevance of the topic to the entire audio file")
13133
13175
  }).describe(
13134
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
13176
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
13135
13177
  ).or(zod3.null()).optional().describe(
13136
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
13178
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
13137
13179
  ),
13138
13180
  id: zod3.string().uuid().describe("The unique identifier of your transcript"),
13139
13181
  keyterms_prompt: zod3.array(zod3.string()).optional().describe(
@@ -13383,7 +13425,7 @@ var getTranscriptResponse = zod3.object({
13383
13425
  "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
13384
13426
  ),
13385
13427
  multichannel: zod3.boolean().nullish().describe(
13386
- "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
13428
+ "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) was enabled in the transcription request, either true or false"
13387
13429
  ),
13388
13430
  prompt: zod3.string().optional().describe(
13389
13431
  "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
@@ -13466,7 +13508,7 @@ var getTranscriptResponse = zod3.object({
13466
13508
  "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
13467
13509
  ),
13468
13510
  sentiment_analysis: zod3.boolean().nullish().describe(
13469
- "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
13511
+ "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) is enabled, can be true or false"
13470
13512
  ),
13471
13513
  sentiment_analysis_results: zod3.array(
13472
13514
  zod3.object({
@@ -13481,17 +13523,17 @@ var getTranscriptResponse = zod3.object({
13481
13523
  "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13482
13524
  ),
13483
13525
  speaker: zod3.string().nullable().describe(
13484
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
13526
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
13485
13527
  )
13486
13528
  }).describe("The result of the Sentiment Analysis model")
13487
13529
  ).nullish().describe(
13488
- "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
13530
+ "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) for more information.\n"
13489
13531
  ),
13490
13532
  speaker_labels: zod3.boolean().nullish().describe(
13491
- "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
13533
+ "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, can be true or false"
13492
13534
  ),
13493
13535
  speakers_expected: zod3.number().nullish().describe(
13494
- "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
13536
+ "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-number-of-speakers-expected) for more details."
13495
13537
  ),
13496
13538
  speech_model_used: zod3.string().optional().describe(
13497
13539
  "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
@@ -13594,22 +13636,25 @@ var getTranscriptResponse = zod3.object({
13594
13636
  "The status of your transcript. Possible values are queued, processing, completed, or error."
13595
13637
  ),
13596
13638
  summarization: zod3.boolean().describe(
13597
- "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
13639
+ "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
13598
13640
  ),
13599
13641
  summary: zod3.string().nullish().describe(
13600
- "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
13642
+ "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
13601
13643
  ),
13602
13644
  summary_model: zod3.string().nullish().describe(
13603
- "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
13645
+ "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n"
13604
13646
  ),
13605
13647
  summary_type: zod3.string().nullish().describe(
13606
- "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
13648
+ "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
13607
13649
  ),
13608
13650
  remove_audio_tags: zod3.enum(["all"]).describe(
13609
13651
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
13610
13652
  ).or(zod3.null()).optional().describe(
13611
13653
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
13612
13654
  ),
13655
+ temperature: zod3.number().nullish().describe(
13656
+ "The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
13657
+ ),
13613
13658
  text: zod3.string().nullish().describe("The textual transcript of your media file"),
13614
13659
  throttled: zod3.boolean().nullish().describe(
13615
13660
  "True while a request is throttled and false when a request is no longer throttled"
@@ -13630,7 +13675,7 @@ var getTranscriptResponse = zod3.object({
13630
13675
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13631
13676
  ),
13632
13677
  speaker: zod3.string().nullable().describe(
13633
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
13678
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
13634
13679
  )
13635
13680
  })
13636
13681
  ).describe("The words in the utterance."),
@@ -13645,7 +13690,7 @@ var getTranscriptResponse = zod3.object({
13645
13690
  )
13646
13691
  })
13647
13692
  ).nullish().describe(
13648
- "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
13693
+ "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) for more information.\n"
13649
13694
  ),
13650
13695
  webhook_auth: zod3.boolean().describe(
13651
13696
  "Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
@@ -13669,7 +13714,7 @@ var getTranscriptResponse = zod3.object({
13669
13714
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13670
13715
  ),
13671
13716
  speaker: zod3.string().nullable().describe(
13672
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
13717
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
13673
13718
  )
13674
13719
  })
13675
13720
  ).nullish().describe(
@@ -13702,7 +13747,7 @@ var deleteTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault =
13702
13747
  var deleteTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
13703
13748
  var deleteTranscriptResponse = zod3.object({
13704
13749
  audio_channels: zod3.number().optional().describe(
13705
- "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
13750
+ "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) is enabled."
13706
13751
  ),
13707
13752
  audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
13708
13753
  audio_end_at: zod3.number().nullish().describe(
@@ -13713,10 +13758,10 @@ var deleteTranscriptResponse = zod3.object({
13713
13758
  ),
13714
13759
  audio_url: zod3.string().describe("The URL of the media that was transcribed"),
13715
13760
  auto_chapters: zod3.boolean().nullish().describe(
13716
- "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
13761
+ "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
13717
13762
  ),
13718
13763
  auto_highlights: zod3.boolean().describe(
13719
- "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
13764
+ "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) is enabled, either true or false"
13720
13765
  ),
13721
13766
  auto_highlights_result: zod3.object({
13722
13767
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -13736,9 +13781,9 @@ var deleteTranscriptResponse = zod3.object({
13736
13781
  })
13737
13782
  ).describe("A temporally-sequential array of Key Phrases")
13738
13783
  }).describe(
13739
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
13784
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
13740
13785
  ).or(zod3.null()).optional().describe(
13741
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
13786
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
13742
13787
  ),
13743
13788
  chapters: zod3.array(
13744
13789
  zod3.object({
@@ -13751,7 +13796,7 @@ var deleteTranscriptResponse = zod3.object({
13751
13796
  end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
13752
13797
  }).describe("Chapter of the audio file")
13753
13798
  ).nullish().describe(
13754
- "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
13799
+ "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for more information."
13755
13800
  ),
13756
13801
  confidence: zod3.number().nullish().describe(
13757
13802
  "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
@@ -13807,10 +13852,10 @@ var deleteTranscriptResponse = zod3.object({
13807
13852
  "Object containing words or phrases to replace, and the word or phrase to replace with"
13808
13853
  )
13809
13854
  ).nullish().describe(
13810
- "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
13855
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/correct-spelling-of-terms) for more details."
13811
13856
  ),
13812
13857
  disfluencies: zod3.boolean().nullish().describe(
13813
- 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
13858
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/include-filler-words), like "umm", in your media file; can be true or false'
13814
13859
  ),
13815
13860
  domain: zod3.string().nullish().describe(
13816
13861
  'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
@@ -13872,10 +13917,10 @@ var deleteTranscriptResponse = zod3.object({
13872
13917
  )
13873
13918
  }).describe("A detected entity")
13874
13919
  ).nullish().describe(
13875
- "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
13920
+ "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) for more information.\n"
13876
13921
  ),
13877
13922
  entity_detection: zod3.boolean().nullish().describe(
13878
- "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
13923
+ "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) is enabled, can be true or false"
13879
13924
  ),
13880
13925
  error: zod3.string().optional().describe("Error message of why the transcript failed"),
13881
13926
  filter_profanity: zod3.boolean().nullish().describe(
@@ -13885,7 +13930,7 @@ var deleteTranscriptResponse = zod3.object({
13885
13930
  "Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
13886
13931
  ),
13887
13932
  iab_categories: zod3.boolean().nullish().describe(
13888
- "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
13933
+ "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) is enabled, can be true or false"
13889
13934
  ),
13890
13935
  iab_categories_result: zod3.object({
13891
13936
  status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -13908,9 +13953,9 @@ var deleteTranscriptResponse = zod3.object({
13908
13953
  ).describe("An array of results for the Topic Detection model"),
13909
13954
  summary: zod3.record(zod3.string(), zod3.number()).describe("The overall relevance of the topic to the entire audio file")
13910
13955
  }).describe(
13911
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
13956
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
13912
13957
  ).or(zod3.null()).optional().describe(
13913
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
13958
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
13914
13959
  ),
13915
13960
  id: zod3.string().uuid().describe("The unique identifier of your transcript"),
13916
13961
  keyterms_prompt: zod3.array(zod3.string()).optional().describe(
@@ -14160,7 +14205,7 @@ var deleteTranscriptResponse = zod3.object({
14160
14205
  "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
14161
14206
  ),
14162
14207
  multichannel: zod3.boolean().nullish().describe(
14163
- "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
14208
+ "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) was enabled in the transcription request, either true or false"
14164
14209
  ),
14165
14210
  prompt: zod3.string().optional().describe(
14166
14211
  "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
@@ -14243,7 +14288,7 @@ var deleteTranscriptResponse = zod3.object({
14243
14288
  "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
14244
14289
  ),
14245
14290
  sentiment_analysis: zod3.boolean().nullish().describe(
14246
- "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
14291
+ "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) is enabled, can be true or false"
14247
14292
  ),
14248
14293
  sentiment_analysis_results: zod3.array(
14249
14294
  zod3.object({
@@ -14258,17 +14303,17 @@ var deleteTranscriptResponse = zod3.object({
14258
14303
  "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14259
14304
  ),
14260
14305
  speaker: zod3.string().nullable().describe(
14261
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14306
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
14262
14307
  )
14263
14308
  }).describe("The result of the Sentiment Analysis model")
14264
14309
  ).nullish().describe(
14265
- "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
14310
+ "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) for more information.\n"
14266
14311
  ),
14267
14312
  speaker_labels: zod3.boolean().nullish().describe(
14268
- "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
14313
+ "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, can be true or false"
14269
14314
  ),
14270
14315
  speakers_expected: zod3.number().nullish().describe(
14271
- "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
14316
+ "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-number-of-speakers-expected) for more details."
14272
14317
  ),
14273
14318
  speech_model_used: zod3.string().optional().describe(
14274
14319
  "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
@@ -14371,22 +14416,25 @@ var deleteTranscriptResponse = zod3.object({
14371
14416
  "The status of your transcript. Possible values are queued, processing, completed, or error."
14372
14417
  ),
14373
14418
  summarization: zod3.boolean().describe(
14374
- "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
14419
+ "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
14375
14420
  ),
14376
14421
  summary: zod3.string().nullish().describe(
14377
- "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
14422
+ "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
14378
14423
  ),
14379
14424
  summary_model: zod3.string().nullish().describe(
14380
- "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
14425
+ "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n"
14381
14426
  ),
14382
14427
  summary_type: zod3.string().nullish().describe(
14383
- "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
14428
+ "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
14384
14429
  ),
14385
14430
  remove_audio_tags: zod3.enum(["all"]).describe(
14386
14431
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
14387
14432
  ).or(zod3.null()).optional().describe(
14388
14433
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
14389
14434
  ),
14435
+ temperature: zod3.number().nullish().describe(
14436
+ "The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
14437
+ ),
14390
14438
  text: zod3.string().nullish().describe("The textual transcript of your media file"),
14391
14439
  throttled: zod3.boolean().nullish().describe(
14392
14440
  "True while a request is throttled and false when a request is no longer throttled"
@@ -14407,7 +14455,7 @@ var deleteTranscriptResponse = zod3.object({
14407
14455
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14408
14456
  ),
14409
14457
  speaker: zod3.string().nullable().describe(
14410
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14458
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
14411
14459
  )
14412
14460
  })
14413
14461
  ).describe("The words in the utterance."),
@@ -14422,7 +14470,7 @@ var deleteTranscriptResponse = zod3.object({
14422
14470
  )
14423
14471
  })
14424
14472
  ).nullish().describe(
14425
- "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
14473
+ "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) for more information.\n"
14426
14474
  ),
14427
14475
  webhook_auth: zod3.boolean().describe(
14428
14476
  "Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
@@ -14446,7 +14494,7 @@ var deleteTranscriptResponse = zod3.object({
14446
14494
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14447
14495
  ),
14448
14496
  speaker: zod3.string().nullable().describe(
14449
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14497
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
14450
14498
  )
14451
14499
  })
14452
14500
  ).nullish().describe(
@@ -14491,7 +14539,7 @@ var getTranscriptSentencesResponse = zod3.object({
14491
14539
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14492
14540
  ),
14493
14541
  speaker: zod3.string().nullable().describe(
14494
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14542
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
14495
14543
  )
14496
14544
  })
14497
14545
  ).describe("An array of words in the sentence"),
@@ -14499,7 +14547,7 @@ var getTranscriptSentencesResponse = zod3.object({
14499
14547
  "The channel of the sentence. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14500
14548
  ),
14501
14549
  speaker: zod3.string().nullable().describe(
14502
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14550
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
14503
14551
  )
14504
14552
  })
14505
14553
  ).describe("An array of sentences in the transcript")
@@ -14527,7 +14575,7 @@ var getTranscriptParagraphsResponse = zod3.object({
14527
14575
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14528
14576
  ),
14529
14577
  speaker: zod3.string().nullable().describe(
14530
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14578
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
14531
14579
  )
14532
14580
  })
14533
14581
  ).describe("An array of words in the paragraph")
@@ -17139,23 +17187,6 @@ var preRecordedControllerGetPreRecordedJobsV2Response = zod5.object({
17139
17187
  }).optional().describe(
17140
17188
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
17141
17189
  ),
17142
- speaker_reidentification: zod5.object({
17143
- success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
17144
- is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
17145
- exec_time: zod5.number().describe("Time audio intelligence model took to complete the task"),
17146
- error: zod5.object({
17147
- status_code: zod5.number().describe("Status code of the addon error"),
17148
- exception: zod5.string().describe("Reason of the addon error"),
17149
- message: zod5.string().describe("Detailed message of the addon error")
17150
- }).nullable().describe(
17151
- "`null` if `success` is `true`. Contains the error details of the failed model"
17152
- ),
17153
- results: zod5.string().describe(
17154
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
17155
- )
17156
- }).optional().describe(
17157
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
17158
- ),
17159
17190
  structured_data_extraction: zod5.object({
17160
17191
  success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
17161
17192
  is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
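The hunk above removes speaker_reidentification from the parsed Gladia response, and the same deletion repeats in each of the other Gladia response schemas below, so consumers upgrading from 0.8.9 will now see the field as absent. A defensive access pattern, sketched against an illustrative result type (not the bundle's exported types):

type GladiaResult = {
  // Present in 0.8.9 parses; dropped from the 0.9.0 schemas.
  speaker_reidentification?: { results: string };
};

function readReidentification(result: GladiaResult): string | null {
  // Treat the now-absent field as "not available" instead of assuming it exists.
  return result.speaker_reidentification?.results ?? null;
}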
@@ -18636,23 +18667,6 @@ var preRecordedControllerGetPreRecordedJobV2Response = zod5.object({
18636
18667
  }).optional().describe(
18637
18668
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
18638
18669
  ),
18639
- speaker_reidentification: zod5.object({
18640
- success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
18641
- is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
18642
- exec_time: zod5.number().describe("Time audio intelligence model took to complete the task"),
18643
- error: zod5.object({
18644
- status_code: zod5.number().describe("Status code of the addon error"),
18645
- exception: zod5.string().describe("Reason of the addon error"),
18646
- message: zod5.string().describe("Detailed message of the addon error")
18647
- }).nullable().describe(
18648
- "`null` if `success` is `true`. Contains the error details of the failed model"
18649
- ),
18650
- results: zod5.string().describe(
18651
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
18652
- )
18653
- }).optional().describe(
18654
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
18655
- ),
18656
18670
  structured_data_extraction: zod5.object({
18657
18671
  success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
18658
18672
  is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
@@ -20790,23 +20804,6 @@ var transcriptionControllerListV2Response = zod5.object({
20790
20804
  }).optional().describe(
20791
20805
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
20792
20806
  ),
20793
- speaker_reidentification: zod5.object({
20794
- success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
20795
- is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
20796
- exec_time: zod5.number().describe("Time audio intelligence model took to complete the task"),
20797
- error: zod5.object({
20798
- status_code: zod5.number().describe("Status code of the addon error"),
20799
- exception: zod5.string().describe("Reason of the addon error"),
20800
- message: zod5.string().describe("Detailed message of the addon error")
20801
- }).nullable().describe(
20802
- "`null` if `success` is `true`. Contains the error details of the failed model"
20803
- ),
20804
- results: zod5.string().describe(
20805
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
20806
- )
20807
- }).optional().describe(
20808
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
20809
- ),
20810
20807
  structured_data_extraction: zod5.object({
20811
20808
  success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
20812
20809
  is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
@@ -21106,11 +21103,7 @@ var transcriptionControllerListV2Response = zod5.object({
21106
21103
  channels: zod5.number().min(1).max(transcriptionControllerListV2ResponseItemsItemRequestParamsChannelsMax).default(
21107
21104
  transcriptionControllerListV2ResponseItemsItemRequestParamsChannelsDefault
21108
21105
  ).describe("The number of channels of the audio stream"),
21109
- model: zod5.enum(["solaria-1"]).describe(
21110
- 'The model used to process the audio. "solaria-1" is used by default.'
21111
- ).default(transcriptionControllerListV2ResponseItemsItemRequestParamsModelDefault).describe(
21112
- 'The model used to process the audio. "solaria-1" is used by default.'
21113
- ),
21106
+ model: zod5.enum(["solaria-1"]).describe('The model used to process the audio. "solaria-1" is used by default.').default(transcriptionControllerListV2ResponseItemsItemRequestParamsModelDefault).describe('The model used to process the audio. "solaria-1" is used by default.'),
21114
21107
  endpointing: zod5.number().min(transcriptionControllerListV2ResponseItemsItemRequestParamsEndpointingMin).max(transcriptionControllerListV2ResponseItemsItemRequestParamsEndpointingMax).default(
21115
21108
  transcriptionControllerListV2ResponseItemsItemRequestParamsEndpointingDefault
21116
21109
  ).describe(
@@ -23534,23 +23527,6 @@ var transcriptionControllerGetTranscriptV2Response = zod5.discriminatedUnion("ki
23534
23527
  }).optional().describe(
23535
23528
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
23536
23529
  ),
23537
- speaker_reidentification: zod5.object({
23538
- success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
23539
- is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
23540
- exec_time: zod5.number().describe("Time audio intelligence model took to complete the task"),
23541
- error: zod5.object({
23542
- status_code: zod5.number().describe("Status code of the addon error"),
23543
- exception: zod5.string().describe("Reason of the addon error"),
23544
- message: zod5.string().describe("Detailed message of the addon error")
23545
- }).nullable().describe(
23546
- "`null` if `success` is `true`. Contains the error details of the failed model"
23547
- ),
23548
- results: zod5.string().describe(
23549
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
23550
- )
23551
- }).optional().describe(
23552
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
23553
- ),
23554
23530
  structured_data_extraction: zod5.object({
23555
23531
  success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
23556
23532
  is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
@@ -26716,23 +26692,6 @@ var historyControllerGetListV1Response = zod5.object({
26716
26692
  }).optional().describe(
26717
26693
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
26718
26694
  ),
26719
- speaker_reidentification: zod5.object({
26720
- success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
26721
- is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
26722
- exec_time: zod5.number().describe("Time audio intelligence model took to complete the task"),
26723
- error: zod5.object({
26724
- status_code: zod5.number().describe("Status code of the addon error"),
26725
- exception: zod5.string().describe("Reason of the addon error"),
26726
- message: zod5.string().describe("Detailed message of the addon error")
26727
- }).nullable().describe(
26728
- "`null` if `success` is `true`. Contains the error details of the failed model"
26729
- ),
26730
- results: zod5.string().describe(
26731
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
26732
- )
26733
- }).optional().describe(
26734
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
26735
- ),
26736
26695
  structured_data_extraction: zod5.object({
26737
26696
  success: zod5.boolean().describe("The audio intelligence model succeeded to get a valid output"),
26738
26697
  is_empty: zod5.boolean().describe("The audio intelligence model returned an empty value"),
@@ -27026,11 +26985,7 @@ var historyControllerGetListV1Response = zod5.object({
27026
26985
  historyControllerGetListV1ResponseItemsItemRequestParamsSampleRateDefault
27027
26986
  ).describe("The sample rate of the audio stream"),
27028
26987
  channels: zod5.number().min(1).max(historyControllerGetListV1ResponseItemsItemRequestParamsChannelsMax).default(historyControllerGetListV1ResponseItemsItemRequestParamsChannelsDefault).describe("The number of channels of the audio stream"),
27029
- model: zod5.enum(["solaria-1"]).describe(
27030
- 'The model used to process the audio. "solaria-1" is used by default.'
27031
- ).default(historyControllerGetListV1ResponseItemsItemRequestParamsModelDefault).describe(
27032
- 'The model used to process the audio. "solaria-1" is used by default.'
27033
- ),
26988
+ model: zod5.enum(["solaria-1"]).describe('The model used to process the audio. "solaria-1" is used by default.').default(historyControllerGetListV1ResponseItemsItemRequestParamsModelDefault).describe('The model used to process the audio. "solaria-1" is used by default.'),
27034
26989
  endpointing: zod5.number().min(historyControllerGetListV1ResponseItemsItemRequestParamsEndpointingMin).max(historyControllerGetListV1ResponseItemsItemRequestParamsEndpointingMax).default(
27035
26990
  historyControllerGetListV1ResponseItemsItemRequestParamsEndpointingDefault
27036
26991
  ).describe(
@@ -36191,6 +36146,7 @@ __export(sonioxPublicAPI_zod_exports, {
36191
36146
  createTemporaryApiKeyBody: () => createTemporaryApiKeyBody,
36192
36147
  createTemporaryApiKeyBodyClientReferenceIdMaxOne: () => createTemporaryApiKeyBodyClientReferenceIdMaxOne,
36193
36148
  createTemporaryApiKeyBodyExpiresInSecondsMax: () => createTemporaryApiKeyBodyExpiresInSecondsMax,
36149
+ createTemporaryApiKeyBodyMaxSessionDurationSecondsMaxOne: () => createTemporaryApiKeyBodyMaxSessionDurationSecondsMaxOne,
36194
36150
  createTranscriptionBody: () => createTranscriptionBody2,
36195
36151
  createTranscriptionBodyAudioUrlMaxOne: () => createTranscriptionBodyAudioUrlMaxOne,
36196
36152
  createTranscriptionBodyAudioUrlRegExpOne: () => createTranscriptionBodyAudioUrlRegExpOne,
@@ -36321,11 +36277,11 @@ var getTranscriptionsResponse = zod10.object({
36321
36277
  });
36322
36278
  var createTranscriptionBodyModelMaxThree = 32;
36323
36279
  var createTranscriptionBodyAudioUrlMaxOne = 4096;
36324
- var createTranscriptionBodyAudioUrlRegExpOne = new RegExp("^https?://[^\\s]+$");
36280
+ var createTranscriptionBodyAudioUrlRegExpOne = /^https?:\/\/[^\s]+$/;
36325
36281
  var createTranscriptionBodyLanguageHintsItemMax = 10;
36326
36282
  var createTranscriptionBodyLanguageHintsMaxOne = 100;
36327
36283
  var createTranscriptionBodyWebhookUrlMaxOne = 256;
36328
- var createTranscriptionBodyWebhookUrlRegExpOne = new RegExp("^https?://[^\\s]+$");
36284
+ var createTranscriptionBodyWebhookUrlRegExpOne = /^https?:\/\/[^\s]+$/;
36329
36285
  var createTranscriptionBodyWebhookAuthHeaderNameMaxOne = 256;
36330
36286
  var createTranscriptionBodyWebhookAuthHeaderValueMaxOne = 256;
36331
36287
  var createTranscriptionBodyClientReferenceIdMaxOne = 256;
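The two regular-expression changes above are behavior-preserving: the new RegExp("...") string form and the /.../ literal compile to the same pattern; the literal simply avoids double-escaping the backslash. A quick illustrative check:

const fromString = new RegExp("^https?://[^\\s]+$");
const literal = /^https?:\/\/[^\s]+$/;

console.log(fromString.source === literal.source); // true: identical pattern
console.log(literal.test("https://example.com/audio.wav")); // true
console.log(literal.test("not a url")); // false: no scheme, contains spaces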
@@ -36473,10 +36429,15 @@ var getModelsResponse = zod10.object({
  });
  var createTemporaryApiKeyBodyExpiresInSecondsMax = 3600;
  var createTemporaryApiKeyBodyClientReferenceIdMaxOne = 256;
+ var createTemporaryApiKeyBodyMaxSessionDurationSecondsMaxOne = 18e3;
  var createTemporaryApiKeyBody = zod10.object({
  usage_type: zod10.enum(["transcribe_websocket"]),
  expires_in_seconds: zod10.number().min(1).max(createTemporaryApiKeyBodyExpiresInSecondsMax).describe("Duration in seconds until the temporary API key expires."),
- client_reference_id: zod10.string().max(createTemporaryApiKeyBodyClientReferenceIdMaxOne).or(zod10.null()).optional().describe("Optional tracking identifier string. Does not need to be unique.")
+ client_reference_id: zod10.string().max(createTemporaryApiKeyBodyClientReferenceIdMaxOne).or(zod10.null()).optional().describe("Optional tracking identifier string. Does not need to be unique."),
+ single_use: zod10.boolean().or(zod10.null()).optional().describe("If true, the temporary API key can be used only once."),
+ max_session_duration_seconds: zod10.number().min(1).max(createTemporaryApiKeyBodyMaxSessionDurationSecondsMaxOne).or(zod10.null()).optional().describe(
+ "Maximum WebSocket connection duration in seconds. If exceeded, the connection will be dropped. If not set, no limit is applied."
+ )
  });

  // src/generated/soniox/streaming-types.zod.ts
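The hunk above adds two optional fields to the temporary-API-key request schema: single_use and max_session_duration_seconds, the latter capped by createTemporaryApiKeyBodyMaxSessionDurationSecondsMaxOne = 18e3 (18000 seconds, i.e. five hours). A minimal sketch of what the updated schema accepts, re-created locally with zod for illustration rather than imported from the bundle:

import { z } from "zod";

// Local re-creation of the updated schema for illustration; the shipped
// version is exported from the bundle via sonioxPublicAPI_zod_exports.
const createTemporaryApiKeyBody = z.object({
  usage_type: z.enum(["transcribe_websocket"]),
  expires_in_seconds: z.number().min(1).max(3600),
  client_reference_id: z.string().max(256).or(z.null()).optional(),
  single_use: z.boolean().or(z.null()).optional(),
  max_session_duration_seconds: z.number().min(1).max(18000).or(z.null()).optional(),
});

// Accepted: a one-shot key whose WebSocket session may last at most an hour.
createTemporaryApiKeyBody.parse({
  usage_type: "transcribe_websocket",
  expires_in_seconds: 600,
  single_use: true,
  max_session_duration_seconds: 3600,
});

// Rejected: 20000 exceeds the 18e3 (five-hour) ceiling.
const result = createTemporaryApiKeyBody.safeParse({
  usage_type: "transcribe_websocket",
  expires_in_seconds: 600,
  max_session_duration_seconds: 20000,
});
console.log(result.success); // false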
@@ -37727,6 +37688,16 @@ var ToolChoiceOptions = {
  required: "required"
  };

+ // src/generated/openai/schema/transcriptionDiarizedSegmentType.ts
+ var TranscriptionDiarizedSegmentType = {
+ transcripttextsegment: "transcript.text.segment"
+ };
+
+ // src/generated/openai/schema/transcriptionInclude.ts
+ var TranscriptionInclude = {
+ logprobs: "logprobs"
+ };
+
  // src/generated/openai/schema/transcriptTextDeltaEventType.ts
  var TranscriptTextDeltaEventType = {
  transcripttextdelta: "transcript.text.delta"
@@ -37752,16 +37723,6 @@ var TranscriptTextUsageTokensType = {
  tokens: "tokens"
  };

- // src/generated/openai/schema/transcriptionDiarizedSegmentType.ts
- var TranscriptionDiarizedSegmentType = {
- transcripttextsegment: "transcript.text.segment"
- };
-
- // src/generated/openai/schema/transcriptionInclude.ts
- var TranscriptionInclude = {
- logprobs: "logprobs"
- };
-
  // src/generated/openai/schema/vadConfigType.ts
  var VadConfigType = {
  server_vad: "server_vad"
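Taken together, this hunk and the previous one simply relocate TranscriptionDiarizedSegmentType and TranscriptionInclude earlier in the bundle; their values are unchanged. A short sketch of how such const maps are typically used as event discriminants (the StreamEvent shape below is a hypothetical illustration, not the package's actual type):

// Values copied from the hunk above; the interface is illustrative only.
const TranscriptionDiarizedSegmentType = {
  transcripttextsegment: "transcript.text.segment",
} as const;

interface StreamEvent {
  type: string;
  text?: string;
}

function isDiarizedSegment(event: StreamEvent): boolean {
  // Comparing against the const map avoids scattering bare string literals.
  return event.type === TranscriptionDiarizedSegmentType.transcripttextsegment;
}

console.log(isDiarizedSegment({ type: "transcript.text.segment", text: "hello" })); // true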
@@ -38143,8 +38104,8 @@ var getJobsQueryParams = zod12.object({
  var getJobsResponseJobsItemDurationMin = 0;
  var getJobsResponseJobsItemConfigTranscriptionConfigPunctuationOverridesSensitivityMin = 0;
  var getJobsResponseJobsItemConfigTranscriptionConfigPunctuationOverridesSensitivityMax = 1;
- var getJobsResponseJobsItemConfigTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = new RegExp("^(.|all)$");
- var getJobsResponseJobsItemConfigTranscriptionConfigChannelDiarizationLabelsItemRegExp = new RegExp("^[A-Za-z0-9._]+$");
+ var getJobsResponseJobsItemConfigTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = /^(.|all)$/;
+ var getJobsResponseJobsItemConfigTranscriptionConfigChannelDiarizationLabelsItemRegExp = /^[A-Za-z0-9._]+$/;
  var getJobsResponseJobsItemConfigTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMin = 0;
  var getJobsResponseJobsItemConfigTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMax = 1;
  var getJobsResponseJobsItemConfigTranslationConfigTargetLanguagesMax = 5;
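One detail worth noting in the permitted-marks pattern (here and in the three analogous hunks that follow): the dot in /^(.|all)$/ is an unescaped metacharacter, so the regex accepts any single character or the literal keyword "all", presumably meaning "one punctuation mark, or everything". A quick sketch of both patterns' behavior:

const permittedMarks = /^(.|all)$/;
const channelLabel = /^[A-Za-z0-9._]+$/;

console.log(permittedMarks.test(",")); // true: the unescaped "." matches any single character
console.log(permittedMarks.test("all")); // true: the literal keyword
console.log(permittedMarks.test("?!")); // false: two characters and not "all"

console.log(channelLabel.test("agent_1.left")); // true
console.log(channelLabel.test("agent 1")); // false: whitespace is not in the class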
@@ -38342,8 +38303,8 @@ var getJobsJobidParams = zod12.object({
  var getJobsJobidResponseJobDurationMin = 0;
  var getJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesSensitivityMin = 0;
  var getJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesSensitivityMax = 1;
- var getJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = new RegExp("^(.|all)$");
- var getJobsJobidResponseJobConfigTranscriptionConfigChannelDiarizationLabelsItemRegExp = new RegExp("^[A-Za-z0-9._]+$");
+ var getJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = /^(.|all)$/;
+ var getJobsJobidResponseJobConfigTranscriptionConfigChannelDiarizationLabelsItemRegExp = /^[A-Za-z0-9._]+$/;
  var getJobsJobidResponseJobConfigTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMin = 0;
  var getJobsJobidResponseJobConfigTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMax = 1;
  var getJobsJobidResponseJobConfigTranslationConfigTargetLanguagesMax = 5;
@@ -38540,8 +38501,8 @@ var deleteJobsJobidQueryParams = zod12.object({
  var deleteJobsJobidResponseJobDurationMin = 0;
  var deleteJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesSensitivityMin = 0;
  var deleteJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesSensitivityMax = 1;
- var deleteJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = new RegExp("^(.|all)$");
- var deleteJobsJobidResponseJobConfigTranscriptionConfigChannelDiarizationLabelsItemRegExp = new RegExp("^[A-Za-z0-9._]+$");
+ var deleteJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = /^(.|all)$/;
+ var deleteJobsJobidResponseJobConfigTranscriptionConfigChannelDiarizationLabelsItemRegExp = /^[A-Za-z0-9._]+$/;
  var deleteJobsJobidResponseJobConfigTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMin = 0;
  var deleteJobsJobidResponseJobConfigTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMax = 1;
  var deleteJobsJobidResponseJobConfigTranslationConfigTargetLanguagesMax = 5;
@@ -38744,8 +38705,8 @@ var getJobsJobidTranscriptQueryParams = zod12.object({
  var getJobsJobidTranscriptResponseJobDurationMin = 0;
  var getJobsJobidTranscriptResponseMetadataTranscriptionConfigPunctuationOverridesSensitivityMin = 0;
  var getJobsJobidTranscriptResponseMetadataTranscriptionConfigPunctuationOverridesSensitivityMax = 1;
- var getJobsJobidTranscriptResponseMetadataTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = new RegExp("^(.|all)$");
- var getJobsJobidTranscriptResponseMetadataTranscriptionConfigChannelDiarizationLabelsItemRegExp = new RegExp("^[A-Za-z0-9._]+$");
+ var getJobsJobidTranscriptResponseMetadataTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = /^(.|all)$/;
+ var getJobsJobidTranscriptResponseMetadataTranscriptionConfigChannelDiarizationLabelsItemRegExp = /^[A-Za-z0-9._]+$/;
  var getJobsJobidTranscriptResponseMetadataTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMin = 0;
  var getJobsJobidTranscriptResponseMetadataTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMax = 1;
  var getJobsJobidTranscriptResponseResultsItemVolumeMin = 0;