voice-router-dev 0.8.8 → 0.9.0

This diff shows the published contents of voice-router-dev 0.8.8 and 0.9.0 as they appear in their public registry, and is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -983,60 +983,60 @@ var SonioxLanguage = {
  // src/generated/soniox/models.ts
  var SonioxModels = [
  { id: "stt-rt-v4", name: "Speech-to-Text Real-time v4", mode: "real_time" },
- { id: "stt-rt-v3", name: "Speech-to-Text Real-time v3", mode: "real_time" },
  { id: "stt-async-v4", name: "Speech-to-Text Async v4", mode: "async" },
- { id: "stt-async-v3", name: "Speech-to-Text Async v3", mode: "async" },
- { id: "stt-rt-preview", name: "Speech-to-Text Real-time Preview", mode: "real_time", aliasOf: "stt-rt-v3" },
- { id: "stt-async-preview", name: "Speech-to-Text Async Preview", mode: "async", aliasOf: "stt-async-v3" },
- { id: "stt-rt-v3-preview", name: "Speech-to-Text Real-time v3 Preview", mode: "real_time", aliasOf: "stt-rt-v3" },
- { id: "stt-rt-preview-v2", name: "Speech-to-Text Real-time Preview v2", mode: "real_time", aliasOf: "stt-rt-v3" },
- { id: "stt-async-preview-v1", name: "Speech-to-Text Async Preview v1", mode: "async", aliasOf: "stt-async-v3" }
+ { id: "stt-rt-preview", name: "Speech-to-Text Real-time Preview", mode: "real_time", aliasOf: "stt-rt-v4" },
+ { id: "stt-async-preview", name: "Speech-to-Text Async Preview", mode: "async", aliasOf: "stt-async-v4" },
+ { id: "stt-rt-v3-preview", name: "Speech-to-Text Real-time v3 Preview", mode: "real_time", aliasOf: "stt-rt-v4" },
+ { id: "stt-rt-preview-v2", name: "Speech-to-Text Real-time Preview v2", mode: "real_time", aliasOf: "stt-rt-v4" },
+ { id: "stt-async-preview-v1", name: "Speech-to-Text Async Preview v1", mode: "async", aliasOf: "stt-async-v4" },
+ { id: "stt-rt-v3", name: "Speech-to-Text Real-time v3", mode: "real_time", aliasOf: "stt-rt-v4" },
+ { id: "stt-async-v3", name: "Speech-to-Text Async v3", mode: "async", aliasOf: "stt-async-v4" }
  ];
  var SonioxModelCodes = [
  "stt-rt-v4",
- "stt-rt-v3",
  "stt-async-v4",
- "stt-async-v3",
  "stt-rt-preview",
  "stt-async-preview",
  "stt-rt-v3-preview",
  "stt-rt-preview-v2",
- "stt-async-preview-v1"
+ "stt-async-preview-v1",
+ "stt-rt-v3",
+ "stt-async-v3"
  ];
  var SonioxModelLabels = {
  "stt-rt-v4": "Speech-to-Text Real-time v4",
- "stt-rt-v3": "Speech-to-Text Real-time v3",
  "stt-async-v4": "Speech-to-Text Async v4",
- "stt-async-v3": "Speech-to-Text Async v3",
  "stt-rt-preview": "Speech-to-Text Real-time Preview",
  "stt-async-preview": "Speech-to-Text Async Preview",
  "stt-rt-v3-preview": "Speech-to-Text Real-time v3 Preview",
  "stt-rt-preview-v2": "Speech-to-Text Real-time Preview v2",
- "stt-async-preview-v1": "Speech-to-Text Async Preview v1"
+ "stt-async-preview-v1": "Speech-to-Text Async Preview v1",
+ "stt-rt-v3": "Speech-to-Text Real-time v3",
+ "stt-async-v3": "Speech-to-Text Async v3"
  };
  var SonioxModel = {
  stt_rt_v4: "stt-rt-v4",
- stt_rt_v3: "stt-rt-v3",
  stt_async_v4: "stt-async-v4",
- stt_async_v3: "stt-async-v3",
  stt_rt_preview: "stt-rt-preview",
  stt_async_preview: "stt-async-preview",
  stt_rt_v3_preview: "stt-rt-v3-preview",
  stt_rt_preview_v2: "stt-rt-preview-v2",
- stt_async_preview_v1: "stt-async-preview-v1"
+ stt_async_preview_v1: "stt-async-preview-v1",
+ stt_rt_v3: "stt-rt-v3",
+ stt_async_v3: "stt-async-v3"
  };
  var SonioxRealtimeModel = {
  stt_rt_v4: "stt-rt-v4",
- stt_rt_v3: "stt-rt-v3",
  stt_rt_preview: "stt-rt-preview",
  stt_rt_v3_preview: "stt-rt-v3-preview",
- stt_rt_preview_v2: "stt-rt-preview-v2"
+ stt_rt_preview_v2: "stt-rt-preview-v2",
+ stt_rt_v3: "stt-rt-v3"
  };
  var SonioxAsyncModel = {
  stt_async_v4: "stt-async-v4",
- stt_async_v3: "stt-async-v3",
  stt_async_preview: "stt-async-preview",
- stt_async_preview_v1: "stt-async-preview-v1"
+ stt_async_preview_v1: "stt-async-preview-v1",
+ stt_async_v3: "stt-async-v3"
  };

  // src/generated/speechmatics/languages.ts
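The net effect of the hunk above: `stt-rt-v3` and `stt-async-v3` survive as ids but become aliases of the v4 models, and every preview alias is repointed from v3 to v4. A minimal sketch of how a consumer could follow `aliasOf` chains to the canonical model; the entry shape mirrors `SonioxModels` in this diff, but `resolveSonioxModel` is a hypothetical helper, not an export of voice-router-dev:

```ts
interface SonioxModelEntry {
  id: string;
  name: string;
  mode: "real_time" | "async";
  aliasOf?: string; // present on alias entries only
}

// Two representative entries copied from the 0.9.0 side of the hunk.
const models: SonioxModelEntry[] = [
  { id: "stt-rt-v4", name: "Speech-to-Text Real-time v4", mode: "real_time" },
  { id: "stt-rt-v3", name: "Speech-to-Text Real-time v3", mode: "real_time", aliasOf: "stt-rt-v4" },
];

// Follow aliasOf links until a canonical entry is reached; the seen-set
// guards against accidental alias cycles.
function resolveSonioxModel(id: string): SonioxModelEntry | undefined {
  const seen = new Set<string>();
  let entry = models.find((m) => m.id === id);
  while (entry?.aliasOf && !seen.has(entry.id)) {
    seen.add(entry.id);
    const target = entry.aliasOf;
    entry = models.find((m) => m.id === target);
  }
  return entry;
}

console.log(resolveSonioxModel("stt-rt-v3")?.id); // "stt-rt-v4"
```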
@@ -4011,17 +4011,17 @@ var SummaryTypesEnum = {
  concise: "concise"
  };

- // src/generated/gladia/schema/transcriptMessageType.ts
- var TranscriptMessageType = {
- transcript: "transcript"
- };
-
  // src/generated/gladia/schema/transcriptionControllerListV2KindItem.ts
  var TranscriptionControllerListV2KindItem = {
  "pre-recorded": "pre-recorded",
  live: "live"
  };

+ // src/generated/gladia/schema/transcriptMessageType.ts
+ var TranscriptMessageType = {
+ transcript: "transcript"
+ };
+
  // src/generated/gladia/schema/translationMessageType.ts
  var TranslationMessageType = {
  translation: "translation"
@@ -4293,7 +4293,7 @@ var WebhookTranscriptionSuccessPayloadEvent = {

  // src/generated/gladia/api/gladiaControlAPI.ts
  var preRecordedControllerInitPreRecordedJobV2 = (initTranscriptionRequest, options) => {
- return import_axios.default.post(`/v2/pre-recorded`, initTranscriptionRequest, options);
+ return import_axios.default.post("/v2/pre-recorded", initTranscriptionRequest, options);
  };
  var preRecordedControllerGetPreRecordedJobV2 = (id, options) => {
  return import_axios.default.get(`/v2/pre-recorded/${id}`, options);
@@ -4308,13 +4308,13 @@ var preRecordedControllerGetAudioV2 = (id, options) => {
  });
  };
  var transcriptionControllerListV2 = (params, options) => {
- return import_axios.default.get(`/v2/transcription`, {
+ return import_axios.default.get("/v2/transcription", {
  ...options,
  params: { ...params, ...options?.params }
  });
  };
  var streamingControllerInitStreamingSessionV2 = (streamingRequest, params, options) => {
- return import_axios.default.post(`/v2/live`, streamingRequest, {
+ return import_axios.default.post("/v2/live", streamingRequest, {
  ...options,
  params: { ...params, ...options?.params }
  });
@@ -4580,7 +4580,6 @@ var GladiaAdapter = class extends BaseAdapter {
  sentiment: result?.sentiment_analysis || void 0,
  audioToLlm: result?.audio_to_llm || void 0,
  chapters: result?.chapterization || void 0,
- speakerReidentification: result?.speaker_reidentification || void 0,
  structuredData: result?.structured_data_extraction || void 0,
  customMetadata: response.custom_metadata || void 0
  },
@@ -5642,17 +5641,17 @@ var PiiPolicy = {
  zodiac_sign: "zodiac_sign"
  };

+ // src/generated/assemblyai/schema/redactedAudioStatus.ts
+ var RedactedAudioStatus = {
+ redacted_audio_ready: "redacted_audio_ready"
+ };
+
  // src/generated/assemblyai/schema/redactPiiAudioQuality.ts
  var RedactPiiAudioQuality = {
  mp3: "mp3",
  wav: "wav"
  };

- // src/generated/assemblyai/schema/redactedAudioStatus.ts
- var RedactedAudioStatus = {
- redacted_audio_ready: "redacted_audio_ready"
- };
-
  // src/generated/assemblyai/schema/sentiment.ts
  var Sentiment = {
  POSITIVE: "POSITIVE",
@@ -5716,10 +5715,10 @@ var TranscriptRemoveAudioTags = {

  // src/generated/assemblyai/api/assemblyAIAPI.ts
  var createTranscript = (transcriptParams, options) => {
- return import_axios2.default.post(`/v2/transcript`, transcriptParams, options);
+ return import_axios2.default.post("/v2/transcript", transcriptParams, options);
  };
  var listTranscripts = (params, options) => {
- return import_axios2.default.get(`/v2/transcript`, {
+ return import_axios2.default.get("/v2/transcript", {
  ...options,
  params: { ...params, ...options?.params }
  });
@@ -6129,22 +6128,22 @@ var AssemblyAIAdapter = class extends BaseAdapter {
  normalizeResponse(response) {
  let status;
  switch (response.status) {
- case "queued":
+ case TranscriptStatus.queued:
  status = "queued";
  break;
- case "processing":
+ case TranscriptStatus.processing:
  status = "processing";
  break;
- case "completed":
+ case TranscriptStatus.completed:
  status = "completed";
  break;
- case "error":
+ case TranscriptStatus.error:
  status = "error";
  break;
  default:
  status = "queued";
  }
- if (response.status === "error") {
+ if (response.status === TranscriptStatus.error) {
  return {
  success: false,
  provider: this.name,
@@ -7856,13 +7855,13 @@ var TextNormalizationKind = {

  // src/generated/azure/api/speechServicesAPIVersion32.ts
  var transcriptionsList = (params, options) => {
- return import_axios4.default.get(`/transcriptions`, {
+ return import_axios4.default.get("/transcriptions", {
  ...options,
  params: { ...params, ...options?.params }
  });
  };
  var transcriptionsCreate = (transcription, options) => {
- return import_axios4.default.post(`/transcriptions`, transcription, options);
+ return import_axios4.default.post("/transcriptions", transcription, options);
  };
  var transcriptionsGet = (id, options) => {
  return import_axios4.default.get(`/transcriptions/${id}`, options);
@@ -7877,13 +7876,13 @@ var transcriptionsListFiles = (id, params, options) => {
  });
  };
  var webHooksList = (params, options) => {
- return import_axios4.default.get(`/webhooks`, {
+ return import_axios4.default.get("/webhooks", {
  ...options,
  params: { ...params, ...options?.params }
  });
  };
  var webHooksCreate = (webHook, options) => {
- return import_axios4.default.post(`/webhooks`, webHook, options);
+ return import_axios4.default.post("/webhooks", webHook, options);
  };
  var webHooksDelete = (id, options) => {
  return import_axios4.default.delete(`/webhooks/${id}`, options);
@@ -8011,7 +8010,7 @@ var AzureSTTAdapter = class extends BaseAdapter {
  this.getAxiosConfig()
  );
  const files = filesResponse.data?.values || [];
- const resultFile = files.find((file) => file.kind === "Transcription");
+ const resultFile = files.find((file) => file.kind === FileKind.Transcription);
  if (!resultFile?.links?.contentUrl) {
  return {
  success: false,
@@ -8235,15 +8234,20 @@ var AzureSTTAdapter = class extends BaseAdapter {
  return properties;
  }
  /**
- * Normalize Azure status to unified status
+ * Normalize Azure status to unified status using generated AzureStatus constants
  */
  normalizeStatus(status) {
- const statusStr = status?.toString().toLowerCase() || "";
- if (statusStr.includes("succeeded")) return "completed";
- if (statusStr.includes("running")) return "processing";
- if (statusStr.includes("notstarted")) return "queued";
- if (statusStr.includes("failed")) return "error";
- return "queued";
+ switch (status) {
+ case Status.Succeeded:
+ return "completed";
+ case Status.Running:
+ return "processing";
+ case Status.Failed:
+ return "error";
+ case Status.NotStarted:
+ default:
+ return "queued";
+ }
  }
  /**
  * Normalize Azure transcription response to unified format
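The rewritten `normalizeStatus` above trades lowercase substring matching for an exact switch over the generated `Status` constants, so unrelated strings that merely contain "failed" or "running" can no longer be misclassified. A table-driven equivalent, as a sketch only: the literal values below are assumptions matching Azure Speech-to-Text's documented transcription statuses, not copied from the generated module:

```ts
// Assumed literals; the real package derives these from its generated Azure schema.
const Status = {
  NotStarted: "NotStarted",
  Running: "Running",
  Succeeded: "Succeeded",
  Failed: "Failed",
} as const;

type UnifiedStatus = "queued" | "processing" | "completed" | "error";

// Exact-match lookup table; unknown statuses fall back to "queued",
// mirroring the default branch in the hunk above.
const statusMap: Record<string, UnifiedStatus> = {
  [Status.Succeeded]: "completed",
  [Status.Running]: "processing",
  [Status.Failed]: "error",
  [Status.NotStarted]: "queued",
};

function normalizeStatus(status: string): UnifiedStatus {
  return statusMap[status] ?? "queued";
}

console.log(normalizeStatus("Succeeded")); // "completed"
console.log(normalizeStatus("Unknown"));   // "queued"
```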
@@ -8363,30 +8367,30 @@ function getAzureOpenAIRealtimeUrl(endpoint, deployment, apiVersion = "2024-10-0
  var import_axios6 = __toESM(require("axios"));
  var createTranscription = (createTranscriptionRequest, options) => {
  const formData = new FormData();
- formData.append(`file`, createTranscriptionRequest.file);
- formData.append(`model`, createTranscriptionRequest.model);
+ formData.append("file", createTranscriptionRequest.file);
+ formData.append("model", createTranscriptionRequest.model);
  if (createTranscriptionRequest.language !== void 0) {
- formData.append(`language`, createTranscriptionRequest.language);
+ formData.append("language", createTranscriptionRequest.language);
  }
  if (createTranscriptionRequest.prompt !== void 0) {
- formData.append(`prompt`, createTranscriptionRequest.prompt);
+ formData.append("prompt", createTranscriptionRequest.prompt);
  }
  if (createTranscriptionRequest.response_format !== void 0) {
- formData.append(`response_format`, createTranscriptionRequest.response_format);
+ formData.append("response_format", createTranscriptionRequest.response_format);
  }
  if (createTranscriptionRequest.temperature !== void 0) {
- formData.append(`temperature`, createTranscriptionRequest.temperature.toString());
+ formData.append("temperature", createTranscriptionRequest.temperature.toString());
  }
  if (createTranscriptionRequest.include !== void 0) {
- createTranscriptionRequest.include.forEach((value) => formData.append(`include`, value));
+ createTranscriptionRequest.include.forEach((value) => formData.append("include", value));
  }
  if (createTranscriptionRequest.timestamp_granularities !== void 0) {
  createTranscriptionRequest.timestamp_granularities.forEach(
- (value) => formData.append(`timestamp_granularities`, value)
+ (value) => formData.append("timestamp_granularities", value)
  );
  }
  if (createTranscriptionRequest.stream !== void 0 && createTranscriptionRequest.stream !== null) {
- formData.append(`stream`, createTranscriptionRequest.stream.toString());
+ formData.append("stream", createTranscriptionRequest.stream.toString());
  }
  if (createTranscriptionRequest.chunking_strategy !== void 0 && createTranscriptionRequest.chunking_strategy !== null) {
  formData.append(
@@ -8396,15 +8400,15 @@ var createTranscription = (createTranscriptionRequest, options) => {
  }
  if (createTranscriptionRequest.known_speaker_names !== void 0) {
  createTranscriptionRequest.known_speaker_names.forEach(
- (value) => formData.append(`known_speaker_names`, value)
+ (value) => formData.append("known_speaker_names", value)
  );
  }
  if (createTranscriptionRequest.known_speaker_references !== void 0) {
  createTranscriptionRequest.known_speaker_references.forEach(
- (value) => formData.append(`known_speaker_references`, value)
+ (value) => formData.append("known_speaker_references", value)
  );
  }
- return import_axios6.default.post(`/audio/transcriptions`, formData, options);
+ return import_axios6.default.post("/audio/transcriptions", formData, options);
  };

  // src/generated/openai/schema/createTranscriptionRequestTimestampGranularitiesItem.ts
@@ -8493,7 +8497,7 @@ var OpenAIWhisperAdapter = class extends BaseAdapter {
  const request = {
  ...options?.openai,
  file: audioData,
- // Generated type expects Blob
+ // Buffer/Blob both accepted at runtime; generated type expects Blob
  model
  };
  if (options?.language) {
@@ -8513,7 +8517,11 @@ var OpenAIWhisperAdapter = class extends BaseAdapter {
  request.response_format = OpenAIResponseFormat.json;
  }
  const response = await createTranscription(request, this.getAxiosConfig());
- return this.normalizeResponse(response.data, model, isDiarization);
+ return this.normalizeResponse(
+ response.data,
+ model,
+ isDiarization
+ );
  } catch (error) {
  return this.createErrorResponse(error);
  }
@@ -9233,8 +9241,16 @@ var SpeechmaticsAdapter = class extends BaseAdapter {
  if (this.config?.wsBaseUrl) {
  return this.config.wsBaseUrl;
  }
- const regionPrefix = region || "eu1";
- return `wss://${regionPrefix}.rt.speechmatics.com/v2`;
+ const rtRegionMap = {
+ eu1: "eu",
+ eu2: "eu",
+ us1: "us",
+ us2: "us",
+ au1: "eu"
+ // No AU RT endpoint — fall back to EU
+ };
+ const rtPrefix = rtRegionMap[region || ""] || "eu";
+ return `wss://${rtPrefix}.rt.speechmatics.com/v2`;
  }
  /**
  * Stream audio for real-time transcription via WebSocket
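Behaviorally, the hunk above means a configured batch region such as `us2` no longer leaks into the realtime hostname verbatim; it is first collapsed to a realtime region prefix. A small sketch of the resulting URLs, reusing the mapping from the diff (the `getRtUrl` name is ours, not the package's):

```ts
// Mapping copied from the hunk; au1 intentionally falls back to eu.
const rtRegionMap: Record<string, string> = {
  eu1: "eu",
  eu2: "eu",
  us1: "us",
  us2: "us",
  au1: "eu",
};

function getRtUrl(region?: string): string {
  const rtPrefix = rtRegionMap[region ?? ""] ?? "eu";
  return `wss://${rtPrefix}.rt.speechmatics.com/v2`;
}

console.log(getRtUrl("us2")); // wss://us.rt.speechmatics.com/v2 (0.8.8 produced wss://us2...)
console.log(getRtUrl("eu1")); // wss://eu.rt.speechmatics.com/v2
console.log(getRtUrl());      // wss://eu.rt.speechmatics.com/v2 (default)
```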
@@ -9843,8 +9859,23 @@ var SonioxAdapter = class extends BaseAdapter {
  terms: options.customVocabulary
  };
  }
+ if (options?.webhookUrl) {
+ requestBody.webhook_url = options.webhookUrl;
+ }
  const response = await this.client.post("/transcriptions", requestBody);
  const transcriptionId = response.data.id;
+ if (options?.webhookUrl) {
+ return {
+ success: true,
+ provider: this.name,
+ data: {
+ id: transcriptionId,
+ text: "",
+ status: "queued"
+ },
+ raw: response.data
+ };
+ }
  return await this.pollForCompletion(transcriptionId);
  } catch (error) {
  return this.createErrorResponse(error);
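With this change, passing a `webhookUrl` option makes the Soniox adapter return a queued stub immediately instead of polling to completion. A usage sketch under stated assumptions: the import path, constructor shape, and method name are guesses for illustration; only the `webhookUrl` option and the `{ id, text: "", status: "queued" }` stub come from the hunk itself:

```ts
// Hypothetical import and constructor; adjust to how your project wires up the adapter.
import { SonioxAdapter } from "voice-router-dev";

const adapter = new SonioxAdapter({ apiKey: process.env.SONIOX_API_KEY! });
const audioUrl = "https://example.com/recordings/call.wav";

// 0.8.8: the adapter always polled until the job finished.
// 0.9.0: a webhookUrl short-circuits right after job creation.
const result = await adapter.transcribe(audioUrl, {
  webhookUrl: "https://example.com/hooks/soniox",
});

if (result.success && result.data.status === "queued") {
  // Persist the id; the finished transcript arrives via the webhook.
  console.log("Soniox job queued:", result.data.id);
}
```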
@@ -10189,7 +10220,7 @@ var SonioxAdapter = class extends BaseAdapter {
  start: token.start_ms ? token.start_ms / 1e3 : 0,
  end: token.end_ms ? token.end_ms / 1e3 : 0,
  confidence: token.confidence,
- speaker: token.speaker
+ speaker: token.speaker ?? void 0
  }));
  return buildUtterancesFromWords(words);
  }
@@ -10197,29 +10228,24 @@ var SonioxAdapter = class extends BaseAdapter {
  * Normalize Soniox response to unified format
  */
  normalizeResponse(response) {
- const text = response.text || (response.tokens ? response.tokens.filter((t) => t.is_final !== false).map((t) => t.text).join("") : "");
- const words = response.tokens ? response.tokens.filter(
- (t) => t.is_final !== false && t.start_ms !== void 0 && t.end_ms !== void 0
- ).map((token) => ({
+ const { text, tokens } = response;
+ const words = tokens.map((token) => ({
  word: token.text,
  start: token.start_ms / 1e3,
  end: token.end_ms / 1e3,
  confidence: token.confidence,
- speaker: token.speaker
- })) : [];
+ speaker: token.speaker ?? void 0
+ }));
  const speakerSet = /* @__PURE__ */ new Set();
- if (response.tokens) {
- response.tokens.forEach((t) => {
- if (t.speaker) speakerSet.add(t.speaker);
- });
+ for (const token of tokens) {
+ if (token.speaker) speakerSet.add(token.speaker);
  }
  const speakers = speakerSet.size > 0 ? Array.from(speakerSet).map((id) => ({
  id,
  label: `Speaker ${id}`
  })) : void 0;
- const tokens = response.tokens ? response.tokens.filter((t) => t.is_final !== false) : [];
  const utterances = tokens.length > 0 ? this.buildUtterancesFromTokens(tokens) : [];
- const language = response.tokens?.find((t) => t.language)?.language;
+ const language = tokens.find((t) => t.language)?.language ?? void 0;
  return {
  success: true,
  provider: this.name,
@@ -10393,11 +10419,29 @@ var ElevenLabsAdapter = class extends BaseAdapter {
  }
  }
  }
+ if (options?.webhookUrl) {
+ if (!formData.has("webhook")) {
+ formData.append("webhook", "true");
+ }
+ }
  const response = await this.client.post("/v1/speech-to-text", formData, {
  headers: {
  "Content-Type": "multipart/form-data"
  }
  });
+ if (options?.webhookUrl) {
+ const transcriptionId = response.data.transcription_id || response.data.id || `elevenlabs_${Date.now()}`;
+ return {
+ success: true,
+ provider: this.name,
+ data: {
+ id: transcriptionId,
+ text: "",
+ status: "queued"
+ },
+ raw: response.data
+ };
+ }
  return this.normalizeResponse(response.data);
  } catch (error) {
  return this.createErrorResponse(error);
@@ -10661,7 +10705,7 @@ var ElevenLabsAdapter = class extends BaseAdapter {
  * - Multi-channel: `MultichannelSpeechToTextResponseModel` with `transcripts[]`
  */
  normalizeResponse(response) {
- const chunks = response.transcripts ? response.transcripts : [response];
+ const chunks = "transcripts" in response ? response.transcripts : [response];
  const text = chunks.map((c) => c.text).join(" ");
  const words = [];
  const speakerSet = /* @__PURE__ */ new Set();
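The switch to the `in` operator above lets TypeScript discriminate the union between the single-channel and multichannel response models at the source level, where plain property truthiness would not narrow. A condensed sketch of the pattern, with simplified response types standing in for the generated ones (our own stand-ins, not the package's actual definitions beyond the names its doc comment cites):

```ts
// Simplified stand-ins for the two response shapes named in the diff's
// doc comment above; fields are trimmed to what the sketch needs.
interface SingleChannelResponse {
  text: string;
}

interface MultichannelResponse {
  transcripts: SingleChannelResponse[];
}

type SpeechToTextResponse = SingleChannelResponse | MultichannelResponse;

function collectChunks(response: SpeechToTextResponse): SingleChannelResponse[] {
  // "transcripts" in response narrows the union: inside the true branch the
  // compiler knows this is MultichannelResponse, otherwise SingleChannelResponse.
  return "transcripts" in response ? response.transcripts : [response];
}

const text = collectChunks({ text: "hello" })
  .map((c) => c.text)
  .join(" ");
console.log(text); // "hello"
```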
@@ -10711,7 +10755,7 @@ var ElevenLabsAdapter = class extends BaseAdapter {
  }
  }
  }
- const transcriptionId = response.transcription_id || chunks[0]?.transcription_id || `elevenlabs_${Date.now()}`;
+ const transcriptionId = ("transcription_id" in response ? response.transcription_id : response.transcription_id) || chunks[0]?.transcription_id || `elevenlabs_${Date.now()}`;
  return {
  success: true,
  provider: this.name,
@@ -11055,9 +11099,7 @@ var listenTranscribeQueryParams = import_zod.z.object({
  ),
  dictation: import_zod.z.boolean().optional().describe("Dictation mode for controlling formatting with dictated speech"),
  encoding: import_zod.z.enum(["linear16", "flac", "mulaw", "amr-nb", "amr-wb", "opus", "speex", "g729"]).optional().describe("Specify the expected encoding of your submitted audio"),
- filler_words: import_zod.z.boolean().optional().describe(
- 'Filler Words can help transcribe interruptions in your audio, like "uh" and "um"'
- ),
+ filler_words: import_zod.z.boolean().optional().describe('Filler Words can help transcribe interruptions in your audio, like "uh" and "um"'),
  keyterm: import_zod.z.array(import_zod.z.string()).optional().describe(
  "Key term prompting can boost or suppress specialized terminology and brands. Only compatible with Nova-3"
  ),
@@ -11761,6 +11803,7 @@ __export(assemblyAIAPI_zod_exports, {
  createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault: () => createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault,
  createTranscriptBodySpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault: () => createTranscriptBodySpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault,
  createTranscriptBodySummarizationDefault: () => createTranscriptBodySummarizationDefault,
+ createTranscriptBodyTemperatureDefault: () => createTranscriptBodyTemperatureDefault,
  createTranscriptResponse: () => createTranscriptResponse,
  createTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault: () => createTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault,
  createTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault: () => createTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault,
@@ -11830,6 +11873,7 @@ var createTranscriptBodySpeakerOptionsMinSpeakersExpectedDefault = 1;
  var createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault = true;
  var createTranscriptBodySpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
  var createTranscriptBodySummarizationDefault = false;
+ var createTranscriptBodyTemperatureDefault = 0;
  var createTranscriptBodyCustomTopicsDefault = false;
  var createTranscriptBody = import_zod3.z.object({
  audio_end_at: import_zod3.z.number().optional().describe(
@@ -11839,10 +11883,10 @@ var createTranscriptBody = import_zod3.z.object({
  "The point in time, in milliseconds, to begin transcribing in your media file. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
  ),
  auto_chapters: import_zod3.z.boolean().optional().describe(
- "Enable [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Enable [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  auto_highlights: import_zod3.z.boolean().optional().describe(
- "Enable [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases), either true or false"
+ "Enable [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights), either true or false"
  ),
  content_safety: import_zod3.z.boolean().optional().describe(
  "Enable [Content Moderation](https://www.assemblyai.com/docs/content-moderation), can be true or false"
@@ -11858,16 +11902,16 @@ var createTranscriptBody = import_zod3.z.object({
  "Object containing words or phrases to replace, and the word or phrase to replace with"
  )
  ).optional().describe(
- "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/correct-spelling-of-terms) for more details."
  ),
  disfluencies: import_zod3.z.boolean().optional().describe(
- 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/include-filler-words), like "umm", in your media file; can be true or false'
  ),
  domain: import_zod3.z.string().nullish().describe(
  'Enable domain-specific transcription models to improve accuracy for specialized terminology. Set to `"medical-v1"` to enable [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) for improved accuracy of medical terms such as medications, procedures, conditions, and dosages.\n\nSupported languages: English (`en`), Spanish (`es`), German (`de`), French (`fr`). If used with an unsupported language, the parameter is ignored and a warning is returned.\n'
  ),
  entity_detection: import_zod3.z.boolean().optional().describe(
- "Enable [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection), can be true or false"
+ "Enable [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript), can be true or false"
  ),
  filter_profanity: import_zod3.z.boolean().optional().describe(
  "Filter profanity from the transcribed text, can be true or false. See [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) for more details."
@@ -11876,7 +11920,7 @@ var createTranscriptBody = import_zod3.z.object({
  "Enable [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio), can be true or false"
  ),
  iab_categories: import_zod3.z.boolean().optional().describe(
- "Enable [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection), can be true or false"
+ "Enable [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics), can be true or false"
  ),
  keyterms_prompt: import_zod3.z.array(import_zod3.z.string()).optional().describe(
  "Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3 Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.\n"
@@ -12124,7 +12168,7 @@ var createTranscriptBody = import_zod3.z.object({
  "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
  ),
  multichannel: import_zod3.z.boolean().optional().describe(
- "Enable [Multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) transcription, can be true or false."
+ "Enable [Multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) transcription, can be true or false."
  ),
  prompt: import_zod3.z.string().optional().describe(
  "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
@@ -12207,23 +12251,23 @@ var createTranscriptBody = import_zod3.z.object({
  "The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
  ),
  sentiment_analysis: import_zod3.z.boolean().optional().describe(
- "Enable [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis), can be true or false"
+ "Enable [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech), can be true or false"
  ),
  speaker_labels: import_zod3.z.boolean().optional().describe(
- "Enable [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization), can be true or false"
+ "Enable [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers), can be true or false"
  ),
  speaker_options: import_zod3.z.object({
  min_speakers_expected: import_zod3.z.number().default(createTranscriptBodySpeakerOptionsMinSpeakersExpectedDefault).describe(
- "The minimum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details."
+ "The minimum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-a-range-of-possible-speakers) for more details."
  ),
  max_speakers_expected: import_zod3.z.number().optional().describe(
- "<Warning>Setting this parameter too high may hurt model accuracy</Warning>\nThe maximum number of speakers expected in the audio file. The default depends on audio duration: no limit for 0-2 minutes, 10 for 2-10 minutes, and 30 for 10+ minutes. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details.\n"
+ "<Warning>Setting this parameter too high may hurt model accuracy</Warning>\nThe maximum number of speakers expected in the audio file. The default depends on audio duration: no limit for 0-2 minutes, 10 for 2-10 minutes, and 30 for 10+ minutes. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-a-range-of-possible-speakers) for more details.\n"
  )
  }).optional().describe(
- "Specify options for [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers). Use this to set a range of possible speakers."
+ "Specify options for [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-a-range-of-possible-speakers). Use this to set a range of possible speakers."
  ),
  speakers_expected: import_zod3.z.number().nullish().describe(
- "Tells the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
+ "Tells the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-number-of-speakers-expected) for more details."
  ),
  speech_models: import_zod3.z.array(
  import_zod3.z.string().describe(
@@ -12299,7 +12343,7 @@ var createTranscriptBody = import_zod3.z.object({
  "Enable speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.\n"
  ),
  summarization: import_zod3.z.boolean().optional().describe(
- "Enable [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Enable [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  summary_model: import_zod3.z.enum(["informative", "conversational", "catchy"]).optional().describe("The model to summarize the transcript"),
  summary_type: import_zod3.z.enum(["bullets", "bullets_verbose", "gist", "headline", "paragraph"]).optional().describe("The type of summary"),
@@ -12308,6 +12352,9 @@ var createTranscriptBody = import_zod3.z.object({
  ).or(import_zod3.z.null()).optional().describe(
  'Remove [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) from the transcript text. Set to `"all"` to remove all audio tags.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n'
  ),
+ temperature: import_zod3.z.number().optional().describe(
+ "Control the amount of randomness injected into the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
+ ),
  webhook_auth_header_name: import_zod3.z.string().nullish().describe(
  "The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests"
  ),
@@ -12329,7 +12376,7 @@ var createTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault =
  var createTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
  var createTranscriptResponse = import_zod3.z.object({
  audio_channels: import_zod3.z.number().optional().describe(
- "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
+ "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) is enabled."
  ),
  audio_duration: import_zod3.z.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
  audio_end_at: import_zod3.z.number().nullish().describe(
@@ -12340,10 +12387,10 @@ var createTranscriptResponse = import_zod3.z.object({
  ),
  audio_url: import_zod3.z.string().describe("The URL of the media that was transcribed"),
  auto_chapters: import_zod3.z.boolean().nullish().describe(
- "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  auto_highlights: import_zod3.z.boolean().describe(
- "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
+ "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) is enabled, either true or false"
  ),
  auto_highlights_result: import_zod3.z.object({
  status: import_zod3.z.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -12363,9 +12410,9 @@ var createTranscriptResponse = import_zod3.z.object({
  })
  ).describe("A temporally-sequential array of Key Phrases")
  }).describe(
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
  ).or(import_zod3.z.null()).optional().describe(
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
  ),
  chapters: import_zod3.z.array(
  import_zod3.z.object({
@@ -12378,7 +12425,7 @@ var createTranscriptResponse = import_zod3.z.object({
  end: import_zod3.z.number().describe("The starting time, in milliseconds, for the chapter")
  }).describe("Chapter of the audio file")
  ).nullish().describe(
- "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
+ "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for more information."
  ),
  confidence: import_zod3.z.number().nullish().describe(
  "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
@@ -12434,10 +12481,10 @@ var createTranscriptResponse = import_zod3.z.object({
  "Object containing words or phrases to replace, and the word or phrase to replace with"
  )
  ).nullish().describe(
- "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/correct-spelling-of-terms) for more details."
  ),
  disfluencies: import_zod3.z.boolean().nullish().describe(
- 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/include-filler-words), like "umm", in your media file; can be true or false'
  ),
  domain: import_zod3.z.string().nullish().describe(
  'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
@@ -12499,10 +12546,10 @@ var createTranscriptResponse = import_zod3.z.object({
  )
  }).describe("A detected entity")
  ).nullish().describe(
- "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
+ "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) for more information.\n"
  ),
  entity_detection: import_zod3.z.boolean().nullish().describe(
- "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
+ "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) is enabled, can be true or false"
  ),
  error: import_zod3.z.string().optional().describe("Error message of why the transcript failed"),
  filter_profanity: import_zod3.z.boolean().nullish().describe(
@@ -12512,7 +12559,7 @@ var createTranscriptResponse = import_zod3.z.object({
  "Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
  ),
  iab_categories: import_zod3.z.boolean().nullish().describe(
- "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
+ "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) is enabled, can be true or false"
  ),
  iab_categories_result: import_zod3.z.object({
  status: import_zod3.z.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -12535,9 +12582,9 @@ var createTranscriptResponse = import_zod3.z.object({
  ).describe("An array of results for the Topic Detection model"),
  summary: import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number()).describe("The overall relevance of topic to the entire audio file")
  }).describe(
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
  ).or(import_zod3.z.null()).optional().describe(
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
  ),
  id: import_zod3.z.string().uuid().describe("The unique identifier of your transcript"),
  keyterms_prompt: import_zod3.z.array(import_zod3.z.string()).optional().describe(
@@ -12787,7 +12834,7 @@ var createTranscriptResponse = import_zod3.z.object({
  "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
  ),
  multichannel: import_zod3.z.boolean().nullish().describe(
- "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
+ "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) was enabled in the transcription request, either true or false"
  ),
  prompt: import_zod3.z.string().optional().describe(
  "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
@@ -12870,7 +12917,7 @@ var createTranscriptResponse = import_zod3.z.object({
  "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
  ),
  sentiment_analysis: import_zod3.z.boolean().nullish().describe(
- "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
+ "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) is enabled, can be true or false"
  ),
  sentiment_analysis_results: import_zod3.z.array(
  import_zod3.z.object({
@@ -12885,17 +12932,17 @@ var createTranscriptResponse = import_zod3.z.object({
  "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
  ),
  speaker: import_zod3.z.string().nullable().describe(
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
  )
  }).describe("The result of the Sentiment Analysis model")
  ).nullish().describe(
- "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
+ "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) for more information.\n"
  ),
  speaker_labels: import_zod3.z.boolean().nullish().describe(
- "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
+ "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, can be true or false"
  ),
  speakers_expected: import_zod3.z.number().nullish().describe(
- "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
+ "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-number-of-speakers-expected) for more details."
  ),
  speech_model_used: import_zod3.z.string().optional().describe(
  "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
@@ -12998,22 +13045,25 @@ var createTranscriptResponse = import_zod3.z.object({
  "The status of your transcript. Possible values are queued, processing, completed, or error."
  ),
  summarization: import_zod3.z.boolean().describe(
- "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+ "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
  ),
  summary: import_zod3.z.string().nullish().describe(
- "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
+ "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
  ),
  summary_model: import_zod3.z.string().nullish().describe(
- "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
+ "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n"
  ),
  summary_type: import_zod3.z.string().nullish().describe(
- "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
+ "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
  ),
  remove_audio_tags: import_zod3.z.enum(["all"]).describe(
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
  ).or(import_zod3.z.null()).optional().describe(
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
  ),
+ temperature: import_zod3.z.number().nullish().describe(
+ "The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
+ ),
  text: import_zod3.z.string().nullish().describe("The textual transcript of your media file"),
  throttled: import_zod3.z.boolean().nullish().describe(
  "True while a request is throttled and false when a request is no longer throttled"
@@ -13034,7 +13084,7 @@ var createTranscriptResponse = import_zod3.z.object({
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
  ),
  speaker: import_zod3.z.string().nullable().describe(
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
  )
  })
  ).describe("The words in the utterance."),
@@ -13049,7 +13099,7 @@ var createTranscriptResponse = import_zod3.z.object({
  )
  })
  ).nullish().describe(
- "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
+ "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) for more information.\n"
  ),
  webhook_auth: import_zod3.z.boolean().describe(
  "Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
@@ -13073,7 +13123,7 @@ var createTranscriptResponse = import_zod3.z.object({
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
  ),
  speaker: import_zod3.z.string().nullable().describe(
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
  )
  })
  ).nullish().describe(
@@ -13146,7 +13196,7 @@ var getTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault = tr
  var getTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
  var getTranscriptResponse = import_zod3.z.object({
  audio_channels: import_zod3.z.number().optional().describe(
- "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
+ "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) is enabled."
  ),
  audio_duration: import_zod3.z.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
  audio_end_at: import_zod3.z.number().nullish().describe(
@@ -13157,10 +13207,10 @@ var getTranscriptResponse = import_zod3.z.object({
  ),
  audio_url: import_zod3.z.string().describe("The URL of the media that was transcribed"),
  auto_chapters: import_zod3.z.boolean().nullish().describe(
13160
- "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
13210
+ "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
13161
13211
  ),
13162
13212
  auto_highlights: import_zod3.z.boolean().describe(
13163
- "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
13213
+ "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) is enabled, either true or false"
13164
13214
  ),
13165
13215
  auto_highlights_result: import_zod3.z.object({
13166
13216
  status: import_zod3.z.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -13180,9 +13230,9 @@ var getTranscriptResponse = import_zod3.z.object({
13180
13230
  })
13181
13231
  ).describe("A temporally-sequential array of Key Phrases")
13182
13232
  }).describe(
13183
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
13233
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
13184
13234
  ).or(import_zod3.z.null()).optional().describe(
13185
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
13235
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
13186
13236
  ),
13187
13237
  chapters: import_zod3.z.array(
13188
13238
  import_zod3.z.object({
@@ -13195,7 +13245,7 @@ var getTranscriptResponse = import_zod3.z.object({
13195
13245
  end: import_zod3.z.number().describe("The ending time, in milliseconds, for the chapter")
13196
13246
  }).describe("Chapter of the audio file")
13197
13247
  ).nullish().describe(
13198
- "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
13248
+ "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for more information."
13199
13249
  ),
13200
13250
  confidence: import_zod3.z.number().nullish().describe(
13201
13251
  "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
@@ -13251,10 +13301,10 @@ var getTranscriptResponse = import_zod3.z.object({
13251
13301
  "Object containing words or phrases to replace, and the word or phrase to replace with"
13252
13302
  )
13253
13303
  ).nullish().describe(
13254
- "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
13304
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/correct-spelling-of-terms) for more details."
13255
13305
  ),
13256
13306
  disfluencies: import_zod3.z.boolean().nullish().describe(
13257
- 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
13307
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/include-filler-words), like "umm", in your media file; can be true or false'
13258
13308
  ),
13259
13309
  domain: import_zod3.z.string().nullish().describe(
13260
13310
  'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
@@ -13316,10 +13366,10 @@ var getTranscriptResponse = import_zod3.z.object({
13316
13366
  )
13317
13367
  }).describe("A detected entity")
13318
13368
  ).nullish().describe(
13319
- "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
13369
+ "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) for more information.\n"
13320
13370
  ),
13321
13371
  entity_detection: import_zod3.z.boolean().nullish().describe(
13322
- "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
13372
+ "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) is enabled, can be true or false"
13323
13373
  ),
13324
13374
  error: import_zod3.z.string().optional().describe("Error message of why the transcript failed"),
13325
13375
  filter_profanity: import_zod3.z.boolean().nullish().describe(
@@ -13329,7 +13379,7 @@ var getTranscriptResponse = import_zod3.z.object({
13329
13379
  "Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
13330
13380
  ),
13331
13381
  iab_categories: import_zod3.z.boolean().nullish().describe(
13332
- "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
13382
+ "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) is enabled, can be true or false"
13333
13383
  ),
13334
13384
  iab_categories_result: import_zod3.z.object({
13335
13385
  status: import_zod3.z.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -13352,9 +13402,9 @@ var getTranscriptResponse = import_zod3.z.object({
13352
13402
  ).describe("An array of results for the Topic Detection model"),
13353
13403
  summary: import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number()).describe("The overall relevance of each topic to the entire audio file")
13354
13404
  }).describe(
13355
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
13405
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
13356
13406
  ).or(import_zod3.z.null()).optional().describe(
13357
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
13407
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
13358
13408
  ),
13359
13409
  id: import_zod3.z.string().uuid().describe("The unique identifier of your transcript"),
13360
13410
  keyterms_prompt: import_zod3.z.array(import_zod3.z.string()).optional().describe(
@@ -13604,7 +13654,7 @@ var getTranscriptResponse = import_zod3.z.object({
13604
13654
  "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
13605
13655
  ),
13606
13656
  multichannel: import_zod3.z.boolean().nullish().describe(
13607
- "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
13657
+ "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) was enabled in the transcription request, either true or false"
13608
13658
  ),
13609
13659
  prompt: import_zod3.z.string().optional().describe(
13610
13660
  "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
@@ -13687,7 +13737,7 @@ var getTranscriptResponse = import_zod3.z.object({
13687
13737
  "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
13688
13738
  ),
13689
13739
  sentiment_analysis: import_zod3.z.boolean().nullish().describe(
13690
- "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
13740
+ "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) is enabled, can be true or false"
13691
13741
  ),
13692
13742
  sentiment_analysis_results: import_zod3.z.array(
13693
13743
  import_zod3.z.object({
@@ -13702,17 +13752,17 @@ var getTranscriptResponse = import_zod3.z.object({
13702
13752
  "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13703
13753
  ),
13704
13754
  speaker: import_zod3.z.string().nullable().describe(
13705
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
13755
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
13706
13756
  )
13707
13757
  }).describe("The result of the Sentiment Analysis model")
13708
13758
  ).nullish().describe(
13709
- "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
13759
+ "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) for more information.\n"
13710
13760
  ),
13711
13761
  speaker_labels: import_zod3.z.boolean().nullish().describe(
13712
- "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
13762
+ "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, can be true or false"
13713
13763
  ),
13714
13764
  speakers_expected: import_zod3.z.number().nullish().describe(
13715
- "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
13765
+ "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-number-of-speakers-expected) for more details."
13716
13766
  ),
13717
13767
  speech_model_used: import_zod3.z.string().optional().describe(
13718
13768
  "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
@@ -13815,22 +13865,25 @@ var getTranscriptResponse = import_zod3.z.object({
13815
13865
  "The status of your transcript. Possible values are queued, processing, completed, or error."
13816
13866
  ),
13817
13867
  summarization: import_zod3.z.boolean().describe(
13818
- "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
13868
+ "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
13819
13869
  ),
13820
13870
  summary: import_zod3.z.string().nullish().describe(
13821
- "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
13871
+ "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
13822
13872
  ),
13823
13873
  summary_model: import_zod3.z.string().nullish().describe(
13824
- "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
13874
+ "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n"
13825
13875
  ),
13826
13876
  summary_type: import_zod3.z.string().nullish().describe(
13827
- "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
13877
+ "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
13828
13878
  ),
13829
13879
  remove_audio_tags: import_zod3.z.enum(["all"]).describe(
13830
13880
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
13831
13881
  ).or(import_zod3.z.null()).optional().describe(
13832
13882
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
13833
13883
  ),
13884
+ temperature: import_zod3.z.number().nullish().describe(
13885
+ "The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
13886
+ ),
13834
13887
  text: import_zod3.z.string().nullish().describe("The textual transcript of your media file"),
13835
13888
  throttled: import_zod3.z.boolean().nullish().describe(
13836
13889
  "True while a request is throttled and false when a request is no longer throttled"
@@ -13851,7 +13904,7 @@ var getTranscriptResponse = import_zod3.z.object({
13851
13904
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13852
13905
  ),
13853
13906
  speaker: import_zod3.z.string().nullable().describe(
13854
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
13907
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
13855
13908
  )
13856
13909
  })
13857
13910
  ).describe("The words in the utterance."),
@@ -13866,7 +13919,7 @@ var getTranscriptResponse = import_zod3.z.object({
13866
13919
  )
13867
13920
  })
13868
13921
  ).nullish().describe(
13869
- "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
13922
+ "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) for more information.\n"
13870
13923
  ),
13871
13924
  webhook_auth: import_zod3.z.boolean().describe(
13872
13925
  "Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
@@ -13890,7 +13943,7 @@ var getTranscriptResponse = import_zod3.z.object({
13890
13943
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
13891
13944
  ),
13892
13945
  speaker: import_zod3.z.string().nullable().describe(
13893
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
13946
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
13894
13947
  )
13895
13948
  })
13896
13949
  ).nullish().describe(
@@ -13923,7 +13976,7 @@ var deleteTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault =
13923
13976
  var deleteTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
13924
13977
  var deleteTranscriptResponse = import_zod3.z.object({
13925
13978
  audio_channels: import_zod3.z.number().optional().describe(
13926
- "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
13979
+ "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) is enabled."
13927
13980
  ),
13928
13981
  audio_duration: import_zod3.z.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
13929
13982
  audio_end_at: import_zod3.z.number().nullish().describe(
@@ -13934,10 +13987,10 @@ var deleteTranscriptResponse = import_zod3.z.object({
13934
13987
  ),
13935
13988
  audio_url: import_zod3.z.string().describe("The URL of the media that was transcribed"),
13936
13989
  auto_chapters: import_zod3.z.boolean().nullish().describe(
13937
- "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
13990
+ "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
13938
13991
  ),
13939
13992
  auto_highlights: import_zod3.z.boolean().describe(
13940
- "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
13993
+ "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) is enabled, either true or false"
13941
13994
  ),
13942
13995
  auto_highlights_result: import_zod3.z.object({
13943
13996
  status: import_zod3.z.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -13957,9 +14010,9 @@ var deleteTranscriptResponse = import_zod3.z.object({
13957
14010
  })
13958
14011
  ).describe("A temporally-sequential array of Key Phrases")
13959
14012
  }).describe(
13960
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
14013
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
13961
14014
  ).or(import_zod3.z.null()).optional().describe(
13962
- "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
14015
+ "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/identify-highlights) for more information.\n"
13963
14016
  ),
13964
14017
  chapters: import_zod3.z.array(
13965
14018
  import_zod3.z.object({
@@ -13972,7 +14025,7 @@ var deleteTranscriptResponse = import_zod3.z.object({
13972
14025
  end: import_zod3.z.number().describe("The ending time, in milliseconds, for the chapter")
13973
14026
  }).describe("Chapter of the audio file")
13974
14027
  ).nullish().describe(
13975
- "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
14028
+ "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/create-summarized-chapters) for more information."
13976
14029
  ),
13977
14030
  confidence: import_zod3.z.number().nullish().describe(
13978
14031
  "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
@@ -14028,10 +14081,10 @@ var deleteTranscriptResponse = import_zod3.z.object({
14028
14081
  "Object containing words or phrases to replace, and the word or phrase to replace with"
14029
14082
  )
14030
14083
  ).nullish().describe(
14031
- "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
14084
+ "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/correct-spelling-of-terms) for more details."
14032
14085
  ),
14033
14086
  disfluencies: import_zod3.z.boolean().nullish().describe(
14034
- 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
14087
+ 'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/include-filler-words), like "umm", in your media file; can be true or false'
14035
14088
  ),
14036
14089
  domain: import_zod3.z.string().nullish().describe(
14037
14090
  'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
@@ -14093,10 +14146,10 @@ var deleteTranscriptResponse = import_zod3.z.object({
14093
14146
  )
14094
14147
  }).describe("A detected entity")
14095
14148
  ).nullish().describe(
14096
- "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
14149
+ "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) for more information.\n"
14097
14150
  ),
14098
14151
  entity_detection: import_zod3.z.boolean().nullish().describe(
14099
- "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
14152
+ "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/detect-entities-in-transcript) is enabled, can be true or false"
14100
14153
  ),
14101
14154
  error: import_zod3.z.string().optional().describe("Error message of why the transcript failed"),
14102
14155
  filter_profanity: import_zod3.z.boolean().nullish().describe(
@@ -14106,7 +14159,7 @@ var deleteTranscriptResponse = import_zod3.z.object({
14106
14159
  "Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
14107
14160
  ),
14108
14161
  iab_categories: import_zod3.z.boolean().nullish().describe(
14109
- "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
14162
+ "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) is enabled, can be true or false"
14110
14163
  ),
14111
14164
  iab_categories_result: import_zod3.z.object({
14112
14165
  status: import_zod3.z.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -14129,9 +14182,9 @@ var deleteTranscriptResponse = import_zod3.z.object({
14129
14182
  ).describe("An array of results for the Topic Detection model"),
14130
14183
  summary: import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number()).describe("The overall relevance of each topic to the entire audio file")
14131
14184
  }).describe(
14132
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
14185
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
14133
14186
  ).or(import_zod3.z.null()).optional().describe(
14134
- "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
14187
+ "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/detect-discussion-topics) for more information.\n"
14135
14188
  ),
14136
14189
  id: import_zod3.z.string().uuid().describe("The unique identifier of your transcript"),
14137
14190
  keyterms_prompt: import_zod3.z.array(import_zod3.z.string()).optional().describe(
@@ -14381,7 +14434,7 @@ var deleteTranscriptResponse = import_zod3.z.object({
14381
14434
  "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
14382
14435
  ),
14383
14436
  multichannel: import_zod3.z.boolean().nullish().describe(
14384
- "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
14437
+ "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) was enabled in the transcription request, either true or false"
14385
14438
  ),
14386
14439
  prompt: import_zod3.z.string().optional().describe(
14387
14440
  "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
@@ -14464,7 +14517,7 @@ var deleteTranscriptResponse = import_zod3.z.object({
14464
14517
  "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
14465
14518
  ),
14466
14519
  sentiment_analysis: import_zod3.z.boolean().nullish().describe(
14467
- "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
14520
+ "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) is enabled, can be true or false"
14468
14521
  ),
14469
14522
  sentiment_analysis_results: import_zod3.z.array(
14470
14523
  import_zod3.z.object({
@@ -14479,17 +14532,17 @@ var deleteTranscriptResponse = import_zod3.z.object({
14479
14532
  "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14480
14533
  ),
14481
14534
  speaker: import_zod3.z.string().nullable().describe(
14482
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14535
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
14483
14536
  )
14484
14537
  }).describe("The result of the Sentiment Analysis model")
14485
14538
  ).nullish().describe(
14486
- "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
14539
+ "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/analyze-sentiment-of-speech) for more information.\n"
14487
14540
  ),
14488
14541
  speaker_labels: import_zod3.z.boolean().nullish().describe(
14489
- "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
14542
+ "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, can be true or false"
14490
14543
  ),
14491
14544
  speakers_expected: import_zod3.z.number().nullish().describe(
14492
- "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
14545
+ "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers#set-number-of-speakers-expected) for more details."
14493
14546
  ),
14494
14547
  speech_model_used: import_zod3.z.string().optional().describe(
14495
14548
  "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
@@ -14592,22 +14645,25 @@ var deleteTranscriptResponse = import_zod3.z.object({
14592
14645
  "The status of your transcript. Possible values are queued, processing, completed, or error."
14593
14646
  ),
14594
14647
  summarization: import_zod3.z.boolean().describe(
14595
- "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
14648
+ "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
14596
14649
  ),
14597
14650
  summary: import_zod3.z.string().nullish().describe(
14598
- "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
14651
+ "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
14599
14652
  ),
14600
14653
  summary_model: import_zod3.z.string().nullish().describe(
14601
- "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
14654
+ "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details.\n"
14602
14655
  ),
14603
14656
  summary_type: import_zod3.z.string().nullish().describe(
14604
- "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
14657
+ "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarize-transcripts) for details."
14605
14658
  ),
14606
14659
  remove_audio_tags: import_zod3.z.enum(["all"]).describe(
14607
14660
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
14608
14661
  ).or(import_zod3.z.null()).optional().describe(
14609
14662
  "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
14610
14663
  ),
14664
+ temperature: import_zod3.z.number().nullish().describe(
14665
+ "The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
14666
+ ),
14611
14667
  text: import_zod3.z.string().nullish().describe("The textual transcript of your media file"),
14612
14668
  throttled: import_zod3.z.boolean().nullish().describe(
14613
14669
  "True while a request is throttled and false when a request is no longer throttled"
@@ -14628,7 +14684,7 @@ var deleteTranscriptResponse = import_zod3.z.object({
14628
14684
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14629
14685
  ),
14630
14686
  speaker: import_zod3.z.string().nullable().describe(
14631
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14687
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
14632
14688
  )
14633
14689
  })
14634
14690
  ).describe("The words in the utterance."),
@@ -14643,7 +14699,7 @@ var deleteTranscriptResponse = import_zod3.z.object({
14643
14699
  )
14644
14700
  })
14645
14701
  ).nullish().describe(
14646
- "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
14702
+ "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/transcribe-multiple-audio-channels) for more information.\n"
14647
14703
  ),
14648
14704
  webhook_auth: import_zod3.z.boolean().describe(
14649
14705
  "Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
@@ -14667,7 +14723,7 @@ var deleteTranscriptResponse = import_zod3.z.object({
14667
14723
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14668
14724
  ),
14669
14725
  speaker: import_zod3.z.string().nullable().describe(
14670
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14726
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
14671
14727
  )
14672
14728
  })
14673
14729
  ).nullish().describe(
@@ -14712,7 +14768,7 @@ var getTranscriptSentencesResponse = import_zod3.z.object({
14712
14768
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14713
14769
  ),
14714
14770
  speaker: import_zod3.z.string().nullable().describe(
14715
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14771
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
14716
14772
  )
14717
14773
  })
14718
14774
  ).describe("An array of words in the sentence"),
@@ -14720,7 +14776,7 @@ var getTranscriptSentencesResponse = import_zod3.z.object({
14720
14776
  "The channel of the sentence. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14721
14777
  ),
14722
14778
  speaker: import_zod3.z.string().nullable().describe(
14723
- "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14779
+ "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
14724
14780
  )
14725
14781
  })
14726
14782
  ).describe("An array of sentences in the transcript")
@@ -14748,7 +14804,7 @@ var getTranscriptParagraphsResponse = import_zod3.z.object({
14748
14804
  "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
14749
14805
  ),
14750
14806
  speaker: import_zod3.z.string().nullable().describe(
14751
- "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
14807
+ "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/label-speakers) is enabled, else null"
14752
14808
  )
14753
14809
  })
14754
14810
  ).describe("An array of words in the paragraph")
@@ -17360,23 +17416,6 @@ var preRecordedControllerGetPreRecordedJobsV2Response = import_zod5.z.object({
17360
17416
  }).optional().describe(
17361
17417
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
17362
17418
  ),
17363
- speaker_reidentification: import_zod5.z.object({
17364
- success: import_zod5.z.boolean().describe("The audio intelligence model succeeded to get a valid output"),
17365
- is_empty: import_zod5.z.boolean().describe("The audio intelligence model returned an empty value"),
17366
- exec_time: import_zod5.z.number().describe("Time audio intelligence model took to complete the task"),
17367
- error: import_zod5.z.object({
17368
- status_code: import_zod5.z.number().describe("Status code of the addon error"),
17369
- exception: import_zod5.z.string().describe("Reason of the addon error"),
17370
- message: import_zod5.z.string().describe("Detailed message of the addon error")
17371
- }).nullable().describe(
17372
- "`null` if `success` is `true`. Contains the error details of the failed model"
17373
- ),
17374
- results: import_zod5.z.string().describe(
17375
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
17376
- )
17377
- }).optional().describe(
17378
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
17379
- ),
17380
17419
  structured_data_extraction: import_zod5.z.object({
17381
17420
  success: import_zod5.z.boolean().describe("The audio intelligence model succeeded to get a valid output"),
17382
17421
  is_empty: import_zod5.z.boolean().describe("The audio intelligence model returned an empty value"),
@@ -18857,23 +18896,6 @@ var preRecordedControllerGetPreRecordedJobV2Response = import_zod5.z.object({
18857
18896
  }).optional().describe(
18858
18897
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
18859
18898
  ),
18860
- speaker_reidentification: import_zod5.z.object({
18861
- success: import_zod5.z.boolean().describe("The audio intelligence model succeeded to get a valid output"),
18862
- is_empty: import_zod5.z.boolean().describe("The audio intelligence model returned an empty value"),
18863
- exec_time: import_zod5.z.number().describe("Time audio intelligence model took to complete the task"),
18864
- error: import_zod5.z.object({
18865
- status_code: import_zod5.z.number().describe("Status code of the addon error"),
18866
- exception: import_zod5.z.string().describe("Reason of the addon error"),
18867
- message: import_zod5.z.string().describe("Detailed message of the addon error")
18868
- }).nullable().describe(
18869
- "`null` if `success` is `true`. Contains the error details of the failed model"
18870
- ),
18871
- results: import_zod5.z.string().describe(
18872
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
18873
- )
18874
- }).optional().describe(
18875
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
18876
- ),
18877
18899
  structured_data_extraction: import_zod5.z.object({
18878
18900
  success: import_zod5.z.boolean().describe("The audio intelligence model succeeded to get a valid output"),
18879
18901
  is_empty: import_zod5.z.boolean().describe("The audio intelligence model returned an empty value"),
@@ -21011,23 +21033,6 @@ var transcriptionControllerListV2Response = import_zod5.z.object({
21011
21033
  }).optional().describe(
21012
21034
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
21013
21035
  ),
21014
- speaker_reidentification: import_zod5.z.object({
21015
- success: import_zod5.z.boolean().describe("The audio intelligence model succeeded to get a valid output"),
21016
- is_empty: import_zod5.z.boolean().describe("The audio intelligence model returned an empty value"),
21017
- exec_time: import_zod5.z.number().describe("Time audio intelligence model took to complete the task"),
21018
- error: import_zod5.z.object({
21019
- status_code: import_zod5.z.number().describe("Status code of the addon error"),
21020
- exception: import_zod5.z.string().describe("Reason of the addon error"),
21021
- message: import_zod5.z.string().describe("Detailed message of the addon error")
21022
- }).nullable().describe(
21023
- "`null` if `success` is `true`. Contains the error details of the failed model"
21024
- ),
21025
- results: import_zod5.z.string().describe(
21026
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
21027
- )
21028
- }).optional().describe(
21029
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
21030
- ),
21031
21036
  structured_data_extraction: import_zod5.z.object({
21032
21037
  success: import_zod5.z.boolean().describe("The audio intelligence model succeeded to get a valid output"),
21033
21038
  is_empty: import_zod5.z.boolean().describe("The audio intelligence model returned an empty value"),
@@ -21327,11 +21332,7 @@ var transcriptionControllerListV2Response = import_zod5.z.object({
21327
21332
  channels: import_zod5.z.number().min(1).max(transcriptionControllerListV2ResponseItemsItemRequestParamsChannelsMax).default(
21328
21333
  transcriptionControllerListV2ResponseItemsItemRequestParamsChannelsDefault
21329
21334
  ).describe("The number of channels of the audio stream"),
21330
- model: import_zod5.z.enum(["solaria-1"]).describe(
21331
- 'The model used to process the audio. "solaria-1" is used by default.'
21332
- ).default(transcriptionControllerListV2ResponseItemsItemRequestParamsModelDefault).describe(
21333
- 'The model used to process the audio. "solaria-1" is used by default.'
21334
- ),
21335
+ model: import_zod5.z.enum(["solaria-1"]).describe('The model used to process the audio. "solaria-1" is used by default.').default(transcriptionControllerListV2ResponseItemsItemRequestParamsModelDefault).describe('The model used to process the audio. "solaria-1" is used by default.'),
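
This hunk only reflows the `model` field onto one line; the schema is unchanged. The doubled `.describe(...)` is deliberate under zod v3 semantics: `.default()` wraps the enum in a `ZodDefault`, and each layer carries its own description. A minimal sketch, assuming zod v3 (`_def.innerType` is private zod API, shown purely for illustration):

    import { z } from "zod";

    const model = z.enum(["solaria-1"])
      .describe("inner description")  // attached to the ZodEnum
      .default("solaria-1")
      .describe("outer description"); // attached to the ZodDefault wrapper

    console.log(model.description);                // "outer description"
    console.log(model._def.innerType.description); // "inner description"
    console.log(model.parse(undefined));           // "solaria-1" (default applied)
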
21335
21336
  endpointing: import_zod5.z.number().min(transcriptionControllerListV2ResponseItemsItemRequestParamsEndpointingMin).max(transcriptionControllerListV2ResponseItemsItemRequestParamsEndpointingMax).default(
21336
21337
  transcriptionControllerListV2ResponseItemsItemRequestParamsEndpointingDefault
21337
21338
  ).describe(
@@ -23755,23 +23756,6 @@ var transcriptionControllerGetTranscriptV2Response = import_zod5.z.discriminated
23755
23756
  }).optional().describe(
23756
23757
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
23757
23758
  ),
23758
- speaker_reidentification: import_zod5.z.object({
23759
- success: import_zod5.z.boolean().describe("The audio intelligence model succeeded to get a valid output"),
23760
- is_empty: import_zod5.z.boolean().describe("The audio intelligence model returned an empty value"),
23761
- exec_time: import_zod5.z.number().describe("Time audio intelligence model took to complete the task"),
23762
- error: import_zod5.z.object({
23763
- status_code: import_zod5.z.number().describe("Status code of the addon error"),
23764
- exception: import_zod5.z.string().describe("Reason of the addon error"),
23765
- message: import_zod5.z.string().describe("Detailed message of the addon error")
23766
- }).nullable().describe(
23767
- "`null` if `success` is `true`. Contains the error details of the failed model"
23768
- ),
23769
- results: import_zod5.z.string().describe(
23770
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
23771
- )
23772
- }).optional().describe(
23773
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
23774
- ),
23775
23759
  structured_data_extraction: import_zod5.z.object({
23776
23760
  success: import_zod5.z.boolean().describe("The audio intelligence model succeeded to get a valid output"),
23777
23761
  is_empty: import_zod5.z.boolean().describe("The audio intelligence model returned an empty value"),
@@ -26937,23 +26921,6 @@ var historyControllerGetListV1Response = import_zod5.z.object({
26937
26921
  }).optional().describe(
26938
26922
  "If `name_consistency` has been enabled, Gladia will improve consistency of the names accross the transcription"
26939
26923
  ),
26940
- speaker_reidentification: import_zod5.z.object({
26941
- success: import_zod5.z.boolean().describe("The audio intelligence model succeeded to get a valid output"),
26942
- is_empty: import_zod5.z.boolean().describe("The audio intelligence model returned an empty value"),
26943
- exec_time: import_zod5.z.number().describe("Time audio intelligence model took to complete the task"),
26944
- error: import_zod5.z.object({
26945
- status_code: import_zod5.z.number().describe("Status code of the addon error"),
26946
- exception: import_zod5.z.string().describe("Reason of the addon error"),
26947
- message: import_zod5.z.string().describe("Detailed message of the addon error")
26948
- }).nullable().describe(
26949
- "`null` if `success` is `true`. Contains the error details of the failed model"
26950
- ),
26951
- results: import_zod5.z.string().describe(
26952
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
26953
- )
26954
- }).optional().describe(
26955
- "If `speaker_reidentification` has been enabled, results of the AI speaker reidentification."
26956
- ),
26957
26924
  structured_data_extraction: import_zod5.z.object({
26958
26925
  success: import_zod5.z.boolean().describe("The audio intelligence model succeeded to get a valid output"),
26959
26926
  is_empty: import_zod5.z.boolean().describe("The audio intelligence model returned an empty value"),
@@ -27247,11 +27214,7 @@ var historyControllerGetListV1Response = import_zod5.z.object({
  historyControllerGetListV1ResponseItemsItemRequestParamsSampleRateDefault
  ).describe("The sample rate of the audio stream"),
  channels: import_zod5.z.number().min(1).max(historyControllerGetListV1ResponseItemsItemRequestParamsChannelsMax).default(historyControllerGetListV1ResponseItemsItemRequestParamsChannelsDefault).describe("The number of channels of the audio stream"),
- model: import_zod5.z.enum(["solaria-1"]).describe(
- 'The model used to process the audio. "solaria-1" is used by default.'
- ).default(historyControllerGetListV1ResponseItemsItemRequestParamsModelDefault).describe(
- 'The model used to process the audio. "solaria-1" is used by default.'
- ),
+ model: import_zod5.z.enum(["solaria-1"]).describe('The model used to process the audio. "solaria-1" is used by default.').default(historyControllerGetListV1ResponseItemsItemRequestParamsModelDefault).describe('The model used to process the audio. "solaria-1" is used by default.'),
  endpointing: import_zod5.z.number().min(historyControllerGetListV1ResponseItemsItemRequestParamsEndpointingMin).max(historyControllerGetListV1ResponseItemsItemRequestParamsEndpointingMax).default(
  historyControllerGetListV1ResponseItemsItemRequestParamsEndpointingDefault
  ).describe(
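
Note that the reflowed `model` line above still carries the duplicated `.describe()` call from the old formatting. In zod, a later `.describe()` simply overwrites the earlier one, so the duplication is redundant but harmless. A standalone illustration (not package code):

```ts
import { z } from "zod";

const model = z
  .enum(["solaria-1"])
  .describe("first description")   // overwritten by the call below
  .default("solaria-1")
  .describe("second description"); // this one wins

console.log(model.description); // "second description"
```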
@@ -36412,6 +36375,7 @@ __export(sonioxPublicAPI_zod_exports, {
  createTemporaryApiKeyBody: () => createTemporaryApiKeyBody,
  createTemporaryApiKeyBodyClientReferenceIdMaxOne: () => createTemporaryApiKeyBodyClientReferenceIdMaxOne,
  createTemporaryApiKeyBodyExpiresInSecondsMax: () => createTemporaryApiKeyBodyExpiresInSecondsMax,
+ createTemporaryApiKeyBodyMaxSessionDurationSecondsMaxOne: () => createTemporaryApiKeyBodyMaxSessionDurationSecondsMaxOne,
  createTranscriptionBody: () => createTranscriptionBody2,
  createTranscriptionBodyAudioUrlMaxOne: () => createTranscriptionBodyAudioUrlMaxOne,
  createTranscriptionBodyAudioUrlRegExpOne: () => createTranscriptionBodyAudioUrlRegExpOne,
@@ -36542,11 +36506,11 @@ var getTranscriptionsResponse = import_zod10.z.object({
  });
  var createTranscriptionBodyModelMaxThree = 32;
  var createTranscriptionBodyAudioUrlMaxOne = 4096;
- var createTranscriptionBodyAudioUrlRegExpOne = new RegExp("^https?://[^\\s]+$");
+ var createTranscriptionBodyAudioUrlRegExpOne = /^https?:\/\/[^\s]+$/;
  var createTranscriptionBodyLanguageHintsItemMax = 10;
  var createTranscriptionBodyLanguageHintsMaxOne = 100;
  var createTranscriptionBodyWebhookUrlMaxOne = 256;
- var createTranscriptionBodyWebhookUrlRegExpOne = new RegExp("^https?://[^\\s]+$");
+ var createTranscriptionBodyWebhookUrlRegExpOne = /^https?:\/\/[^\s]+$/;
  var createTranscriptionBodyWebhookAuthHeaderNameMaxOne = 256;
  var createTranscriptionBodyWebhookAuthHeaderValueMaxOne = 256;
  var createTranscriptionBodyClientReferenceIdMaxOne = 256;
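
Swapping `new RegExp(...)` constructor calls for regex literals here (and in the Speechmatics hunks further down) is purely cosmetic: `\\s` inside a string literal is `\s` in the compiled pattern, and `RegExp.prototype.source` escapes `/` the same way in both forms. A standalone check:

```ts
const literal = /^https?:\/\/[^\s]+$/;
const constructed = new RegExp("^https?://[^\\s]+$");

console.log(literal.source === constructed.source); // true
console.log(literal.test("https://example.com/audio.wav")); // true
console.log(literal.test("not a url")); // false: whitespace fails [^\s]+
```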
@@ -36694,10 +36658,15 @@ var getModelsResponse = import_zod10.z.object({
  });
  var createTemporaryApiKeyBodyExpiresInSecondsMax = 3600;
  var createTemporaryApiKeyBodyClientReferenceIdMaxOne = 256;
+ var createTemporaryApiKeyBodyMaxSessionDurationSecondsMaxOne = 18e3;
  var createTemporaryApiKeyBody = import_zod10.z.object({
  usage_type: import_zod10.z.enum(["transcribe_websocket"]),
  expires_in_seconds: import_zod10.z.number().min(1).max(createTemporaryApiKeyBodyExpiresInSecondsMax).describe("Duration in seconds until the temporary API key expires."),
- client_reference_id: import_zod10.z.string().max(createTemporaryApiKeyBodyClientReferenceIdMaxOne).or(import_zod10.z.null()).optional().describe("Optional tracking identifier string. Does not need to be unique.")
+ client_reference_id: import_zod10.z.string().max(createTemporaryApiKeyBodyClientReferenceIdMaxOne).or(import_zod10.z.null()).optional().describe("Optional tracking identifier string. Does not need to be unique."),
+ single_use: import_zod10.z.boolean().or(import_zod10.z.null()).optional().describe("If true, the temporary API key can be used only once."),
+ max_session_duration_seconds: import_zod10.z.number().min(1).max(createTemporaryApiKeyBodyMaxSessionDurationSecondsMaxOne).or(import_zod10.z.null()).optional().describe(
+ "Maximum WebSocket connection duration in seconds. If exceeded, the connection will be dropped. If not set, no limit is applied."
+ )
  });

  // src/generated/soniox/streaming-types.zod.ts
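
The new constant caps `max_session_duration_seconds` at `18e3`, i.e. 18,000 seconds (five hours). A standalone re-creation of the 0.9.0 body schema with a request exercising both new optional fields; field names and bounds are copied from the diff, but the schema object here is a sketch, not the package export:

```ts
import { z } from "zod";

const createTemporaryApiKeyBody = z.object({
  usage_type: z.enum(["transcribe_websocket"]),
  expires_in_seconds: z.number().min(1).max(3600),
  client_reference_id: z.string().max(256).or(z.null()).optional(),
  // Added in 0.9.0:
  single_use: z.boolean().or(z.null()).optional(),
  max_session_duration_seconds: z.number().min(1).max(18000).or(z.null()).optional(),
});

// Valid: a one-shot key whose WebSocket session is capped at 30 minutes.
createTemporaryApiKeyBody.parse({
  usage_type: "transcribe_websocket",
  expires_in_seconds: 600,
  single_use: true,
  max_session_duration_seconds: 1800,
});
```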
@@ -37948,6 +37917,16 @@ var ToolChoiceOptions = {
  required: "required"
  };

+ // src/generated/openai/schema/transcriptionDiarizedSegmentType.ts
+ var TranscriptionDiarizedSegmentType = {
+ transcripttextsegment: "transcript.text.segment"
+ };
+
+ // src/generated/openai/schema/transcriptionInclude.ts
+ var TranscriptionInclude = {
+ logprobs: "logprobs"
+ };
+
  // src/generated/openai/schema/transcriptTextDeltaEventType.ts
  var TranscriptTextDeltaEventType = {
  transcripttextdelta: "transcript.text.delta"
@@ -37973,16 +37952,6 @@ var TranscriptTextUsageTokensType = {
  tokens: "tokens"
  };

- // src/generated/openai/schema/transcriptionDiarizedSegmentType.ts
- var TranscriptionDiarizedSegmentType = {
- transcripttextsegment: "transcript.text.segment"
- };
-
- // src/generated/openai/schema/transcriptionInclude.ts
- var TranscriptionInclude = {
- logprobs: "logprobs"
- };
-
  // src/generated/openai/schema/vadConfigType.ts
  var VadConfigType = {
  server_vad: "server_vad"
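
The two hunks above are a pure relocation: `TranscriptionDiarizedSegmentType` and `TranscriptionInclude` move earlier in the file with identical values, so lookups behave the same. For reference, a standalone sketch of typical usage of these const maps:

```ts
const TranscriptionInclude = { logprobs: "logprobs" } as const;
const TranscriptionDiarizedSegmentType = {
  transcripttextsegment: "transcript.text.segment",
} as const;

// Request log probabilities; tag diarized segment events by their type string.
const include = [TranscriptionInclude.logprobs];
const isSegmentEvent = (type: string) =>
  type === TranscriptionDiarizedSegmentType.transcripttextsegment;

console.log(include, isSegmentEvent("transcript.text.segment")); // [ 'logprobs' ] true
```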
@@ -38364,8 +38333,8 @@ var getJobsQueryParams = import_zod12.z.object({
  var getJobsResponseJobsItemDurationMin = 0;
  var getJobsResponseJobsItemConfigTranscriptionConfigPunctuationOverridesSensitivityMin = 0;
  var getJobsResponseJobsItemConfigTranscriptionConfigPunctuationOverridesSensitivityMax = 1;
- var getJobsResponseJobsItemConfigTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = new RegExp("^(.|all)$");
- var getJobsResponseJobsItemConfigTranscriptionConfigChannelDiarizationLabelsItemRegExp = new RegExp("^[A-Za-z0-9._]+$");
+ var getJobsResponseJobsItemConfigTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = /^(.|all)$/;
+ var getJobsResponseJobsItemConfigTranscriptionConfigChannelDiarizationLabelsItemRegExp = /^[A-Za-z0-9._]+$/;
  var getJobsResponseJobsItemConfigTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMin = 0;
  var getJobsResponseJobsItemConfigTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMax = 1;
  var getJobsResponseJobsItemConfigTranslationConfigTargetLanguagesMax = 5;
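
One behavioral note on the permitted-marks pattern itself (unchanged by this diff): the dot in `^(.|all)$` is unescaped, so it matches any single character, not only a literal period; `"all"` is the only multi-character value accepted. A quick check:

```ts
const permittedMark = /^(.|all)$/;

console.log(permittedMark.test("."));    // true
console.log(permittedMark.test("?"));    // true: unescaped "." matches any character
console.log(permittedMark.test("all"));  // true
console.log(permittedMark.test("full")); // false: multi-character and not "all"
```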
@@ -38563,8 +38532,8 @@ var getJobsJobidParams = import_zod12.z.object({
  var getJobsJobidResponseJobDurationMin = 0;
  var getJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesSensitivityMin = 0;
  var getJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesSensitivityMax = 1;
- var getJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = new RegExp("^(.|all)$");
- var getJobsJobidResponseJobConfigTranscriptionConfigChannelDiarizationLabelsItemRegExp = new RegExp("^[A-Za-z0-9._]+$");
+ var getJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = /^(.|all)$/;
+ var getJobsJobidResponseJobConfigTranscriptionConfigChannelDiarizationLabelsItemRegExp = /^[A-Za-z0-9._]+$/;
  var getJobsJobidResponseJobConfigTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMin = 0;
  var getJobsJobidResponseJobConfigTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMax = 1;
  var getJobsJobidResponseJobConfigTranslationConfigTargetLanguagesMax = 5;
@@ -38761,8 +38730,8 @@ var deleteJobsJobidQueryParams = import_zod12.z.object({
  var deleteJobsJobidResponseJobDurationMin = 0;
  var deleteJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesSensitivityMin = 0;
  var deleteJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesSensitivityMax = 1;
- var deleteJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = new RegExp("^(.|all)$");
- var deleteJobsJobidResponseJobConfigTranscriptionConfigChannelDiarizationLabelsItemRegExp = new RegExp("^[A-Za-z0-9._]+$");
+ var deleteJobsJobidResponseJobConfigTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = /^(.|all)$/;
+ var deleteJobsJobidResponseJobConfigTranscriptionConfigChannelDiarizationLabelsItemRegExp = /^[A-Za-z0-9._]+$/;
  var deleteJobsJobidResponseJobConfigTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMin = 0;
  var deleteJobsJobidResponseJobConfigTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMax = 1;
  var deleteJobsJobidResponseJobConfigTranslationConfigTargetLanguagesMax = 5;
@@ -38965,8 +38934,8 @@ var getJobsJobidTranscriptQueryParams = import_zod12.z.object({
  var getJobsJobidTranscriptResponseJobDurationMin = 0;
  var getJobsJobidTranscriptResponseMetadataTranscriptionConfigPunctuationOverridesSensitivityMin = 0;
  var getJobsJobidTranscriptResponseMetadataTranscriptionConfigPunctuationOverridesSensitivityMax = 1;
- var getJobsJobidTranscriptResponseMetadataTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = new RegExp("^(.|all)$");
- var getJobsJobidTranscriptResponseMetadataTranscriptionConfigChannelDiarizationLabelsItemRegExp = new RegExp("^[A-Za-z0-9._]+$");
+ var getJobsJobidTranscriptResponseMetadataTranscriptionConfigPunctuationOverridesPermittedMarksItemRegExp = /^(.|all)$/;
+ var getJobsJobidTranscriptResponseMetadataTranscriptionConfigChannelDiarizationLabelsItemRegExp = /^[A-Za-z0-9._]+$/;
  var getJobsJobidTranscriptResponseMetadataTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMin = 0;
  var getJobsJobidTranscriptResponseMetadataTranscriptionConfigSpeakerDiarizationConfigSpeakerSensitivityMax = 1;
  var getJobsJobidTranscriptResponseResultsItemVolumeMin = 0;