voice-router-dev 0.8.3 → 0.8.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +42 -1
- package/dist/constants.d.mts +7 -8
- package/dist/constants.d.ts +7 -8
- package/dist/constants.js +4 -8
- package/dist/constants.mjs +4 -8
- package/dist/{field-configs-t_lVCkE5.d.mts → field-configs-DLbrsYTk.d.mts} +3299 -2114
- package/dist/{field-configs-t_lVCkE5.d.ts → field-configs-DLbrsYTk.d.ts} +3299 -2114
- package/dist/field-configs.d.mts +1 -1
- package/dist/field-configs.d.ts +1 -1
- package/dist/field-configs.js +2325 -1634
- package/dist/field-configs.mjs +2325 -1634
- package/dist/index.d.mts +237 -702
- package/dist/index.d.ts +237 -702
- package/dist/index.js +2073 -1529
- package/dist/index.mjs +2073 -1528
- package/dist/{speechToTextChunkResponseModel-DvIT4xai.d.mts → speechToTextChunkResponseModel-BcT1LJSZ.d.mts} +1630 -1166
- package/dist/{speechToTextChunkResponseModel-DjL2ncnf.d.ts → speechToTextChunkResponseModel-eq8eLKEA.d.ts} +1630 -1166
- package/dist/webhooks.d.mts +9 -3
- package/dist/webhooks.d.ts +9 -3
- package/package.json +5 -5
package/dist/index.mjs
CHANGED
|
@@ -2252,13 +2252,6 @@ var TranslationLanguageCodeEnum = {
|
|
|
2252
2252
|
zh: "zh"
|
|
2253
2253
|
};
|
|
2254
2254
|
|
|
2255
|
-
// src/generated/assemblyai/schema/speechModel.ts
|
|
2256
|
-
var SpeechModel = {
|
|
2257
|
-
best: "best",
|
|
2258
|
-
"slam-1": "slam-1",
|
|
2259
|
-
universal: "universal"
|
|
2260
|
-
};
|
|
2261
|
-
|
|
2262
2255
|
// src/generated/assemblyai/schema/transcriptLanguageCode.ts
|
|
2263
2256
|
var TranscriptLanguageCode = {
|
|
2264
2257
|
en: "en",
|
|
@@ -2574,7 +2567,10 @@ var AssemblyAIEncoding = {
|
|
|
2574
2567
|
/** μ-law (telephony) */
|
|
2575
2568
|
pcmMulaw: "pcm_mulaw"
|
|
2576
2569
|
};
|
|
2577
|
-
var AssemblyAITranscriptionModel =
|
|
2570
|
+
var AssemblyAITranscriptionModel = {
|
|
2571
|
+
"universal-3-pro": "universal-3-pro",
|
|
2572
|
+
"universal-2": "universal-2"
|
|
2573
|
+
};
|
|
2578
2574
|
var AssemblyAILanguage = TranscriptLanguageCode;
|
|
2579
2575
|
var AssemblyAISpeechModel = {
|
|
2580
2576
|
/** Optimized for English */
|
|
@@ -5199,19 +5195,20 @@ var schema_exports2 = {};
|
|
|
5199
5195
|
__export(schema_exports2, {
|
|
5200
5196
|
AudioIntelligenceModelStatus: () => AudioIntelligenceModelStatus,
|
|
5201
5197
|
EntityType: () => EntityType,
|
|
5202
|
-
LemurModel: () => LemurModel,
|
|
5203
5198
|
PiiPolicy: () => PiiPolicy,
|
|
5204
5199
|
RedactPiiAudioQuality: () => RedactPiiAudioQuality,
|
|
5205
5200
|
RedactedAudioStatus: () => RedactedAudioStatus,
|
|
5206
5201
|
Sentiment: () => Sentiment,
|
|
5207
|
-
|
|
5202
|
+
SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType: () => SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType,
|
|
5208
5203
|
SubstitutionPolicy: () => SubstitutionPolicy,
|
|
5209
5204
|
SubtitleFormat: () => SubtitleFormat,
|
|
5210
5205
|
SummaryModel: () => SummaryModel,
|
|
5211
5206
|
SummaryType: () => SummaryType,
|
|
5212
|
-
TranscriptBoostParam: () => TranscriptBoostParam,
|
|
5213
5207
|
TranscriptLanguageCode: () => TranscriptLanguageCode,
|
|
5214
|
-
|
|
5208
|
+
TranscriptOptionalParamsRedactPiiAudioOptionsOverrideAudioRedactionMethod: () => TranscriptOptionalParamsRedactPiiAudioOptionsOverrideAudioRedactionMethod,
|
|
5209
|
+
TranscriptOptionalParamsRemoveAudioTags: () => TranscriptOptionalParamsRemoveAudioTags,
|
|
5210
|
+
TranscriptRedactPiiAudioOptionsOverrideAudioRedactionMethod: () => TranscriptRedactPiiAudioOptionsOverrideAudioRedactionMethod,
|
|
5211
|
+
TranscriptRemoveAudioTags: () => TranscriptRemoveAudioTags,
|
|
5215
5212
|
TranscriptStatus: () => TranscriptStatus
|
|
5216
5213
|
});
|
|
5217
5214
|
|
|
@@ -5269,13 +5266,6 @@ var EntityType = {
|
|
|
5269
5266
|
zodiac_sign: "zodiac_sign"
|
|
5270
5267
|
};
|
|
5271
5268
|
|
|
5272
|
-
// src/generated/assemblyai/schema/lemurModel.ts
|
|
5273
|
-
var LemurModel = {
|
|
5274
|
-
"anthropic/claude-3-5-sonnet": "anthropic/claude-3-5-sonnet",
|
|
5275
|
-
"anthropic/claude-3-opus": "anthropic/claude-3-opus",
|
|
5276
|
-
"anthropic/claude-3-haiku": "anthropic/claude-3-haiku"
|
|
5277
|
-
};
|
|
5278
|
-
|
|
5279
5269
|
// src/generated/assemblyai/schema/piiPolicy.ts
|
|
5280
5270
|
var PiiPolicy = {
|
|
5281
5271
|
account_number: "account_number",
|
|
@@ -5342,6 +5332,12 @@ var Sentiment = {
|
|
|
5342
5332
|
NEGATIVE: "NEGATIVE"
|
|
5343
5333
|
};
|
|
5344
5334
|
|
|
5335
|
+
// src/generated/assemblyai/schema/speakerIdentificationRequestBodySpeakerIdentificationSpeakerType.ts
|
|
5336
|
+
var SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType = {
|
|
5337
|
+
role: "role",
|
|
5338
|
+
name: "name"
|
|
5339
|
+
};
|
|
5340
|
+
|
|
5345
5341
|
// src/generated/assemblyai/schema/substitutionPolicy.ts
|
|
5346
5342
|
var SubstitutionPolicy = {
|
|
5347
5343
|
entity_name: "entity_name",
|
|
@@ -5370,17 +5366,24 @@ var SummaryType = {
|
|
|
5370
5366
|
paragraph: "paragraph"
|
|
5371
5367
|
};
|
|
5372
5368
|
|
|
5373
|
-
// src/generated/assemblyai/schema/
|
|
5374
|
-
var
|
|
5375
|
-
|
|
5376
|
-
default: "default",
|
|
5377
|
-
high: "high"
|
|
5369
|
+
// src/generated/assemblyai/schema/transcriptOptionalParamsRedactPiiAudioOptionsOverrideAudioRedactionMethod.ts
|
|
5370
|
+
var TranscriptOptionalParamsRedactPiiAudioOptionsOverrideAudioRedactionMethod = {
|
|
5371
|
+
silence: "silence"
|
|
5378
5372
|
};
|
|
5379
5373
|
|
|
5380
|
-
// src/generated/assemblyai/schema/
|
|
5381
|
-
var
|
|
5382
|
-
|
|
5383
|
-
|
|
5374
|
+
// src/generated/assemblyai/schema/transcriptOptionalParamsRemoveAudioTags.ts
|
|
5375
|
+
var TranscriptOptionalParamsRemoveAudioTags = {
|
|
5376
|
+
all: "all"
|
|
5377
|
+
};
|
|
5378
|
+
|
|
5379
|
+
// src/generated/assemblyai/schema/transcriptRedactPiiAudioOptionsOverrideAudioRedactionMethod.ts
|
|
5380
|
+
var TranscriptRedactPiiAudioOptionsOverrideAudioRedactionMethod = {
|
|
5381
|
+
silence: "silence"
|
|
5382
|
+
};
|
|
5383
|
+
|
|
5384
|
+
// src/generated/assemblyai/schema/transcriptRemoveAudioTags.ts
|
|
5385
|
+
var TranscriptRemoveAudioTags = {
|
|
5386
|
+
all: "all"
|
|
5384
5387
|
};
|
|
5385
5388
|
|
|
5386
5389
|
// src/generated/assemblyai/api/assemblyAIAPI.ts
|
|
@@ -5399,9 +5402,6 @@ var getTranscript = (transcriptId, options) => {
|
|
|
5399
5402
|
var deleteTranscript = (transcriptId, options) => {
|
|
5400
5403
|
return axios2.delete(`/v2/transcript/${transcriptId}`, options);
|
|
5401
5404
|
};
|
|
5402
|
-
var createTemporaryToken = (createRealtimeTemporaryTokenParams, options) => {
|
|
5403
|
-
return axios2.post(`/v2/realtime/token`, createRealtimeTemporaryTokenParams, options);
|
|
5404
|
-
};
|
|
5405
5405
|
|
|
5406
5406
|
// src/adapters/assemblyai-adapter.ts
|
|
5407
5407
|
var AssemblyAIAdapter = class extends BaseAdapter {
|
|
@@ -5740,13 +5740,15 @@ var AssemblyAIAdapter = class extends BaseAdapter {
|
|
|
5740
5740
|
const request = {
|
|
5741
5741
|
...options?.assemblyai,
|
|
5742
5742
|
audio_url: audioUrl,
|
|
5743
|
+
// speech_models is required — default to universal-3-pro
|
|
5744
|
+
speech_models: options?.assemblyai?.speech_models ?? ["universal-3-pro"],
|
|
5743
5745
|
// Enable punctuation and formatting by default
|
|
5744
5746
|
punctuate: options?.assemblyai?.punctuate ?? true,
|
|
5745
5747
|
format_text: options?.assemblyai?.format_text ?? true
|
|
5746
5748
|
};
|
|
5747
5749
|
if (options) {
|
|
5748
5750
|
if (options.model) {
|
|
5749
|
-
request.
|
|
5751
|
+
request.speech_models = [options.model];
|
|
5750
5752
|
}
|
|
5751
5753
|
if (options.language) {
|
|
5752
5754
|
const languageCode = options.language.includes("_") ? options.language : `${options.language}_us`;
|
|
@@ -5762,8 +5764,7 @@ var AssemblyAIAdapter = class extends BaseAdapter {
|
|
|
5762
5764
|
}
|
|
5763
5765
|
}
|
|
5764
5766
|
if (options.customVocabulary && options.customVocabulary.length > 0) {
|
|
5765
|
-
request.
|
|
5766
|
-
request.boost_param = request.boost_param ?? "high";
|
|
5767
|
+
request.keyterms_prompt = options.customVocabulary;
|
|
5767
5768
|
}
|
|
5768
5769
|
if (options.summarization) {
|
|
5769
5770
|
request.summarization = true;
|
|
@@ -11543,15 +11544,10 @@ var deepgramStreamingOnlyParams = zod2.object({
|
|
|
11543
11544
|
// src/generated/assemblyai/api/assemblyAIAPI.zod.ts
|
|
11544
11545
|
var assemblyAIAPI_zod_exports = {};
|
|
11545
11546
|
__export(assemblyAIAPI_zod_exports, {
|
|
11546
|
-
createTemporaryTokenBody: () => createTemporaryTokenBody,
|
|
11547
|
-
createTemporaryTokenBodyExpiresInMin: () => createTemporaryTokenBodyExpiresInMin,
|
|
11548
|
-
createTemporaryTokenResponse: () => createTemporaryTokenResponse,
|
|
11549
11547
|
createTranscriptBody: () => createTranscriptBody,
|
|
11550
11548
|
createTranscriptBodyAutoChaptersDefault: () => createTranscriptBodyAutoChaptersDefault,
|
|
11551
11549
|
createTranscriptBodyAutoHighlightsDefault: () => createTranscriptBodyAutoHighlightsDefault,
|
|
11552
11550
|
createTranscriptBodyContentSafetyConfidenceDefault: () => createTranscriptBodyContentSafetyConfidenceDefault,
|
|
11553
|
-
createTranscriptBodyContentSafetyConfidenceMax: () => createTranscriptBodyContentSafetyConfidenceMax,
|
|
11554
|
-
createTranscriptBodyContentSafetyConfidenceMin: () => createTranscriptBodyContentSafetyConfidenceMin,
|
|
11555
11551
|
createTranscriptBodyContentSafetyDefault: () => createTranscriptBodyContentSafetyDefault,
|
|
11556
11552
|
createTranscriptBodyCustomTopicsDefault: () => createTranscriptBodyCustomTopicsDefault,
|
|
11557
11553
|
createTranscriptBodyDisfluenciesDefault: () => createTranscriptBodyDisfluenciesDefault,
|
|
@@ -11559,198 +11555,58 @@ __export(assemblyAIAPI_zod_exports, {
|
|
|
11559
11555
|
createTranscriptBodyFilterProfanityDefault: () => createTranscriptBodyFilterProfanityDefault,
|
|
11560
11556
|
createTranscriptBodyFormatTextDefault: () => createTranscriptBodyFormatTextDefault,
|
|
11561
11557
|
createTranscriptBodyIabCategoriesDefault: () => createTranscriptBodyIabCategoriesDefault,
|
|
11562
|
-
createTranscriptBodyLanguageCodeDefault: () => createTranscriptBodyLanguageCodeDefault,
|
|
11563
|
-
createTranscriptBodyLanguageConfidenceThresholdDefault: () => createTranscriptBodyLanguageConfidenceThresholdDefault,
|
|
11564
|
-
createTranscriptBodyLanguageConfidenceThresholdMax: () => createTranscriptBodyLanguageConfidenceThresholdMax,
|
|
11565
|
-
createTranscriptBodyLanguageConfidenceThresholdMin: () => createTranscriptBodyLanguageConfidenceThresholdMin,
|
|
11566
11558
|
createTranscriptBodyLanguageDetectionDefault: () => createTranscriptBodyLanguageDetectionDefault,
|
|
11559
|
+
createTranscriptBodyLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault: () => createTranscriptBodyLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault,
|
|
11560
|
+
createTranscriptBodyLanguageDetectionOptionsCodeSwitchingDefault: () => createTranscriptBodyLanguageDetectionOptionsCodeSwitchingDefault,
|
|
11561
|
+
createTranscriptBodyLanguageDetectionOptionsFallbackLanguageDefault: () => createTranscriptBodyLanguageDetectionOptionsFallbackLanguageDefault,
|
|
11567
11562
|
createTranscriptBodyMultichannelDefault: () => createTranscriptBodyMultichannelDefault,
|
|
11568
11563
|
createTranscriptBodyPunctuateDefault: () => createTranscriptBodyPunctuateDefault,
|
|
11569
11564
|
createTranscriptBodyRedactPiiAudioDefault: () => createTranscriptBodyRedactPiiAudioDefault,
|
|
11565
|
+
createTranscriptBodyRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault: () => createTranscriptBodyRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault,
|
|
11570
11566
|
createTranscriptBodyRedactPiiDefault: () => createTranscriptBodyRedactPiiDefault,
|
|
11571
|
-
createTranscriptBodyRedactPiiSubDefault: () => createTranscriptBodyRedactPiiSubDefault,
|
|
11572
11567
|
createTranscriptBodySentimentAnalysisDefault: () => createTranscriptBodySentimentAnalysisDefault,
|
|
11573
11568
|
createTranscriptBodySpeakerLabelsDefault: () => createTranscriptBodySpeakerLabelsDefault,
|
|
11574
|
-
|
|
11575
|
-
|
|
11576
|
-
|
|
11577
|
-
createTranscriptBodySpeechThresholdMax: () => createTranscriptBodySpeechThresholdMax,
|
|
11578
|
-
createTranscriptBodySpeechThresholdMin: () => createTranscriptBodySpeechThresholdMin,
|
|
11569
|
+
createTranscriptBodySpeakerOptionsMinSpeakersExpectedDefault: () => createTranscriptBodySpeakerOptionsMinSpeakersExpectedDefault,
|
|
11570
|
+
createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault: () => createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault,
|
|
11571
|
+
createTranscriptBodySpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault: () => createTranscriptBodySpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault,
|
|
11579
11572
|
createTranscriptBodySummarizationDefault: () => createTranscriptBodySummarizationDefault,
|
|
11580
|
-
|
|
11581
|
-
createTranscriptBodyWebhookAuthHeaderValueDefault: () => createTranscriptBodyWebhookAuthHeaderValueDefault,
|
|
11573
|
+
createTranscriptBodyTemperatureDefault: () => createTranscriptBodyTemperatureDefault,
|
|
11582
11574
|
createTranscriptResponse: () => createTranscriptResponse,
|
|
11583
|
-
|
|
11584
|
-
|
|
11585
|
-
|
|
11586
|
-
|
|
11587
|
-
|
|
11588
|
-
|
|
11589
|
-
createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax: () => createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax,
|
|
11590
|
-
createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin: () => createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin,
|
|
11591
|
-
createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax: () => createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax,
|
|
11592
|
-
createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin: () => createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin,
|
|
11593
|
-
createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax: () => createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax,
|
|
11594
|
-
createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin: () => createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin,
|
|
11595
|
-
createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax: () => createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax,
|
|
11596
|
-
createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin: () => createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin,
|
|
11597
|
-
createTranscriptResponseContentSafetyLabelsSummaryMaxOne: () => createTranscriptResponseContentSafetyLabelsSummaryMaxOne,
|
|
11598
|
-
createTranscriptResponseContentSafetyLabelsSummaryMinOne: () => createTranscriptResponseContentSafetyLabelsSummaryMinOne,
|
|
11599
|
-
createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax: () => createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax,
|
|
11600
|
-
createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin: () => createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin,
|
|
11601
|
-
createTranscriptResponseIabCategoriesResultSummaryMaxOne: () => createTranscriptResponseIabCategoriesResultSummaryMaxOne,
|
|
11602
|
-
createTranscriptResponseIabCategoriesResultSummaryMinOne: () => createTranscriptResponseIabCategoriesResultSummaryMinOne,
|
|
11603
|
-
createTranscriptResponseLanguageConfidenceMax: () => createTranscriptResponseLanguageConfidenceMax,
|
|
11604
|
-
createTranscriptResponseLanguageConfidenceMin: () => createTranscriptResponseLanguageConfidenceMin,
|
|
11605
|
-
createTranscriptResponseLanguageConfidenceThresholdMax: () => createTranscriptResponseLanguageConfidenceThresholdMax,
|
|
11606
|
-
createTranscriptResponseLanguageConfidenceThresholdMin: () => createTranscriptResponseLanguageConfidenceThresholdMin,
|
|
11607
|
-
createTranscriptResponseSentimentAnalysisResultsItemConfidenceMax: () => createTranscriptResponseSentimentAnalysisResultsItemConfidenceMax,
|
|
11608
|
-
createTranscriptResponseSentimentAnalysisResultsItemConfidenceMin: () => createTranscriptResponseSentimentAnalysisResultsItemConfidenceMin,
|
|
11609
|
-
createTranscriptResponseSpeechModelDefault: () => createTranscriptResponseSpeechModelDefault,
|
|
11610
|
-
createTranscriptResponseSpeechThresholdMax: () => createTranscriptResponseSpeechThresholdMax,
|
|
11611
|
-
createTranscriptResponseSpeechThresholdMin: () => createTranscriptResponseSpeechThresholdMin,
|
|
11612
|
-
createTranscriptResponseUtterancesItemConfidenceMax: () => createTranscriptResponseUtterancesItemConfidenceMax,
|
|
11613
|
-
createTranscriptResponseUtterancesItemConfidenceMin: () => createTranscriptResponseUtterancesItemConfidenceMin,
|
|
11614
|
-
createTranscriptResponseUtterancesItemWordsItemConfidenceMax: () => createTranscriptResponseUtterancesItemWordsItemConfidenceMax,
|
|
11615
|
-
createTranscriptResponseUtterancesItemWordsItemConfidenceMin: () => createTranscriptResponseUtterancesItemWordsItemConfidenceMin,
|
|
11616
|
-
createTranscriptResponseWordsItemConfidenceMax: () => createTranscriptResponseWordsItemConfidenceMax,
|
|
11617
|
-
createTranscriptResponseWordsItemConfidenceMin: () => createTranscriptResponseWordsItemConfidenceMin,
|
|
11575
|
+
createTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault: () => createTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault,
|
|
11576
|
+
createTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault: () => createTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault,
|
|
11577
|
+
createTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault: () => createTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault,
|
|
11578
|
+
createTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault: () => createTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault,
|
|
11579
|
+
createTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault: () => createTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault,
|
|
11580
|
+
createTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault: () => createTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault,
|
|
11618
11581
|
deleteTranscriptParams: () => deleteTranscriptParams,
|
|
11619
11582
|
deleteTranscriptResponse: () => deleteTranscriptResponse,
|
|
11620
|
-
|
|
11621
|
-
|
|
11622
|
-
|
|
11623
|
-
|
|
11624
|
-
|
|
11625
|
-
|
|
11626
|
-
deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax: () => deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax,
|
|
11627
|
-
deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin: () => deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin,
|
|
11628
|
-
deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax: () => deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax,
|
|
11629
|
-
deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin: () => deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin,
|
|
11630
|
-
deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax: () => deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax,
|
|
11631
|
-
deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin: () => deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin,
|
|
11632
|
-
deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax: () => deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax,
|
|
11633
|
-
deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin: () => deleteTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin,
|
|
11634
|
-
deleteTranscriptResponseContentSafetyLabelsSummaryMaxOne: () => deleteTranscriptResponseContentSafetyLabelsSummaryMaxOne,
|
|
11635
|
-
deleteTranscriptResponseContentSafetyLabelsSummaryMinOne: () => deleteTranscriptResponseContentSafetyLabelsSummaryMinOne,
|
|
11636
|
-
deleteTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax: () => deleteTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax,
|
|
11637
|
-
deleteTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin: () => deleteTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin,
|
|
11638
|
-
deleteTranscriptResponseIabCategoriesResultSummaryMaxOne: () => deleteTranscriptResponseIabCategoriesResultSummaryMaxOne,
|
|
11639
|
-
deleteTranscriptResponseIabCategoriesResultSummaryMinOne: () => deleteTranscriptResponseIabCategoriesResultSummaryMinOne,
|
|
11640
|
-
deleteTranscriptResponseLanguageConfidenceMax: () => deleteTranscriptResponseLanguageConfidenceMax,
|
|
11641
|
-
deleteTranscriptResponseLanguageConfidenceMin: () => deleteTranscriptResponseLanguageConfidenceMin,
|
|
11642
|
-
deleteTranscriptResponseLanguageConfidenceThresholdMax: () => deleteTranscriptResponseLanguageConfidenceThresholdMax,
|
|
11643
|
-
deleteTranscriptResponseLanguageConfidenceThresholdMin: () => deleteTranscriptResponseLanguageConfidenceThresholdMin,
|
|
11644
|
-
deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMax: () => deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMax,
|
|
11645
|
-
deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMin: () => deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMin,
|
|
11646
|
-
deleteTranscriptResponseSpeechModelDefault: () => deleteTranscriptResponseSpeechModelDefault,
|
|
11647
|
-
deleteTranscriptResponseSpeechThresholdMax: () => deleteTranscriptResponseSpeechThresholdMax,
|
|
11648
|
-
deleteTranscriptResponseSpeechThresholdMin: () => deleteTranscriptResponseSpeechThresholdMin,
|
|
11649
|
-
deleteTranscriptResponseUtterancesItemConfidenceMax: () => deleteTranscriptResponseUtterancesItemConfidenceMax,
|
|
11650
|
-
deleteTranscriptResponseUtterancesItemConfidenceMin: () => deleteTranscriptResponseUtterancesItemConfidenceMin,
|
|
11651
|
-
deleteTranscriptResponseUtterancesItemWordsItemConfidenceMax: () => deleteTranscriptResponseUtterancesItemWordsItemConfidenceMax,
|
|
11652
|
-
deleteTranscriptResponseUtterancesItemWordsItemConfidenceMin: () => deleteTranscriptResponseUtterancesItemWordsItemConfidenceMin,
|
|
11653
|
-
deleteTranscriptResponseWordsItemConfidenceMax: () => deleteTranscriptResponseWordsItemConfidenceMax,
|
|
11654
|
-
deleteTranscriptResponseWordsItemConfidenceMin: () => deleteTranscriptResponseWordsItemConfidenceMin,
|
|
11655
|
-
getLemurResponseParams: () => getLemurResponseParams,
|
|
11656
|
-
getLemurResponseResponse: () => getLemurResponseResponse,
|
|
11657
|
-
getLemurResponseResponseUsageInputTokensMin: () => getLemurResponseResponseUsageInputTokensMin,
|
|
11658
|
-
getLemurResponseResponseUsageInputTokensMinOne: () => getLemurResponseResponseUsageInputTokensMinOne,
|
|
11659
|
-
getLemurResponseResponseUsageOutputTokensMin: () => getLemurResponseResponseUsageOutputTokensMin,
|
|
11660
|
-
getLemurResponseResponseUsageOutputTokensMinOne: () => getLemurResponseResponseUsageOutputTokensMinOne,
|
|
11583
|
+
deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault: () => deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault,
|
|
11584
|
+
deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault: () => deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault,
|
|
11585
|
+
deleteTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault: () => deleteTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault,
|
|
11586
|
+
deleteTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault: () => deleteTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault,
|
|
11587
|
+
deleteTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault: () => deleteTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault,
|
|
11588
|
+
deleteTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault: () => deleteTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault,
|
|
11661
11589
|
getRedactedAudioParams: () => getRedactedAudioParams,
|
|
11662
11590
|
getRedactedAudioResponse: () => getRedactedAudioResponse,
|
|
11663
11591
|
getSubtitlesParams: () => getSubtitlesParams,
|
|
11664
11592
|
getSubtitlesQueryParams: () => getSubtitlesQueryParams,
|
|
11593
|
+
getSubtitlesResponse: () => getSubtitlesResponse,
|
|
11665
11594
|
getTranscriptParagraphsParams: () => getTranscriptParagraphsParams,
|
|
11666
11595
|
getTranscriptParagraphsResponse: () => getTranscriptParagraphsResponse,
|
|
11667
|
-
getTranscriptParagraphsResponseConfidenceMax: () => getTranscriptParagraphsResponseConfidenceMax,
|
|
11668
|
-
getTranscriptParagraphsResponseConfidenceMin: () => getTranscriptParagraphsResponseConfidenceMin,
|
|
11669
|
-
getTranscriptParagraphsResponseParagraphsItemConfidenceMax: () => getTranscriptParagraphsResponseParagraphsItemConfidenceMax,
|
|
11670
|
-
getTranscriptParagraphsResponseParagraphsItemConfidenceMin: () => getTranscriptParagraphsResponseParagraphsItemConfidenceMin,
|
|
11671
|
-
getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMax: () => getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMax,
|
|
11672
|
-
getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMin: () => getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMin,
|
|
11673
11596
|
getTranscriptParams: () => getTranscriptParams,
|
|
11674
11597
|
getTranscriptResponse: () => getTranscriptResponse,
|
|
11675
|
-
|
|
11676
|
-
|
|
11677
|
-
|
|
11678
|
-
|
|
11679
|
-
|
|
11680
|
-
|
|
11681
|
-
getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax: () => getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax,
|
|
11682
|
-
getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin: () => getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin,
|
|
11683
|
-
getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax: () => getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax,
|
|
11684
|
-
getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin: () => getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin,
|
|
11685
|
-
getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax: () => getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax,
|
|
11686
|
-
getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin: () => getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin,
|
|
11687
|
-
getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax: () => getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax,
|
|
11688
|
-
getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin: () => getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin,
|
|
11689
|
-
getTranscriptResponseContentSafetyLabelsSummaryMaxOne: () => getTranscriptResponseContentSafetyLabelsSummaryMaxOne,
|
|
11690
|
-
getTranscriptResponseContentSafetyLabelsSummaryMinOne: () => getTranscriptResponseContentSafetyLabelsSummaryMinOne,
|
|
11691
|
-
getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax: () => getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax,
|
|
11692
|
-
getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin: () => getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin,
|
|
11693
|
-
getTranscriptResponseIabCategoriesResultSummaryMaxOne: () => getTranscriptResponseIabCategoriesResultSummaryMaxOne,
|
|
11694
|
-
getTranscriptResponseIabCategoriesResultSummaryMinOne: () => getTranscriptResponseIabCategoriesResultSummaryMinOne,
|
|
11695
|
-
getTranscriptResponseLanguageConfidenceMax: () => getTranscriptResponseLanguageConfidenceMax,
|
|
11696
|
-
getTranscriptResponseLanguageConfidenceMin: () => getTranscriptResponseLanguageConfidenceMin,
|
|
11697
|
-
getTranscriptResponseLanguageConfidenceThresholdMax: () => getTranscriptResponseLanguageConfidenceThresholdMax,
|
|
11698
|
-
getTranscriptResponseLanguageConfidenceThresholdMin: () => getTranscriptResponseLanguageConfidenceThresholdMin,
|
|
11699
|
-
getTranscriptResponseSentimentAnalysisResultsItemConfidenceMax: () => getTranscriptResponseSentimentAnalysisResultsItemConfidenceMax,
|
|
11700
|
-
getTranscriptResponseSentimentAnalysisResultsItemConfidenceMin: () => getTranscriptResponseSentimentAnalysisResultsItemConfidenceMin,
|
|
11701
|
-
getTranscriptResponseSpeechModelDefault: () => getTranscriptResponseSpeechModelDefault,
|
|
11702
|
-
getTranscriptResponseSpeechThresholdMax: () => getTranscriptResponseSpeechThresholdMax,
|
|
11703
|
-
getTranscriptResponseSpeechThresholdMin: () => getTranscriptResponseSpeechThresholdMin,
|
|
11704
|
-
getTranscriptResponseUtterancesItemConfidenceMax: () => getTranscriptResponseUtterancesItemConfidenceMax,
|
|
11705
|
-
getTranscriptResponseUtterancesItemConfidenceMin: () => getTranscriptResponseUtterancesItemConfidenceMin,
|
|
11706
|
-
getTranscriptResponseUtterancesItemWordsItemConfidenceMax: () => getTranscriptResponseUtterancesItemWordsItemConfidenceMax,
|
|
11707
|
-
getTranscriptResponseUtterancesItemWordsItemConfidenceMin: () => getTranscriptResponseUtterancesItemWordsItemConfidenceMin,
|
|
11708
|
-
getTranscriptResponseWordsItemConfidenceMax: () => getTranscriptResponseWordsItemConfidenceMax,
|
|
11709
|
-
getTranscriptResponseWordsItemConfidenceMin: () => getTranscriptResponseWordsItemConfidenceMin,
|
|
11598
|
+
getTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault: () => getTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault,
|
|
11599
|
+
getTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault: () => getTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault,
|
|
11600
|
+
getTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault: () => getTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault,
|
|
11601
|
+
getTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault: () => getTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault,
|
|
11602
|
+
getTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault: () => getTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault,
|
|
11603
|
+
getTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault: () => getTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault,
|
|
11710
11604
|
getTranscriptSentencesParams: () => getTranscriptSentencesParams,
|
|
11711
11605
|
getTranscriptSentencesResponse: () => getTranscriptSentencesResponse,
|
|
11712
|
-
getTranscriptSentencesResponseConfidenceMax: () => getTranscriptSentencesResponseConfidenceMax,
|
|
11713
|
-
getTranscriptSentencesResponseConfidenceMin: () => getTranscriptSentencesResponseConfidenceMin,
|
|
11714
|
-
getTranscriptSentencesResponseSentencesItemConfidenceMax: () => getTranscriptSentencesResponseSentencesItemConfidenceMax,
|
|
11715
|
-
getTranscriptSentencesResponseSentencesItemConfidenceMin: () => getTranscriptSentencesResponseSentencesItemConfidenceMin,
|
|
11716
|
-
getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMax: () => getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMax,
|
|
11717
|
-
getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMin: () => getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMin,
|
|
11718
|
-
lemurQuestionAnswerBody: () => lemurQuestionAnswerBody,
|
|
11719
|
-
lemurQuestionAnswerBodyFinalModelDefault: () => lemurQuestionAnswerBodyFinalModelDefault,
|
|
11720
|
-
lemurQuestionAnswerBodyMaxOutputSizeDefault: () => lemurQuestionAnswerBodyMaxOutputSizeDefault,
|
|
11721
|
-
lemurQuestionAnswerBodyTemperatureDefault: () => lemurQuestionAnswerBodyTemperatureDefault,
|
|
11722
|
-
lemurQuestionAnswerBodyTemperatureMax: () => lemurQuestionAnswerBodyTemperatureMax,
|
|
11723
|
-
lemurQuestionAnswerBodyTemperatureMin: () => lemurQuestionAnswerBodyTemperatureMin,
|
|
11724
|
-
lemurQuestionAnswerResponse: () => lemurQuestionAnswerResponse,
|
|
11725
|
-
lemurQuestionAnswerResponseUsageInputTokensMin: () => lemurQuestionAnswerResponseUsageInputTokensMin,
|
|
11726
|
-
lemurQuestionAnswerResponseUsageOutputTokensMin: () => lemurQuestionAnswerResponseUsageOutputTokensMin,
|
|
11727
|
-
lemurSummaryBody: () => lemurSummaryBody,
|
|
11728
|
-
lemurSummaryBodyFinalModelDefault: () => lemurSummaryBodyFinalModelDefault,
|
|
11729
|
-
lemurSummaryBodyMaxOutputSizeDefault: () => lemurSummaryBodyMaxOutputSizeDefault,
|
|
11730
|
-
lemurSummaryBodyTemperatureDefault: () => lemurSummaryBodyTemperatureDefault,
|
|
11731
|
-
lemurSummaryBodyTemperatureMax: () => lemurSummaryBodyTemperatureMax,
|
|
11732
|
-
lemurSummaryBodyTemperatureMin: () => lemurSummaryBodyTemperatureMin,
|
|
11733
|
-
lemurSummaryResponse: () => lemurSummaryResponse,
|
|
11734
|
-
lemurSummaryResponseUsageInputTokensMin: () => lemurSummaryResponseUsageInputTokensMin,
|
|
11735
|
-
lemurSummaryResponseUsageOutputTokensMin: () => lemurSummaryResponseUsageOutputTokensMin,
|
|
11736
|
-
lemurTaskBody: () => lemurTaskBody,
|
|
11737
|
-
lemurTaskBodyFinalModelDefault: () => lemurTaskBodyFinalModelDefault,
|
|
11738
|
-
lemurTaskBodyMaxOutputSizeDefault: () => lemurTaskBodyMaxOutputSizeDefault,
|
|
11739
|
-
lemurTaskBodyTemperatureDefault: () => lemurTaskBodyTemperatureDefault,
|
|
11740
|
-
lemurTaskBodyTemperatureMax: () => lemurTaskBodyTemperatureMax,
|
|
11741
|
-
lemurTaskBodyTemperatureMin: () => lemurTaskBodyTemperatureMin,
|
|
11742
|
-
lemurTaskResponse: () => lemurTaskResponse,
|
|
11743
|
-
lemurTaskResponseUsageInputTokensMin: () => lemurTaskResponseUsageInputTokensMin,
|
|
11744
|
-
lemurTaskResponseUsageOutputTokensMin: () => lemurTaskResponseUsageOutputTokensMin,
|
|
11745
11606
|
listTranscriptsQueryLimitDefault: () => listTranscriptsQueryLimitDefault,
|
|
11746
|
-
listTranscriptsQueryLimitMax: () => listTranscriptsQueryLimitMax,
|
|
11747
11607
|
listTranscriptsQueryParams: () => listTranscriptsQueryParams,
|
|
11748
11608
|
listTranscriptsQueryThrottledOnlyDefault: () => listTranscriptsQueryThrottledOnlyDefault,
|
|
11749
11609
|
listTranscriptsResponse: () => listTranscriptsResponse,
|
|
11750
|
-
listTranscriptsResponseTranscriptsItemCompletedRegExp: () => listTranscriptsResponseTranscriptsItemCompletedRegExp,
|
|
11751
|
-
listTranscriptsResponseTranscriptsItemCreatedRegExp: () => listTranscriptsResponseTranscriptsItemCreatedRegExp,
|
|
11752
|
-
purgeLemurRequestDataParams: () => purgeLemurRequestDataParams,
|
|
11753
|
-
purgeLemurRequestDataResponse: () => purgeLemurRequestDataResponse,
|
|
11754
11610
|
uploadFileResponse: () => uploadFileResponse,
|
|
11755
11611
|
wordSearchParams: () => wordSearchParams,
|
|
11756
11612
|
wordSearchQueryParams: () => wordSearchQueryParams,
|
|
@@ -11760,43 +11616,192 @@ import { z as zod3 } from "zod";
|
|
|
11760
11616
|
var uploadFileResponse = zod3.object({
|
|
11761
11617
|
upload_url: zod3.string().describe("A URL that points to your audio file, accessible only by AssemblyAI's servers\n")
|
|
11762
11618
|
});
|
|
11763
|
-
var
|
|
11764
|
-
var createTranscriptBodyLanguageDetectionDefault = false;
|
|
11765
|
-
var createTranscriptBodyLanguageConfidenceThresholdDefault = 0;
|
|
11766
|
-
var createTranscriptBodyLanguageConfidenceThresholdMin = 0;
|
|
11767
|
-
var createTranscriptBodyLanguageConfidenceThresholdMax = 1;
|
|
11768
|
-
var createTranscriptBodySpeechModelDefault = "best";
|
|
11769
|
-
var createTranscriptBodyPunctuateDefault = true;
|
|
11770
|
-
var createTranscriptBodyFormatTextDefault = true;
|
|
11771
|
-
var createTranscriptBodyDisfluenciesDefault = false;
|
|
11772
|
-
var createTranscriptBodyMultichannelDefault = false;
|
|
11773
|
-
var createTranscriptBodyWebhookAuthHeaderNameDefault = null;
|
|
11774
|
-
var createTranscriptBodyWebhookAuthHeaderValueDefault = null;
|
|
11619
|
+
var createTranscriptBodyAutoChaptersDefault = false;
|
|
11775
11620
|
var createTranscriptBodyAutoHighlightsDefault = false;
|
|
11776
|
-
var createTranscriptBodyFilterProfanityDefault = false;
|
|
11777
|
-
var createTranscriptBodyRedactPiiDefault = false;
|
|
11778
|
-
var createTranscriptBodyRedactPiiAudioDefault = false;
|
|
11779
|
-
var createTranscriptBodyRedactPiiSubDefault = "hash";
|
|
11780
|
-
var createTranscriptBodySpeakerLabelsDefault = false;
|
|
11781
|
-
var createTranscriptBodySpeakersExpectedDefault = null;
|
|
11782
11621
|
var createTranscriptBodyContentSafetyDefault = false;
|
|
11783
11622
|
var createTranscriptBodyContentSafetyConfidenceDefault = 50;
|
|
11784
|
-
var
|
|
11785
|
-
var
|
|
11623
|
+
var createTranscriptBodyDisfluenciesDefault = false;
|
|
11624
|
+
var createTranscriptBodyEntityDetectionDefault = false;
|
|
11625
|
+
var createTranscriptBodyFilterProfanityDefault = false;
|
|
11626
|
+
var createTranscriptBodyFormatTextDefault = true;
|
|
11786
11627
|
var createTranscriptBodyIabCategoriesDefault = false;
|
|
11628
|
+
var createTranscriptBodyLanguageDetectionDefault = false;
|
|
11629
|
+
var createTranscriptBodyLanguageDetectionOptionsFallbackLanguageDefault = "auto";
|
|
11630
|
+
var createTranscriptBodyLanguageDetectionOptionsCodeSwitchingDefault = false;
|
|
11631
|
+
var createTranscriptBodyLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault = 0.3;
|
|
11632
|
+
var createTranscriptBodyMultichannelDefault = false;
|
|
11633
|
+
var createTranscriptBodyPunctuateDefault = true;
|
|
11634
|
+
var createTranscriptBodyRedactPiiDefault = false;
|
|
11635
|
+
var createTranscriptBodyRedactPiiAudioDefault = false;
|
|
11636
|
+
var createTranscriptBodyRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault = false;
|
|
11787
11637
|
var createTranscriptBodySentimentAnalysisDefault = false;
|
|
11788
|
-
var
|
|
11789
|
-
var
|
|
11790
|
-
var
|
|
11791
|
-
var
|
|
11792
|
-
var createTranscriptBodySpeechThresholdMax = 1;
|
|
11638
|
+
var createTranscriptBodySpeakerLabelsDefault = false;
|
|
11639
|
+
var createTranscriptBodySpeakerOptionsMinSpeakersExpectedDefault = 1;
|
|
11640
|
+
var createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault = true;
|
|
11641
|
+
var createTranscriptBodySpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
|
|
11793
11642
|
var createTranscriptBodySummarizationDefault = false;
|
|
11643
|
+
var createTranscriptBodyTemperatureDefault = 0;
|
|
11794
11644
|
var createTranscriptBodyCustomTopicsDefault = false;
|
|
11795
11645
|
var createTranscriptBody = zod3.object({
|
|
11796
|
-
|
|
11797
|
-
|
|
11798
|
-
|
|
11799
|
-
|
|
11646
|
+
audio_end_at: zod3.number().optional().describe(
|
|
11647
|
+
"The point in time, in milliseconds, to stop transcribing in your media file. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
|
|
11648
|
+
),
|
|
11649
|
+
audio_start_from: zod3.number().optional().describe(
|
|
11650
|
+
"The point in time, in milliseconds, to begin transcribing in your media file. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
|
|
11651
|
+
),
|
|
11652
|
+
auto_chapters: zod3.boolean().optional().describe(
|
|
11653
|
+
"Enable [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
|
|
11654
|
+
),
|
|
11655
|
+
auto_highlights: zod3.boolean().optional().describe(
|
|
11656
|
+
"Enable [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases), either true or false"
|
|
11657
|
+
),
|
|
11658
|
+
content_safety: zod3.boolean().optional().describe(
|
|
11659
|
+
"Enable [Content Moderation](https://www.assemblyai.com/docs/content-moderation), can be true or false"
|
|
11660
|
+
),
|
|
11661
|
+
content_safety_confidence: zod3.number().default(createTranscriptBodyContentSafetyConfidenceDefault).describe(
|
|
11662
|
+
"The confidence threshold for the [Content Moderation](https://www.assemblyai.com/docs/content-moderation) model. Values must be between 25 and 100."
|
|
11663
|
+
),
|
|
11664
|
+
custom_spelling: zod3.array(
|
|
11665
|
+
zod3.object({
|
|
11666
|
+
from: zod3.array(zod3.string()).describe("Words or phrases to replace"),
|
|
11667
|
+
to: zod3.string().describe("Word to replace with")
|
|
11668
|
+
}).describe(
|
|
11669
|
+
"Object containing words or phrases to replace, and the word or phrase to replace with"
|
|
11670
|
+
)
|
|
11671
|
+
).optional().describe(
|
|
11672
|
+
"Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
|
|
11673
|
+
),
|
|
11674
|
+
disfluencies: zod3.boolean().optional().describe(
|
|
11675
|
+
'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
|
|
11676
|
+
),
|
|
11677
|
+
domain: zod3.string().nullish().describe(
|
|
11678
|
+
'Enable domain-specific transcription models to improve accuracy for specialized terminology. Set to `"medical-v1"` to enable [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) for improved accuracy of medical terms such as medications, procedures, conditions, and dosages.\n\nSupported languages: English (`en`), Spanish (`es`), German (`de`), French (`fr`). If used with an unsupported language, the parameter is ignored and a warning is returned.\n'
|
|
11679
|
+
),
|
|
11680
|
+
entity_detection: zod3.boolean().optional().describe(
|
|
11681
|
+
"Enable [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection), can be true or false"
|
|
11682
|
+
),
|
|
11683
|
+
filter_profanity: zod3.boolean().optional().describe(
|
|
11684
|
+
"Filter profanity from the transcribed text, can be true or false. See [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) for more details."
|
|
11685
|
+
),
|
|
11686
|
+
format_text: zod3.boolean().default(createTranscriptBodyFormatTextDefault).describe(
|
|
11687
|
+
"Enable [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio), can be true or false"
|
|
11688
|
+
),
|
|
11689
|
+
iab_categories: zod3.boolean().optional().describe(
|
|
11690
|
+
"Enable [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection), can be true or false"
|
|
11691
|
+
),
|
|
11692
|
+
keyterms_prompt: zod3.array(zod3.string()).optional().describe(
|
|
11693
|
+
"Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3 Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.\n"
|
|
11694
|
+
),
|
|
11695
|
+
language_code: zod3.enum([
|
|
11696
|
+
"en",
|
|
11697
|
+
"en_au",
|
|
11698
|
+
"en_uk",
|
|
11699
|
+
"en_us",
|
|
11700
|
+
"es",
|
|
11701
|
+
"fr",
|
|
11702
|
+
"de",
|
|
11703
|
+
"it",
|
|
11704
|
+
"pt",
|
|
11705
|
+
"nl",
|
|
11706
|
+
"af",
|
|
11707
|
+
"sq",
|
|
11708
|
+
"am",
|
|
11709
|
+
"ar",
|
|
11710
|
+
"hy",
|
|
11711
|
+
"as",
|
|
11712
|
+
"az",
|
|
11713
|
+
"ba",
|
|
11714
|
+
"eu",
|
|
11715
|
+
"be",
|
|
11716
|
+
"bn",
|
|
11717
|
+
"bs",
|
|
11718
|
+
"br",
|
|
11719
|
+
"bg",
|
|
11720
|
+
"my",
|
|
11721
|
+
"ca",
|
|
11722
|
+
"zh",
|
|
11723
|
+
"hr",
|
|
11724
|
+
"cs",
|
|
11725
|
+
"da",
|
|
11726
|
+
"et",
|
|
11727
|
+
"fo",
|
|
11728
|
+
"fi",
|
|
11729
|
+
"gl",
|
|
11730
|
+
"ka",
|
|
11731
|
+
"el",
|
|
11732
|
+
"gu",
|
|
11733
|
+
"ht",
|
|
11734
|
+
"ha",
|
|
11735
|
+
"haw",
|
|
11736
|
+
"he",
|
|
11737
|
+
"hi",
|
|
11738
|
+
"hu",
|
|
11739
|
+
"is",
|
|
11740
|
+
"id",
|
|
11741
|
+
"ja",
|
|
11742
|
+
"jw",
|
|
11743
|
+
"kn",
|
|
11744
|
+
"kk",
|
|
11745
|
+
"km",
|
|
11746
|
+
"ko",
|
|
11747
|
+
"lo",
|
|
11748
|
+
"la",
|
|
11749
|
+
"lv",
|
|
11750
|
+
"ln",
|
|
11751
|
+
"lt",
|
|
11752
|
+
"lb",
|
|
11753
|
+
"mk",
|
|
11754
|
+
"mg",
|
|
11755
|
+
"ms",
|
|
11756
|
+
"ml",
|
|
11757
|
+
"mt",
|
|
11758
|
+
"mi",
|
|
11759
|
+
"mr",
|
|
11760
|
+
"mn",
|
|
11761
|
+
"ne",
|
|
11762
|
+
"no",
|
|
11763
|
+
"nn",
|
|
11764
|
+
"oc",
|
|
11765
|
+
"pa",
|
|
11766
|
+
"ps",
|
|
11767
|
+
"fa",
|
|
11768
|
+
"pl",
|
|
11769
|
+
"ro",
|
|
11770
|
+
"ru",
|
|
11771
|
+
"sa",
|
|
11772
|
+
"sr",
|
|
11773
|
+
"sn",
|
|
11774
|
+
"sd",
|
|
11775
|
+
"si",
|
|
11776
|
+
"sk",
|
|
11777
|
+
"sl",
|
|
11778
|
+
"so",
|
|
11779
|
+
"su",
|
|
11780
|
+
"sw",
|
|
11781
|
+
"sv",
|
|
11782
|
+
"tl",
|
|
11783
|
+
"tg",
|
|
11784
|
+
"ta",
|
|
11785
|
+
"tt",
|
|
11786
|
+
"te",
|
|
11787
|
+
"th",
|
|
11788
|
+
"bo",
|
|
11789
|
+
"tr",
|
|
11790
|
+
"tk",
|
|
11791
|
+
"uk",
|
|
11792
|
+
"ur",
|
|
11793
|
+
"uz",
|
|
11794
|
+
"vi",
|
|
11795
|
+
"cy",
|
|
11796
|
+
"yi",
|
|
11797
|
+
"yo"
|
|
11798
|
+
]).describe(
|
|
11799
|
+
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
|
|
11800
|
+
).or(zod3.null()).optional().describe(
|
|
11801
|
+
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
|
|
11802
|
+
),
|
|
11803
|
+
language_codes: zod3.array(
|
|
11804
|
+
zod3.enum([
|
|
11800
11805
|
"en",
|
|
11801
11806
|
"en_au",
|
|
11802
11807
|
"en_uk",
|
|
@@ -11900,53 +11905,365 @@ var createTranscriptBody = zod3.object({
|
|
|
11900
11905
|
"yi",
|
|
11901
11906
|
"yo"
|
|
11902
11907
|
]).describe(
|
|
11903
|
-
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/
|
|
11904
|
-
)
|
|
11905
|
-
|
|
11906
|
-
)
|
|
11907
|
-
|
|
11908
|
-
|
|
11909
|
-
)
|
|
11910
|
-
|
|
11911
|
-
|
|
11912
|
-
),
|
|
11913
|
-
|
|
11914
|
-
|
|
11915
|
-
)
|
|
11916
|
-
|
|
11917
|
-
format_text: zod3.boolean().default(createTranscriptBodyFormatTextDefault).describe("Enable Text Formatting, can be true or false"),
|
|
11918
|
-
disfluencies: zod3.boolean().optional().describe(
|
|
11919
|
-
'Transcribe Filler Words, like "umm", in your media file; can be true or false'
|
|
11920
|
-
),
|
|
11921
|
-
multichannel: zod3.boolean().optional().describe(
|
|
11922
|
-
"Enable [Multichannel](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) transcription, can be true or false."
|
|
11923
|
-
),
|
|
11924
|
-
webhook_url: zod3.string().optional().describe(
|
|
11925
|
-
"The URL to which we send webhook requests.\nWe sends two different types of webhook requests.\nOne request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.\n"
|
|
11908
|
+
"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
|
|
11909
|
+
)
|
|
11910
|
+
).nullish().describe(
|
|
11911
|
+
"The language codes of your audio file. Used for [Code switching](/docs/speech-to-text/pre-recorded-audio/code-switching)\nOne of the values specified must be `en`.\n"
|
|
11912
|
+
),
|
|
11913
|
+
language_confidence_threshold: zod3.number().optional().describe(
|
|
11914
|
+
"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nDefaults to 0. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n"
|
|
11915
|
+
),
|
|
11916
|
+
language_detection: zod3.boolean().optional().describe(
|
|
11917
|
+
"Enable [Automatic language detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection), either true or false."
|
|
11918
|
+
),
|
|
11919
|
+
language_detection_options: zod3.object({
|
|
11920
|
+
expected_languages: zod3.array(zod3.string()).optional().describe(
|
|
11921
|
+
'List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.'
|
|
11926
11922
|
),
|
|
11927
|
-
|
|
11928
|
-
|
|
11923
|
+
fallback_language: zod3.string().default(createTranscriptBodyLanguageDetectionOptionsFallbackLanguageDefault).describe(
|
|
11924
|
+
'If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n'
|
|
11929
11925
|
),
|
|
11930
|
-
|
|
11931
|
-
"
|
|
11926
|
+
code_switching: zod3.boolean().optional().describe(
|
|
11927
|
+
"Whether [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) should be detected.\n"
|
|
11932
11928
|
),
|
|
11933
|
-
|
|
11934
|
-
|
|
11935
|
-
|
|
11936
|
-
|
|
11937
|
-
|
|
11938
|
-
|
|
11939
|
-
|
|
11940
|
-
|
|
11929
|
+
code_switching_confidence_threshold: zod3.number().default(
|
|
11930
|
+
createTranscriptBodyLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault
|
|
11931
|
+
).describe(
|
|
11932
|
+
"The confidence threshold for [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.\n"
|
|
11933
|
+
)
|
|
11934
|
+
}).optional().describe(
|
|
11935
|
+
"Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
|
|
11936
|
+
),
|
|
11937
|
+
multichannel: zod3.boolean().optional().describe(
|
|
11938
|
+
"Enable [Multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) transcription, can be true or false."
|
|
11939
|
+
),
|
|
11940
|
+
prompt: zod3.string().optional().describe(
|
|
11941
|
+
"Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
|
|
11942
|
+
),
|
|
11943
|
+
punctuate: zod3.boolean().default(createTranscriptBodyPunctuateDefault).describe(
|
|
11944
|
+
"Enable [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio), can be true or false"
|
|
11945
|
+
),
|
|
11946
|
+
redact_pii: zod3.boolean().optional().describe(
|
|
11947
|
+
"Redact PII from the transcribed text using the Redact PII model, can be true or false. See [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
|
|
11948
|
+
),
|
|
11949
|
+
redact_pii_audio: zod3.boolean().optional().describe(
|
|
11950
|
+
'Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details.'
|
|
11951
|
+
),
|
|
11952
|
+
redact_pii_audio_options: zod3.object({
|
|
11953
|
+
return_redacted_no_speech_audio: zod3.boolean().optional().describe(
|
|
11954
|
+
"By default, audio redaction provides redacted audio URLs only when speech is detected. However, if your use-case specifically requires redacted audio files even for silent audio files without any dialogue, you can opt to receive these URLs by setting this parameter to `true`."
|
|
11941
11955
|
),
|
|
11942
|
-
|
|
11943
|
-
|
|
11956
|
+
override_audio_redaction_method: zod3.enum(["silence"]).optional().describe(
|
|
11957
|
+
"Specify the method used to redact audio. By default, redacted audio uses a beep sound. Set to `silence` to replace PII with silence instead of a beep."
|
|
11958
|
+
)
|
|
11959
|
+
}).optional().describe(
|
|
11960
|
+
"Specify options for [PII redacted audio](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) files."
|
|
11961
|
+
),
|
|
11962
|
+
redact_pii_audio_quality: zod3.enum(["mp3", "wav"]).optional().describe(
|
|
11963
|
+
"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details."
|
|
11964
|
+
),
|
|
11965
|
+
redact_pii_policies: zod3.array(
|
|
11966
|
+
zod3.enum([
|
|
11967
|
+
"account_number",
|
|
11968
|
+
"banking_information",
|
|
11969
|
+
"blood_type",
|
|
11970
|
+
"credit_card_cvv",
|
|
11971
|
+
"credit_card_expiration",
|
|
11972
|
+
"credit_card_number",
|
|
11973
|
+
"date",
|
|
11974
|
+
"date_interval",
|
|
11975
|
+
"date_of_birth",
|
|
11976
|
+
"drivers_license",
|
|
11977
|
+
"drug",
|
|
11978
|
+
"duration",
|
|
11979
|
+
"email_address",
|
|
11980
|
+
"event",
|
|
11981
|
+
"filename",
|
|
11982
|
+
"gender_sexuality",
|
|
11983
|
+
"healthcare_number",
|
|
11984
|
+
"injury",
|
|
11985
|
+
"ip_address",
|
|
11986
|
+
"language",
|
|
11987
|
+
"location",
|
|
11988
|
+
"marital_status",
|
|
11989
|
+
"medical_condition",
|
|
11990
|
+
"medical_process",
|
|
11991
|
+
"money_amount",
|
|
11992
|
+
"nationality",
|
|
11993
|
+
"number_sequence",
|
|
11994
|
+
"occupation",
|
|
11995
|
+
"organization",
|
|
11996
|
+
"passport_number",
|
|
11997
|
+
"password",
|
|
11998
|
+
"person_age",
|
|
11999
|
+
"person_name",
|
|
12000
|
+
"phone_number",
|
|
12001
|
+
"physical_attribute",
|
|
12002
|
+
"political_affiliation",
|
|
12003
|
+
"religion",
|
|
12004
|
+
"statistics",
|
|
12005
|
+
"time",
|
|
12006
|
+
"url",
|
|
12007
|
+
"us_social_security_number",
|
|
12008
|
+
"username",
|
|
12009
|
+
"vehicle_id",
|
|
12010
|
+
"zodiac_sign"
|
|
12011
|
+
]).describe("The type of PII to redact")
|
|
12012
|
+
).optional().describe(
|
|
12013
|
+
"The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
|
|
12014
|
+
),
|
|
12015
|
+
redact_pii_sub: zod3.enum(["entity_name", "hash"]).describe(
|
|
12016
|
+
"The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
|
|
12017
|
+
).or(zod3.null()).optional().describe(
|
|
12018
|
+
"The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
|
|
12019
|
+
),
|
|
12020
|
+
sentiment_analysis: zod3.boolean().optional().describe(
|
|
12021
|
+
"Enable [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis), can be true or false"
|
|
12022
|
+
),
|
|
12023
|
+
speaker_labels: zod3.boolean().optional().describe(
|
|
12024
|
+
"Enable [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization), can be true or false"
|
|
12025
|
+
),
|
|
12026
|
+
speaker_options: zod3.object({
|
|
12027
|
+
min_speakers_expected: zod3.number().default(createTranscriptBodySpeakerOptionsMinSpeakersExpectedDefault).describe(
|
|
12028
|
+
"The minimum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details."
|
|
11944
12029
|
),
|
|
11945
|
-
|
|
11946
|
-
"
|
|
12030
|
+
max_speakers_expected: zod3.number().optional().describe(
|
|
12031
|
+
"<Warning>Setting this parameter too high may hurt model accuracy</Warning>\nThe maximum number of speakers expected in the audio file. The default depends on audio duration: no limit for 0-2 minutes, 10 for 2-10 minutes, and 30 for 10+ minutes. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details.\n"
|
|
12032
|
+
)
|
|
12033
|
+
}).optional().describe(
|
|
12034
|
+
"Specify options for [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers). Use this to set a range of possible speakers."
|
|
12035
|
+
),
|
|
12036
|
+
speakers_expected: zod3.number().nullish().describe(
|
|
12037
|
+
"Tells the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
|
|
12038
|
+
),
|
|
12039
|
+
speech_models: zod3.array(
|
|
12040
|
+
zod3.string().describe(
|
|
12041
|
+
"The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
|
|
12042
|
+
)
|
|
12043
|
+
).describe(
|
|
12044
|
+
"List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.\n"
|
|
12045
|
+
),
|
|
12046
|
+
speech_threshold: zod3.number().nullish().describe(
|
|
12047
|
+
"Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.\n"
|
|
12048
|
+
),
|
|
12049
|
+
speech_understanding: zod3.object({
|
|
12050
|
+
request: zod3.object({
|
|
12051
|
+
translation: zod3.object({
|
|
12052
|
+
target_languages: zod3.array(zod3.string()).describe(
|
|
12053
|
+
'List of target language codes (e.g., `["es", "de"]`). See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for supported languages.'
|
|
12054
|
+
),
|
|
12055
|
+
formal: zod3.boolean().default(createTranscriptBodySpeechUnderstandingRequestTranslationFormalDefault).describe(
|
|
12056
|
+
"Use formal language style. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
|
|
12057
|
+
),
|
|
12058
|
+
match_original_utterance: zod3.boolean().optional().describe(
|
|
12059
|
+
"When enabled with Speaker Labels, returns translated text in the utterances array. Each utterance will include a `translated_texts` key containing translations for each target language."
|
|
12060
|
+
)
|
|
12061
|
+
})
|
|
12062
|
+
}).describe(
|
|
12063
|
+
"Request body for [Translation](https://www.assemblyai.com/docs/speech-understanding/translation)."
|
|
12064
|
+
).or(
|
|
12065
|
+
zod3.object({
|
|
12066
|
+
speaker_identification: zod3.object({
|
|
12067
|
+
speaker_type: zod3.enum(["role", "name"]).describe(
|
|
12068
|
+
"Type of speaker identification. See [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification) for details on each type."
|
|
12069
|
+
),
|
|
12070
|
+
known_values: zod3.array(zod3.string()).optional().describe(
|
|
12071
|
+
'Required if speaker_type is "role". Each value must be 35 characters or less.'
|
|
12072
|
+
),
|
|
12073
|
+
speakers: zod3.array(
|
|
12074
|
+
zod3.object({
|
|
12075
|
+
role: zod3.string().optional().describe(
|
|
12076
|
+
'The role of the speaker. Required when `speaker_type` is "role".'
|
|
12077
|
+
),
|
|
12078
|
+
name: zod3.string().optional().describe(
|
|
12079
|
+
'The name of the speaker. Required when `speaker_type` is "name".'
|
|
12080
|
+
),
|
|
12081
|
+
description: zod3.string().optional().describe(
|
|
12082
|
+
"A description of the speaker to help the model identify them based on conversational context."
|
|
12083
|
+
)
|
|
12084
|
+
})
|
|
12085
|
+
).optional().describe(
|
|
12086
|
+
"An array of speaker objects with metadata to improve identification accuracy. Each object should include a `role` or `name` (depending on `speaker_type`) and an optional `description` to help the model identify the speaker. You can also include any additional custom properties (e.g., `company`, `title`) to provide more context. Use this as an alternative to `known_values` when you want to provide additional context about each speaker."
|
|
12087
|
+
)
|
|
12088
|
+
})
|
|
12089
|
+
}).describe(
|
|
12090
|
+
"Request body for [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification)."
|
|
12091
|
+
)
|
|
12092
|
+
).or(
|
|
12093
|
+
zod3.object({
|
|
12094
|
+
custom_formatting: zod3.object({
|
|
12095
|
+
date: zod3.string().optional().describe(
|
|
12096
|
+
'Date format pattern (e.g., `"mm/dd/yyyy"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
|
|
12097
|
+
),
|
|
12098
|
+
phone_number: zod3.string().optional().describe(
|
|
12099
|
+
'Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
|
|
12100
|
+
),
|
|
12101
|
+
email: zod3.string().optional().describe(
|
|
12102
|
+
'Email format pattern (e.g., `"username@domain.com"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
|
|
12103
|
+
)
|
|
12104
|
+
})
|
|
12105
|
+
}).describe(
|
|
12106
|
+
"Request body for [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting)."
|
|
12107
|
+
)
|
|
12108
|
+
)
|
|
12109
|
+
}).optional().describe(
|
|
12110
|
+
"Enable speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.\n"
|
|
12111
|
+
),
|
|
12112
|
+
summarization: zod3.boolean().optional().describe(
|
|
12113
|
+
"Enable [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization), can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
|
|
12114
|
+
),
|
|
12115
|
+
summary_model: zod3.enum(["informative", "conversational", "catchy"]).optional().describe("The model to summarize the transcript"),
|
|
12116
|
+
summary_type: zod3.enum(["bullets", "bullets_verbose", "gist", "headline", "paragraph"]).optional().describe("The type of summary"),
|
|
12117
|
+
remove_audio_tags: zod3.enum(["all"]).describe(
|
|
12118
|
+
'Remove [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) from the transcript text. Set to `"all"` to remove all audio tags.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n'
|
|
12119
|
+
).or(zod3.null()).optional().describe(
|
|
12120
|
+
'Remove [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) from the transcript text. Set to `"all"` to remove all audio tags.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n'
|
|
12121
|
+
),
|
|
12122
|
+
temperature: zod3.number().optional().describe(
|
|
12123
|
+
"Control the amount of randomness injected into the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
|
|
12124
|
+
),
|
|
12125
|
+
webhook_auth_header_name: zod3.string().nullish().describe(
|
|
12126
|
+
"The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests"
|
|
12127
|
+
),
|
|
12128
|
+
webhook_auth_header_value: zod3.string().nullish().describe(
|
|
12129
|
+
"The header value to send back with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests for added security"
|
|
12130
|
+
),
|
|
12131
|
+
webhook_url: zod3.string().optional().describe(
|
|
12132
|
+
"The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.\n"
|
|
12133
|
+
),
|
|
12134
|
+
custom_topics: zod3.boolean().optional().describe("This parameter does not currently have any functionality attached to it."),
|
|
12135
|
+
speech_model: zod3.string().describe(
|
|
12136
|
+
"The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
|
|
12137
|
+
).or(zod3.null()).optional().describe(
|
|
12138
|
+
"This parameter has been replaced with the `speech_models` parameter, learn more about the `speech_models` parameter [here](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model).\n"
|
|
12139
|
+
),
|
|
12140
|
+
topics: zod3.array(zod3.string()).optional().describe("This parameter does not currently have any functionality attached to it."),
|
|
12141
|
+
audio_url: zod3.string().describe("The URL of the audio or video file to transcribe.")
|
|
12142
|
+
}).describe("The parameters for creating a transcript");
+var createTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault = "auto";
+var createTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault = false;
+var createTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault = 0.3;
+var createTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault = false;
+var createTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault = true;
+var createTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
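The `*Default` constants above feed zod's `.default()`: when a key is absent from the parsed input, parsing fills in the constant. A small sketch of that behavior, assuming plain zod (the values mirror the constants in this diff):

```js
// Sketch: zod .default() fills absent keys at parse time.
import { z } from "zod";

const fallbackLanguageDefault = "auto"; // mirrors the constant above
const codeSwitchingThresholdDefault = 0.3;

const languageDetectionOptionsSketch = z.object({
  fallback_language: z.string().default(fallbackLanguageDefault),
  code_switching_confidence_threshold: z.number().default(codeSwitchingThresholdDefault)
});

console.log(languageDetectionOptionsSketch.parse({}));
// -> { fallback_language: "auto", code_switching_confidence_threshold: 0.3 }
```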
+var createTranscriptResponse = zod3.object({
+  audio_channels: zod3.number().optional().describe(
+    "The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
+  ),
+  audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
+  audio_end_at: zod3.number().nullish().describe(
+    "The point in time, in milliseconds, in the file at which the transcription was terminated. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
+  ),
+  audio_start_from: zod3.number().nullish().describe(
+    "The point in time, in milliseconds, in the file at which the transcription was started. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
+  ),
+  audio_url: zod3.string().describe("The URL of the media that was transcribed"),
+  auto_chapters: zod3.boolean().nullish().describe(
+    "Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+  ),
+  auto_highlights: zod3.boolean().describe(
+    "Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
+  ),
+  auto_highlights_result: zod3.object({
+    status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
+    results: zod3.array(
+      zod3.object({
+        count: zod3.number().describe("The total number of times the key phrase appears in the audio file"),
+        rank: zod3.number().describe(
+          "The total relevancy to the overall audio file of this key phrase - a greater number means more relevant"
+        ),
+        text: zod3.string().describe("The text itself of the key phrase"),
+        timestamps: zod3.array(
+          zod3.object({
+            start: zod3.number().describe("The start time in milliseconds"),
+            end: zod3.number().describe("The end time in milliseconds")
+          }).describe("Timestamp containing a start and end property in milliseconds")
+        ).describe("The timestamp of the of the key phrase")
+      })
+    ).describe("A temporally-sequential array of Key Phrases")
+  }).describe(
+    "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+  ).or(zod3.null()).optional().describe(
+    "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+  ),
+  chapters: zod3.array(
+    zod3.object({
+      gist: zod3.string().describe(
+        "An ultra-short summary (just a few words) of the content spoken in the chapter"
+      ),
+      headline: zod3.string().describe("A single sentence summary of the content spoken during the chapter"),
+      summary: zod3.string().describe("A one paragraph summary of the content spoken during the chapter"),
+      start: zod3.number().describe("The starting time, in milliseconds, for the chapter"),
+      end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
+    }).describe("Chapter of the audio file")
+  ).nullish().describe(
+    "An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
+  ),
+  confidence: zod3.number().nullish().describe(
+    "The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
+  ),
+  content_safety: zod3.boolean().nullish().describe(
+    "Whether [Content Moderation](https://www.assemblyai.com/docs/content-moderation) is enabled, can be true or false"
+  ),
+  content_safety_labels: zod3.object({
+    status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
+    results: zod3.array(
+      zod3.object({
+        text: zod3.string().describe("The transcript of the section flagged by the Content Moderation model"),
+        labels: zod3.array(
+          zod3.object({
+            label: zod3.string().describe("The label of the sensitive topic"),
+            confidence: zod3.number().describe("The confidence score for the topic being discussed, from 0 to 1"),
+            severity: zod3.number().describe("How severely the topic is discussed in the section, from 0 to 1")
+          })
+        ).describe(
+          "An array of safety labels, one per sensitive topic that was detected in the section"
+        ),
+        sentences_idx_start: zod3.number().describe("The sentence index at which the section begins"),
+        sentences_idx_end: zod3.number().describe("The sentence index at which the section ends"),
+        timestamp: zod3.object({
+          start: zod3.number().describe("The start time in milliseconds"),
+          end: zod3.number().describe("The end time in milliseconds")
+        }).describe("Timestamp containing a start and end property in milliseconds")
+      })
+    ).describe("An array of results for the Content Moderation model"),
+    summary: zod3.record(zod3.string(), zod3.number()).describe(
+      "A summary of the Content Moderation confidence results for the entire audio file"
     ),
-
-    zod3.
+    severity_score_summary: zod3.record(
+      zod3.string(),
+      zod3.object({
+        low: zod3.number(),
+        medium: zod3.number(),
+        high: zod3.number()
+      })
+    ).describe(
+      "A summary of the Content Moderation severity results for the entire audio file"
+    )
+  }).describe(
+    "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.\n"
+  ).or(zod3.null()).optional().describe(
+    "An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.\n"
+  ),
+  custom_spelling: zod3.array(
+    zod3.object({
+      from: zod3.array(zod3.string()).describe("Words or phrases to replace"),
+      to: zod3.string().describe("Word to replace with")
+    }).describe(
+      "Object containing words or phrases to replace, and the word or phrase to replace with"
+    )
+  ).nullish().describe(
+    "Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
+  ),
+  disfluencies: zod3.boolean().nullish().describe(
+    'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
+  ),
+  domain: zod3.string().nullish().describe(
+    'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
+  ),
+  entities: zod3.array(
+    zod3.object({
+      entity_type: zod3.enum([
         "account_number",
         "banking_information",
         "blood_type",
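The response schema above mixes two null-handling styles: `.nullish()` and a `.describe(...).or(zod3.null()).optional()` chain (the latter keeps separate descriptions for the bare and nullable shapes). Both accept the same inputs; a quick sketch with plain zod:

```js
// Sketch: .nullish() vs .or(z.null()).optional() accept the same inputs.
import { z } from "zod";

const viaNullish = z.string().nullish();             // string | null | undefined
const viaUnion = z.string().or(z.null()).optional(); // same accepted values

for (const input of ["text", null, undefined]) {
  console.log(viaNullish.safeParse(input).success, viaUnion.safeParse(input).success);
}
// -> true true (three times)
```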
@@ -11991,103 +12308,59 @@ var createTranscriptBody = zod3.object({
         "username",
         "vehicle_id",
         "zodiac_sign"
-  ]).describe("The type of
-
-
-
-
-
-
-    'The replacement logic for detected PII, can be "entity_type" or "hash". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.'
-  ),
-  speaker_labels: zod3.boolean().optional().describe(
-    "Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false"
-  ),
-  speakers_expected: zod3.number().nullish().describe(
-    "Tells the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details."
-  ),
-  content_safety: zod3.boolean().optional().describe(
-    "Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false"
-  ),
-  content_safety_confidence: zod3.number().min(createTranscriptBodyContentSafetyConfidenceMin).max(createTranscriptBodyContentSafetyConfidenceMax).default(createTranscriptBodyContentSafetyConfidenceDefault).describe(
-    "The confidence threshold for the Content Moderation model. Values must be between 25 and 100."
-  ),
-  iab_categories: zod3.boolean().optional().describe(
-    "Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false"
-  ),
-  custom_spelling: zod3.array(
-    zod3.object({
-      from: zod3.array(zod3.string().describe("Word or phrase to replace")).describe("Words or phrases to replace"),
-      to: zod3.string().describe("Word to replace with")
-    }).describe(
-      "Object containing words or phrases to replace, and the word or phrase to replace with"
+      ]).describe("The type of entity for the detected entity"),
+      text: zod3.string().describe("The text for the detected entity"),
+      start: zod3.number().describe(
+        "The starting time, in milliseconds, at which the detected entity appears in the audio file"
+      ),
+      end: zod3.number().describe(
+        "The ending time, in milliseconds, for the detected entity in the audio file"
       )
-  ).
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  ).describe(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-var createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax = 1;
-var createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin = 0;
-var createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax = 1;
-var createTranscriptResponseContentSafetyLabelsSummaryMinOne = 0;
-var createTranscriptResponseContentSafetyLabelsSummaryMaxOne = 1;
-var createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin = 0;
-var createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax = 1;
-var createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin = 0;
-var createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax = 1;
-var createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin = 0;
-var createTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax = 1;
-var createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin = 0;
-var createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax = 1;
-var createTranscriptResponseIabCategoriesResultSummaryMinOne = 0;
-var createTranscriptResponseIabCategoriesResultSummaryMaxOne = 1;
-var createTranscriptResponseSentimentAnalysisResultsItemConfidenceMin = 0;
-var createTranscriptResponseSentimentAnalysisResultsItemConfidenceMax = 1;
-var createTranscriptResponseSpeechThresholdMin = 0;
-var createTranscriptResponseSpeechThresholdMax = 1;
-var createTranscriptResponse = zod3.object({
+    }).describe("A detected entity")
+  ).nullish().describe(
+    "An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
+  ),
+  entity_detection: zod3.boolean().nullish().describe(
+    "Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
+  ),
+  error: zod3.string().optional().describe("Error message of why the transcript failed"),
+  filter_profanity: zod3.boolean().nullish().describe(
+    "Whether [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) is enabled, either true or false"
+  ),
+  format_text: zod3.boolean().nullish().describe(
+    "Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
+  ),
+  iab_categories: zod3.boolean().nullish().describe(
+    "Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
+  ),
+  iab_categories_result: zod3.object({
+    status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
+    results: zod3.array(
+      zod3.object({
+        text: zod3.string().describe("The text in the transcript in which a detected topic occurs"),
+        labels: zod3.array(
+          zod3.object({
+            relevance: zod3.number().describe("How relevant the detected topic is of a detected topic"),
+            label: zod3.string().describe(
+              "The IAB taxonomical label for the label of the detected topic, where > denotes supertopic/subtopic relationship"
+            )
+          })
+        ).optional().describe("An array of detected topics in the text"),
+        timestamp: zod3.object({
+          start: zod3.number().describe("The start time in milliseconds"),
+          end: zod3.number().describe("The end time in milliseconds")
+        }).optional().describe("Timestamp containing a start and end property in milliseconds")
+      }).describe("The result of the topic detection model")
+    ).describe("An array of results for the Topic Detection model"),
+    summary: zod3.record(zod3.string(), zod3.number()).describe("The overall relevance of topic to the entire audio file")
+  }).describe(
+    "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
+  ).or(zod3.null()).optional().describe(
+    "The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
+  ),
   id: zod3.string().uuid().describe("The unique identifier of your transcript"),
-
-
-    "The status of your transcript. Possible values are queued, processing, completed, or error."
+  keyterms_prompt: zod3.array(zod3.string()).optional().describe(
+    "Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3 Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.\n"
   ),
   language_code: zod3.enum([
     "en",
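The new `keyterms_prompt` field above carries domain-specific phrases (per its description: up to 200 on Universal-2, 1,000 on Universal-3 Pro, at most 6 words per phrase). A hypothetical fragment showing the field's shape; the phrase values are invented for illustration:

```js
// Hypothetical configuration fragment - field shape per the schema above.
const transcriptConfig = {
  audio_url: "https://example.com/earnings-call.mp3",
  keyterms_prompt: ["EBITDA", "net revenue retention", "generally accepted accounting principles"]
};
```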
@@ -12192,138 +12465,175 @@ var createTranscriptResponse = zod3.object({
     "cy",
     "yi",
     "yo"
-  ]).describe(
-    "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/
-  ).or(zod3.string()).optional().describe(
-    "The language of your audio file.\nPossible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n"
-  ),
-  language_detection: zod3.boolean().nullish().describe(
-    "Whether [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection) is enabled, either true or false"
-  ),
-  language_confidence_threshold: zod3.number().min(createTranscriptResponseLanguageConfidenceThresholdMin).max(createTranscriptResponseLanguageConfidenceThresholdMax).nullable().describe(
-    "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\n"
-  ),
-  language_confidence: zod3.number().min(createTranscriptResponseLanguageConfidenceMin).max(createTranscriptResponseLanguageConfidenceMax).nullable().describe(
-    "The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence)"
-  ),
-  speech_model: zod3.enum(["best", "slam-1", "universal"]).describe("The speech model to use for the transcription.").or(zod3.null()).describe(
-    "The speech model used for the transcription. When `null`, the default model is used."
-  ),
-  text: zod3.string().nullish().describe("The textual transcript of your media file"),
-  words: zod3.array(
-    zod3.object({
-      confidence: zod3.number().min(createTranscriptResponseWordsItemConfidenceMin).max(createTranscriptResponseWordsItemConfidenceMax).describe("The confidence score for the transcript of this word"),
-      start: zod3.number().describe("The starting time, in milliseconds, for the word"),
-      end: zod3.number().describe("The ending time, in milliseconds, for the word"),
-      text: zod3.string().describe("The text of the word"),
-      channel: zod3.string().nullish().describe(
-        "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
-      ),
-      speaker: zod3.string().nullable().describe(
-        "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
-      )
-    })
-  ).nullish().describe(
-    "An array of temporally-sequential word objects, one for each word in the transcript.\nSee [Speech recognition](https://www.assemblyai.com/docs/models/speech-recognition) for more information.\n"
+  ]).optional().describe(
+    "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
   ),
-
-  zod3.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  language_codes: zod3.array(
+    zod3.enum([
+      "en",
+      "en_au",
+      "en_uk",
+      "en_us",
+      "es",
+      "fr",
+      "de",
+      "it",
+      "pt",
+      "nl",
+      "af",
+      "sq",
+      "am",
+      "ar",
+      "hy",
+      "as",
+      "az",
+      "ba",
+      "eu",
+      "be",
+      "bn",
+      "bs",
+      "br",
+      "bg",
+      "my",
+      "ca",
+      "zh",
+      "hr",
+      "cs",
+      "da",
+      "et",
+      "fo",
+      "fi",
+      "gl",
+      "ka",
+      "el",
+      "gu",
+      "ht",
+      "ha",
+      "haw",
+      "he",
+      "hi",
+      "hu",
+      "is",
+      "id",
+      "ja",
+      "jw",
+      "kn",
+      "kk",
+      "km",
+      "ko",
+      "lo",
+      "la",
+      "lv",
+      "ln",
+      "lt",
+      "lb",
+      "mk",
+      "mg",
+      "ms",
+      "ml",
+      "mt",
+      "mi",
+      "mr",
+      "mn",
+      "ne",
+      "no",
+      "nn",
+      "oc",
+      "pa",
+      "ps",
+      "fa",
+      "pl",
+      "ro",
+      "ru",
+      "sa",
+      "sr",
+      "sn",
+      "sd",
+      "si",
+      "sk",
+      "sl",
+      "so",
+      "su",
+      "sw",
+      "sv",
+      "tl",
+      "tg",
+      "ta",
+      "tt",
+      "te",
+      "th",
+      "bo",
+      "tr",
+      "tk",
+      "uk",
+      "ur",
+      "uz",
+      "vi",
+      "cy",
+      "yi",
+      "yo"
+    ]).describe(
+      "The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
+    )
   ).nullish().describe(
-    "
+    "The language codes of your audio file. Used for [Code switching](/docs/speech-to-text/pre-recorded-audio/code-switching)\nOne of the values specified must be `en`.\n"
   ),
-
-    "The confidence score for the
+  language_confidence: zod3.number().nullable().describe(
+    "The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence). See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details."
   ),
-
-
-  format_text: zod3.boolean().nullish().describe("Whether Text Formatting is enabled, either true or false"),
-  disfluencies: zod3.boolean().nullish().describe('Transcribe Filler Words, like "umm", in your media file; can be true or false'),
-  multichannel: zod3.boolean().nullish().describe(
-    "Whether [Multichannel transcription](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) was enabled in the transcription request, either true or false"
-  ),
-  audio_channels: zod3.number().optional().describe(
-    "The number of audio channels in the audio file. This is only present when multichannel is enabled."
+  language_confidence_threshold: zod3.number().nullable().describe(
+    "The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nSee [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n"
   ),
-
-    "
-  ),
-  webhook_status_code: zod3.number().nullish().describe(
-    "The status code we received from your server when delivering the transcript completed or failed webhook request, if a webhook URL was provided"
-  ),
-  webhook_auth: zod3.boolean().describe("Whether webhook authentication details were provided"),
-  webhook_auth_header_name: zod3.string().nullish().describe(
-    "The header name to be sent with the transcript completed or failed webhook requests"
+  language_detection: zod3.boolean().nullish().describe(
+    "Whether [Automatic language detection](/docs/pre-recorded-audio/automatic-language-detection) is enabled, either true or false"
   ),
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  })
-  ).describe("A temporally-sequential array of Key Phrases")
-  }).describe(
-    "An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.\n"
-  ).or(zod3.null()).optional().describe(
-    "An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.\n"
+  language_detection_options: zod3.object({
+    expected_languages: zod3.array(zod3.string()).optional().describe(
+      'List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.'
+    ),
+    fallback_language: zod3.string().default(createTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault).describe(
+      'If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n'
    ),
+    code_switching: zod3.boolean().optional().describe(
+      "Whether [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) should be detected.\n"
+    ),
+    code_switching_confidence_threshold: zod3.number().default(
+      createTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault
+    ).describe(
+      "The confidence threshold for [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.\n"
+    )
+  }).optional().describe(
+    "Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
   ),
-
-    "
+  multichannel: zod3.boolean().nullish().describe(
+    "Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
   ),
-
-    "
+  prompt: zod3.string().optional().describe(
+    "Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
   ),
-
-
-  filter_profanity: zod3.boolean().nullish().describe(
-    "Whether [Profanity Filtering](https://www.assemblyai.com/docs/models/speech-recognition#profanity-filtering) is enabled, either true or false"
+  punctuate: zod3.boolean().nullish().describe(
+    "Whether [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
   ),
   redact_pii: zod3.boolean().describe(
-    "Whether [PII Redaction](https://www.assemblyai.com/docs/
+    "Whether [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) is enabled, either true or false"
   ),
   redact_pii_audio: zod3.boolean().nullish().describe(
-    "Whether a redacted version of the audio file was generated,\neither true or false. See [PII redaction](https://www.assemblyai.com/docs/
+    "Whether a redacted version of the audio file was generated,\neither true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
+  ),
+  redact_pii_audio_options: zod3.object({
+    return_redacted_no_speech_audio: zod3.boolean().optional().describe(
+      "By default, audio redaction provides redacted audio URLs only when speech is detected. However, if your use-case specifically requires redacted audio files even for silent audio files without any dialogue, you can opt to receive these URLs by setting this parameter to `true`."
+    ),
+    override_audio_redaction_method: zod3.enum(["silence"]).optional().describe(
+      "Specify the method used to redact audio. By default, redacted audio uses a beep sound. Set to `silence` to replace PII with silence instead of a beep."
+    )
+  }).optional().describe(
+    "The options for PII-redacted audio, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
   ),
   redact_pii_audio_quality: zod3.enum(["mp3", "wav"]).describe(
-    "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/
+    "Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details."
   ).or(zod3.null()).optional().describe(
-    "The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/
+    "The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
   ),
   redact_pii_policies: zod3.array(
     zod3.enum([
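The `language_detection_options` object above bundles the new detection controls. A sketch of a populated value: the defaults (`"auto"` fallback, `0.3` code-switching threshold) come from the constants earlier in this diff, and the language values are illustrative:

```js
// Illustrative value for language_detection_options per the schema above.
const languageDetectionOptions = {
  expected_languages: ["en", "es"],         // example languages
  fallback_language: "auto",                // default from this diff
  code_switching: true,
  code_switching_confidence_threshold: 0.3  // default from this diff
};
```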
@@ -12373,19 +12683,347 @@ var createTranscriptResponse = zod3.object({
       "zodiac_sign"
     ]).describe("The type of PII to redact")
   ).nullish().describe(
-    "The list of PII Redaction policies that were enabled, if PII Redaction is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/
+    "The list of PII Redaction policies that were enabled, if PII Redaction is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more information.\n"
   ),
   redact_pii_sub: zod3.enum(["entity_name", "hash"]).optional().describe(
-
+    "The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
+  ),
+  sentiment_analysis: zod3.boolean().nullish().describe(
+    "Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
+  ),
+  sentiment_analysis_results: zod3.array(
+    zod3.object({
+      text: zod3.string().describe("The transcript of the sentence"),
+      start: zod3.number().describe("The starting time, in milliseconds, of the sentence"),
+      end: zod3.number().describe("The ending time, in milliseconds, of the sentence"),
+      sentiment: zod3.enum(["POSITIVE", "NEUTRAL", "NEGATIVE"]),
+      confidence: zod3.number().describe(
+        "The confidence score for the detected sentiment of the sentence, from 0 to 1"
+      ),
+      channel: zod3.string().nullish().describe(
+        "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
+      ),
+      speaker: zod3.string().nullable().describe(
+        "The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+      )
+    }).describe("The result of the Sentiment Analysis model")
+  ).nullish().describe(
+    "An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
   ),
   speaker_labels: zod3.boolean().nullish().describe(
-    "Whether [Speaker diarization](https://www.assemblyai.com/docs/
+    "Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
   ),
   speakers_expected: zod3.number().nullish().describe(
-    "Tell the speaker label model how many speakers it should attempt to identify. See [
+    "Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
+  ),
+  speech_model_used: zod3.string().optional().describe(
+    "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
+  ),
+  speech_models: zod3.array(
+    zod3.string().describe(
+      "The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
+    )
+  ).nullish().describe(
+    "List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.\n"
+  ),
+  speech_threshold: zod3.number().nullish().describe(
+    "Defaults to null. Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.\n"
+  ),
+  speech_understanding: zod3.object({
+    request: zod3.object({
+      translation: zod3.object({
+        target_languages: zod3.array(zod3.string()).describe(
+          'List of target language codes (e.g., `["es", "de"]`). See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for supported languages.'
+        ),
+        formal: zod3.boolean().default(createTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault).describe(
+          "Use formal language style. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
+        ),
+        match_original_utterance: zod3.boolean().optional().describe(
+          "When enabled with Speaker Labels, returns translated text in the utterances array. Each utterance will include a `translated_texts` key containing translations for each target language."
+        )
+      })
+    }).describe(
+      "Request body for [Translation](https://www.assemblyai.com/docs/speech-understanding/translation)."
+    ).or(
+      zod3.object({
+        speaker_identification: zod3.object({
+          speaker_type: zod3.enum(["role", "name"]).describe(
+            "Type of speaker identification. See [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification) for details on each type."
+          ),
+          known_values: zod3.array(zod3.string()).optional().describe(
+            'Required if speaker_type is "role". Each value must be 35 characters or less.'
+          ),
+          speakers: zod3.array(
+            zod3.object({
+              role: zod3.string().optional().describe(
+                'The role of the speaker. Required when `speaker_type` is "role".'
+              ),
+              name: zod3.string().optional().describe(
+                'The name of the speaker. Required when `speaker_type` is "name".'
+              ),
+              description: zod3.string().optional().describe(
+                "A description of the speaker to help the model identify them based on conversational context."
+              )
+            })
+          ).optional().describe(
+            "An array of speaker objects with metadata to improve identification accuracy. Each object should include a `role` or `name` (depending on `speaker_type`) and an optional `description` to help the model identify the speaker. You can also include any additional custom properties (e.g., `company`, `title`) to provide more context. Use this as an alternative to `known_values` when you want to provide additional context about each speaker."
+          )
+        })
+      }).describe(
+        "Request body for [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification)."
+      )
+    ).or(
+      zod3.object({
+        custom_formatting: zod3.object({
+          date: zod3.string().optional().describe(
+            'Date format pattern (e.g., `"mm/dd/yyyy"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
+          ),
+          phone_number: zod3.string().optional().describe(
+            'Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
+          ),
+          email: zod3.string().optional().describe(
+            'Email format pattern (e.g., `"username@domain.com"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
+          )
+        })
+      }).describe(
+        "Request body for [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting)."
+      )
+    ).optional(),
+    response: zod3.object({
+      translation: zod3.object({
+        status: zod3.string().optional()
+      }).optional()
+    }).or(
+      zod3.object({
+        speaker_identification: zod3.object({
+          mapping: zod3.record(zod3.string(), zod3.string()).optional().describe(
+            'A mapping of the original generic speaker labels (e.g., "A", "B") to the identified speaker names or roles.'
+          ),
+          status: zod3.string().optional()
+        }).optional()
+      })
+    ).or(
+      zod3.object({
+        custom_formatting: zod3.object({
+          mapping: zod3.record(zod3.string(), zod3.string()).optional(),
+          formatted_text: zod3.string().optional()
+        }).optional()
+      })
+    ).optional()
+  }).optional().describe(
+    "Speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.\n"
+  ),
+  status: zod3.enum(["queued", "processing", "completed", "error"]).describe(
+    "The status of your transcript. Possible values are queued, processing, completed, or error."
+  ),
+  summarization: zod3.boolean().describe(
+    "Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+  ),
+  summary: zod3.string().nullish().describe(
+    "The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
+  ),
+  summary_model: zod3.string().nullish().describe(
+    "The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
+  ),
+  summary_type: zod3.string().nullish().describe(
+    "The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
+  ),
+  remove_audio_tags: zod3.enum(["all"]).describe(
+    "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
+  ).or(zod3.null()).optional().describe(
+    "Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
+  ),
+  temperature: zod3.number().nullish().describe(
+    "The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
+  ),
+  text: zod3.string().nullish().describe("The textual transcript of your media file"),
+  throttled: zod3.boolean().nullish().describe(
+    "True while a request is throttled and false when a request is no longer throttled"
+  ),
+  utterances: zod3.array(
+    zod3.object({
+      confidence: zod3.number().describe("The confidence score for the transcript of this utterance"),
+      start: zod3.number().describe("The starting time, in milliseconds, of the utterance in the audio file"),
+      end: zod3.number().describe("The ending time, in milliseconds, of the utterance in the audio file"),
+      text: zod3.string().describe("The text for this utterance"),
+      words: zod3.array(
+        zod3.object({
+          confidence: zod3.number().describe("The confidence score for the transcript of this word"),
+          start: zod3.number().describe("The starting time, in milliseconds, for the word"),
+          end: zod3.number().describe("The ending time, in milliseconds, for the word"),
+          text: zod3.string().describe("The text of the word"),
+          channel: zod3.string().nullish().describe(
+            "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
+          ),
+          speaker: zod3.string().nullable().describe(
+            "The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+          )
+        })
+      ).describe("The words in the utterance."),
+      channel: zod3.string().nullish().describe(
+        "The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
+      ),
+      speaker: zod3.string().describe(
+        'The speaker of this utterance, where each speaker is assigned a sequential capital letter - e.g. "A" for Speaker A, "B" for Speaker B, etc.'
+      ),
+      translated_texts: zod3.record(zod3.string(), zod3.string()).optional().describe(
+        'Translations keyed by language code (e.g., `{"es": "Texto traducido", "de": "\xDCbersetzter Text"}`). Only present when `match_original_utterance` is enabled with translation.'
+      )
+    })
+  ).nullish().describe(
+    "When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
+  ),
+  webhook_auth: zod3.boolean().describe(
+    "Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
+  ),
+  webhook_auth_header_name: zod3.string().nullish().describe(
+    "The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests"
+  ),
+  webhook_status_code: zod3.number().nullish().describe(
+    "The status code we received from your server when delivering the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) request, if a webhook URL was provided"
+  ),
+  webhook_url: zod3.string().nullish().describe(
+    "The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.\n"
|
|
12887
|
+
),
|
|
12888
|
+
words: zod3.array(
|
|
12889
|
+
zod3.object({
|
|
12890
|
+
confidence: zod3.number().describe("The confidence score for the transcript of this word"),
|
|
12891
|
+
start: zod3.number().describe("The starting time, in milliseconds, for the word"),
|
|
12892
|
+
end: zod3.number().describe("The ending time, in milliseconds, for the word"),
|
|
12893
|
+
text: zod3.string().describe("The text of the word"),
|
|
12894
|
+
channel: zod3.string().nullish().describe(
|
|
12895
|
+
"The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
|
|
12896
|
+
),
|
|
12897
|
+
speaker: zod3.string().nullable().describe(
|
|
12898
|
+
"The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
|
|
12899
|
+
)
|
|
12900
|
+
})
|
|
12901
|
+
).nullish().describe(
|
|
12902
|
+
"An array of temporally-sequential word objects, one for each word in the transcript.\n"
|
|
12903
|
+
),
|
|
12904
|
+
acoustic_model: zod3.string().describe("This parameter does not currently have any functionality attached to it."),
|
|
12905
|
+
custom_topics: zod3.boolean().nullish().describe("This parameter does not currently have any functionality attached to it."),
|
|
12906
|
+
language_model: zod3.string().describe("This parameter does not currently have any functionality attached to it."),
|
|
12907
|
+
speech_model: zod3.string().describe(
|
|
12908
|
+
"The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
|
|
12909
|
+
).or(zod3.null()).describe(
|
|
12910
|
+
"This parameter has been replaced with the `speech_models` parameter, learn more about the `speech_models` parameter [here](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model).\n"
|
|
12911
|
+
),
|
|
12912
|
+
speed_boost: zod3.boolean().nullish().describe("This parameter does not currently have any functionality attached to it."),
|
|
12913
|
+
topics: zod3.array(zod3.string()).optional().describe("This parameter does not currently have any functionality attached to it."),
|
|
12914
|
+
translated_texts: zod3.object({
|
|
12915
|
+
language_code: zod3.string().optional().describe("Translated text for this language code")
|
|
12916
|
+
}).optional().describe(
|
|
12917
|
+
"Translated text keyed by language code. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
|
|
12918
|
+
)
|
|
12919
|
+
}).describe("A transcript object");
|
|
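Note: the block above defines the transcript response schema end to end. As a minimal sketch of how a schema with this shape behaves under zod, the snippet below re-declares a trimmed stand-in locally; `transcriptSketch` and its two fields are illustrative assumptions, not exports confirmed by this diff.

import { z } from "zod";

// Trimmed stand-in for the "A transcript object" schema added above
// (illustrative only; the shipped schema has many more fields).
const transcriptSketch = z.object({
  status: z.enum(["queued", "processing", "completed", "error"]),
  text: z.string().nullish(), // nullish: the value may be null or missing
}).describe("A transcript object");

const parsed = transcriptSketch.safeParse({ status: "completed", text: "hi" });
console.log(parsed.success ? parsed.data.status : parsed.error.issues);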
+var listTranscriptsQueryLimitDefault = 10;
+var listTranscriptsQueryThrottledOnlyDefault = false;
+var listTranscriptsQueryParams = zod3.object({
+limit: zod3.number().default(listTranscriptsQueryLimitDefault).describe("Maximum amount of transcripts to retrieve"),
+status: zod3.enum(["queued", "processing", "completed", "error"]).optional().describe("Filter by transcript status"),
+created_on: zod3.string().date().optional().describe("Only get transcripts created on this date"),
+before_id: zod3.string().uuid().optional().describe("Get transcripts that were created before this transcript ID"),
+after_id: zod3.string().uuid().optional().describe("Get transcripts that were created after this transcript ID"),
+throttled_only: zod3.boolean().optional().describe("Only get throttled transcripts, overrides the status filter")
+});
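Note: `limit` above chains `.default(listTranscriptsQueryLimitDefault)`, so zod fills the value in when the key is absent. A minimal sketch of that behavior, with the schema re-declared locally for illustration (the `.date()` string check assumes zod 3.23 or later):

import { z } from "zod";

const querySketch = z.object({
  limit: z.number().default(10), // mirrors listTranscriptsQueryLimitDefault
  status: z.enum(["queued", "processing", "completed", "error"]).optional(),
  created_on: z.string().date().optional(), // YYYY-MM-DD check, zod >= 3.23
});

console.log(querySketch.parse({}));                  // { limit: 10 }
console.log(querySketch.parse({ status: "error" })); // { limit: 10, status: "error" }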
+var listTranscriptsResponse = zod3.object({
+page_details: zod3.object({
+limit: zod3.number().describe("The number of results this page is limited to"),
+result_count: zod3.number().describe("The actual number of results in the page"),
+current_url: zod3.string().describe("The URL used to retrieve the current page of transcripts"),
+prev_url: zod3.string().nullable().describe(
+"The URL to the previous page of transcripts. The previous URL always points to a page with older transcripts."
+),
+next_url: zod3.string().nullable().describe(
+"The URL to the next page of transcripts. The next URL always points to a page with newer transcripts."
+)
+}).describe(
+"Details of the transcript page. Transcripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts."
+),
+transcripts: zod3.array(
+zod3.object({
+id: zod3.string().uuid().describe("The unique identifier for the transcript"),
+resource_url: zod3.string().describe("The URL to retrieve the transcript"),
+status: zod3.enum(["queued", "processing", "completed", "error"]).describe(
+"The status of your transcript. Possible values are queued, processing, completed, or error."
+),
+created: zod3.string().datetime({}).describe("The date and time the transcript was created"),
+completed: zod3.string().datetime({}).optional().describe("The date and time the transcript was completed"),
+audio_url: zod3.string().describe("The URL to the audio file"),
+error: zod3.string().nullable().describe("Error message of why the transcript failed")
+})
+).describe("An array of transcripts")
+}).describe(
+"A list of transcripts. Transcripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts."
+);
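Note: per the descriptions above, pages are sorted newest to oldest and `prev_url` walks toward older transcripts. A sketch of paging against that contract; the fetch call and auth header are assumptions for illustration, not part of this package's verified surface:

// Sketch: iterate every transcript by following page_details.prev_url
// until it comes back null (no older transcripts remain).
async function* listAllTranscripts(firstUrl, apiKey) {
  let url = firstUrl;
  while (url) {
    const res = await fetch(url, { headers: { authorization: apiKey } }); // assumed auth scheme
    const page = await res.json(); // shape of listTranscriptsResponse
    yield* page.transcripts;
    url = page.page_details.prev_url;
  }
}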
+var getTranscriptParams = zod3.object({
+transcript_id: zod3.string().describe("ID of the transcript")
+});
+var getTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault = "auto";
+var getTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault = false;
+var getTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault = 0.3;
+var getTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault = false;
+var getTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault = true;
+var getTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
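Note: the `*Default` constants above are threaded into `.default()` calls further down in `getTranscriptResponse` (for example `fallback_language` and `code_switching_confidence_threshold`). A sketch of the resulting behavior, re-declared locally:

import { z } from "zod";

const languageDetectionOptionsSketch = z.object({
  fallback_language: z.string().default("auto"), // ...FallbackLanguageDefault
  code_switching_confidence_threshold: z.number().default(0.3), // ...ConfidenceThresholdDefault
});

console.log(languageDetectionOptionsSketch.parse({}));
// -> { fallback_language: "auto", code_switching_confidence_threshold: 0.3 }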
+var getTranscriptResponse = zod3.object({
+audio_channels: zod3.number().optional().describe(
+"The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
+),
+audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
+audio_end_at: zod3.number().nullish().describe(
+"The point in time, in milliseconds, in the file at which the transcription was terminated. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
+),
+audio_start_from: zod3.number().nullish().describe(
+"The point in time, in milliseconds, in the file at which the transcription was started. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
+),
+audio_url: zod3.string().describe("The URL of the media that was transcribed"),
+auto_chapters: zod3.boolean().nullish().describe(
+"Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+),
+auto_highlights: zod3.boolean().describe(
+"Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
+),
+auto_highlights_result: zod3.object({
+status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
+results: zod3.array(
+zod3.object({
+count: zod3.number().describe("The total number of times the key phrase appears in the audio file"),
+rank: zod3.number().describe(
+"The total relevancy to the overall audio file of this key phrase - a greater number means more relevant"
+),
+text: zod3.string().describe("The text itself of the key phrase"),
+timestamps: zod3.array(
+zod3.object({
+start: zod3.number().describe("The start time in milliseconds"),
+end: zod3.number().describe("The end time in milliseconds")
+}).describe("Timestamp containing a start and end property in milliseconds")
+).describe("The timestamp of the key phrase")
+})
+).describe("A temporally-sequential array of Key Phrases")
+}).describe(
+"An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+).or(zod3.null()).optional().describe(
+"An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+),
+chapters: zod3.array(
+zod3.object({
+gist: zod3.string().describe(
+"An ultra-short summary (just a few words) of the content spoken in the chapter"
+),
+headline: zod3.string().describe("A single sentence summary of the content spoken during the chapter"),
+summary: zod3.string().describe("A one paragraph summary of the content spoken during the chapter"),
+start: zod3.number().describe("The starting time, in milliseconds, for the chapter"),
+end: zod3.number().describe("The ending time, in milliseconds, for the chapter")
+}).describe("Chapter of the audio file")
+).nullish().describe(
+"An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
+),
+confidence: zod3.number().nullish().describe(
+"The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
 ),
 content_safety: zod3.boolean().nullish().describe(
-"Whether [Content Moderation](https://www.assemblyai.com/docs/
+"Whether [Content Moderation](https://www.assemblyai.com/docs/content-moderation) is enabled, can be true or false"
 ),
 content_safety_labels: zod3.object({
 status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
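Note: the hunk below (and several later ones) drops `.min(...)/.max(...)` chains in favor of bare `zod3.number()` plus a describe string, so the 0-to-1 range becomes documentation rather than a validation rule. A sketch of the difference:

import { z } from "zod";

const oldScore = z.number().min(0).max(1);           // old shape: range enforced
const newScore = z.number().describe("from 0 to 1"); // new shape: range is doc-only

console.log(oldScore.safeParse(1.5).success); // false
console.log(newScore.safeParse(1.5).success); // true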
@@ -12395,16 +13033,8 @@ var createTranscriptResponse = zod3.object({
 labels: zod3.array(
 zod3.object({
 label: zod3.string().describe("The label of the sensitive topic"),
-confidence: zod3.number().
-
-).max(
-createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax
-).describe("The confidence score for the topic being discussed, from 0 to 1"),
-severity: zod3.number().min(
-createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin
-).max(
-createTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax
-).describe("How severely the topic is discussed in the section, from 0 to 1")
+confidence: zod3.number().describe("The confidence score for the topic being discussed, from 0 to 1"),
+severity: zod3.number().describe("How severely the topic is discussed in the section, from 0 to 1")
 })
 ).describe(
 "An array of safety labels, one per sensitive topic that was detected in the section"
@@ -12417,128 +13047,39 @@ var createTranscriptResponse = zod3.object({
 }).describe("Timestamp containing a start and end property in milliseconds")
 })
 ).describe("An array of results for the Content Moderation model"),
-summary: zod3.record(
-zod3.string(),
-zod3.number().min(createTranscriptResponseContentSafetyLabelsSummaryMinOne).max(createTranscriptResponseContentSafetyLabelsSummaryMaxOne).describe(
-'A confidence score for the presence of the sensitive topic "topic" across the entire audio file'
-)
-).describe(
+summary: zod3.record(zod3.string(), zod3.number()).describe(
 "A summary of the Content Moderation confidence results for the entire audio file"
 ),
 severity_score_summary: zod3.record(
 zod3.string(),
 zod3.object({
-low: zod3.number()
-medium: zod3.number()
-high: zod3.number()
+low: zod3.number(),
+medium: zod3.number(),
+high: zod3.number()
 })
 ).describe(
 "A summary of the Content Moderation severity results for the entire audio file"
 )
 }).describe(
-"An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/
+"An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.\n"
 ).or(zod3.null()).optional().describe(
-"An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/
-),
-iab_categories: zod3.boolean().nullish().describe(
-"Whether [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) is enabled, can be true or false"
-),
-iab_categories_result: zod3.object({
-status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
-results: zod3.array(
-zod3.object({
-text: zod3.string().describe("The text in the transcript in which a detected topic occurs"),
-labels: zod3.array(
-zod3.object({
-relevance: zod3.number().min(
-createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin
-).max(
-createTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax
-).describe("How relevant the detected topic is of a detected topic"),
-label: zod3.string().describe(
-"The IAB taxonomical label for the label of the detected topic, where > denotes supertopic/subtopic relationship"
-)
-})
-).optional().describe("An array of detected topics in the text"),
-timestamp: zod3.object({
-start: zod3.number().describe("The start time in milliseconds"),
-end: zod3.number().describe("The end time in milliseconds")
-}).optional().describe("Timestamp containing a start and end property in milliseconds")
-}).describe("The result of the topic detection model")
-).describe("An array of results for the Topic Detection model"),
-summary: zod3.record(
-zod3.string(),
-zod3.number().min(createTranscriptResponseIabCategoriesResultSummaryMinOne).max(createTranscriptResponseIabCategoriesResultSummaryMaxOne)
-).describe("The overall relevance of topic to the entire audio file")
-}).describe(
-"The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.\n"
-).or(zod3.null()).optional().describe(
-"The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.\n"
+"An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.\n"
 ),
 custom_spelling: zod3.array(
 zod3.object({
-from: zod3.array(zod3.string()
+from: zod3.array(zod3.string()).describe("Words or phrases to replace"),
 to: zod3.string().describe("Word to replace with")
 }).describe(
 "Object containing words or phrases to replace, and the word or phrase to replace with"
 )
-).nullish().describe("Customize how words are spelled and formatted using to and from values"),
-keyterms_prompt: zod3.array(zod3.string()).optional().describe(
-"Improve accuracy with up to 1000 domain-specific words or phrases (maximum 6 words per phrase).\n"
-),
-prompt: zod3.string().optional().describe("This parameter does not currently have any functionality attached to it."),
-auto_chapters: zod3.boolean().nullish().describe(
-"Whether [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) is enabled, can be true or false"
-),
-chapters: zod3.array(
-zod3.object({
-gist: zod3.string().describe(
-"An ultra-short summary (just a few words) of the content spoken in the chapter"
-),
-headline: zod3.string().describe("A single sentence summary of the content spoken during the chapter"),
-summary: zod3.string().describe("A one paragraph summary of the content spoken during the chapter"),
-start: zod3.number().describe("The starting time, in milliseconds, for the chapter"),
-end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
-}).describe("Chapter of the audio file")
-).nullish().describe("An array of temporally sequential chapters for the audio file"),
-summarization: zod3.boolean().describe(
-"Whether [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled, either true or false"
-),
-summary_type: zod3.string().nullish().describe(
-"The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled"
-),
-summary_model: zod3.string().nullish().describe(
-"The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled\n"
-),
-summary: zod3.string().nullish().describe(
-"The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled"
-),
-custom_topics: zod3.boolean().nullish().describe("Whether custom topics is enabled, either true or false"),
-topics: zod3.array(zod3.string()).optional().describe("The list of custom topics provided if custom topics is enabled"),
-sentiment_analysis: zod3.boolean().nullish().describe(
-"Whether [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) is enabled, can be true or false"
-),
-sentiment_analysis_results: zod3.array(
-zod3.object({
-text: zod3.string().describe("The transcript of the sentence"),
-start: zod3.number().describe("The starting time, in milliseconds, of the sentence"),
-end: zod3.number().describe("The ending time, in milliseconds, of the sentence"),
-sentiment: zod3.enum(["POSITIVE", "NEUTRAL", "NEGATIVE"]),
-confidence: zod3.number().min(createTranscriptResponseSentimentAnalysisResultsItemConfidenceMin).max(createTranscriptResponseSentimentAnalysisResultsItemConfidenceMax).describe(
-"The confidence score for the detected sentiment of the sentence, from 0 to 1"
-),
-channel: zod3.string().nullish().describe(
-"The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
-),
-speaker: zod3.string().nullable().describe(
-"The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
-)
-}).describe("The result of the Sentiment Analysis model")
 ).nullish().describe(
-"
+"Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
 ),
-
-
+disfluencies: zod3.boolean().nullish().describe(
+'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
+),
+domain: zod3.string().nullish().describe(
+'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
 ),
 entities: zod3.array(
 zod3.object({
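Note: the schemas in this file spell "null or missing" two ways, `.nullish()` and `.or(zod3.null()).optional()`. Both accept the same inputs, as this sketch shows:

import { z } from "zod";

const viaNullish = z.string().nullish();
const viaUnion = z.string().or(z.null()).optional();

for (const value of ["x", null, undefined]) {
  console.log(viaNullish.safeParse(value).success, viaUnion.safeParse(value).success);
  // prints "true true" for all three values
}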
@@ -12597,108 +13138,49 @@ var createTranscriptResponse = zod3.object({
 )
 }).describe("A detected entity")
 ).nullish().describe(
-"An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/
-),
-speech_threshold: zod3.number().min(createTranscriptResponseSpeechThresholdMin).max(createTranscriptResponseSpeechThresholdMax).nullish().describe(
-"Defaults to null. Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n"
+"An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
 ),
-
-"
+entity_detection: zod3.boolean().nullish().describe(
+"Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
 ),
 error: zod3.string().optional().describe("Error message of why the transcript failed"),
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-)
-
-
-)
-
-
-
-
-
-
-"The
-),
-
-"The URL to the next page of transcripts. The next URL always points to a page with newer transcripts."
-)
+filter_profanity: zod3.boolean().nullish().describe(
+"Whether [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) is enabled, either true or false"
+),
+format_text: zod3.boolean().nullish().describe(
+"Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
+),
+iab_categories: zod3.boolean().nullish().describe(
+"Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
+),
+iab_categories_result: zod3.object({
+status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
+results: zod3.array(
+zod3.object({
+text: zod3.string().describe("The text in the transcript in which a detected topic occurs"),
+labels: zod3.array(
+zod3.object({
+relevance: zod3.number().describe("How relevant the detected topic is"),
+label: zod3.string().describe(
+"The IAB taxonomical label for the label of the detected topic, where > denotes supertopic/subtopic relationship"
+)
+})
+).optional().describe("An array of detected topics in the text"),
+timestamp: zod3.object({
+start: zod3.number().describe("The start time in milliseconds"),
+end: zod3.number().describe("The end time in milliseconds")
+}).optional().describe("Timestamp containing a start and end property in milliseconds")
+}).describe("The result of the topic detection model")
+).describe("An array of results for the Topic Detection model"),
+summary: zod3.record(zod3.string(), zod3.number()).describe("The overall relevance of topic to the entire audio file")
 }).describe(
-"
+"The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
+).or(zod3.null()).optional().describe(
+"The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
 ),
-transcripts: zod3.array(
-zod3.object({
-id: zod3.string().uuid().describe("The unique identifier for the transcript"),
-resource_url: zod3.string().describe("The URL to retrieve the transcript"),
-status: zod3.enum(["queued", "processing", "completed", "error"]).describe(
-"The status of your transcript. Possible values are queued, processing, completed, or error."
-),
-created: zod3.string().regex(listTranscriptsResponseTranscriptsItemCreatedRegExp).describe("The date and time the transcript was created"),
-completed: zod3.string().regex(listTranscriptsResponseTranscriptsItemCompletedRegExp).nullable().describe("The date and time the transcript was completed"),
-audio_url: zod3.string().describe("The URL to the audio file"),
-error: zod3.string().nullable().describe("Error message of why the transcript failed")
-})
-).describe("An array of transcripts")
-}).describe(
-"A list of transcripts. Transcripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts."
-);
-var getTranscriptParams = zod3.object({
-transcript_id: zod3.string().describe("ID of the transcript")
-});
-var getTranscriptResponseLanguageConfidenceThresholdMin = 0;
-var getTranscriptResponseLanguageConfidenceThresholdMax = 1;
-var getTranscriptResponseLanguageConfidenceMin = 0;
-var getTranscriptResponseLanguageConfidenceMax = 1;
-var getTranscriptResponseSpeechModelDefault = null;
-var getTranscriptResponseWordsItemConfidenceMin = 0;
-var getTranscriptResponseWordsItemConfidenceMax = 1;
-var getTranscriptResponseUtterancesItemConfidenceMin = 0;
-var getTranscriptResponseUtterancesItemConfidenceMax = 1;
-var getTranscriptResponseUtterancesItemWordsItemConfidenceMin = 0;
-var getTranscriptResponseUtterancesItemWordsItemConfidenceMax = 1;
-var getTranscriptResponseConfidenceMin = 0;
-var getTranscriptResponseConfidenceMax = 1;
-var getTranscriptResponseAutoHighlightsResultResultsItemRankMin = 0;
-var getTranscriptResponseAutoHighlightsResultResultsItemRankMax = 1;
-var getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMin = 0;
-var getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax = 1;
-var getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin = 0;
-var getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax = 1;
-var getTranscriptResponseContentSafetyLabelsSummaryMinOne = 0;
-var getTranscriptResponseContentSafetyLabelsSummaryMaxOne = 1;
-var getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMin = 0;
-var getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryLowMax = 1;
-var getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMin = 0;
-var getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryMediumMax = 1;
-var getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMin = 0;
-var getTranscriptResponseContentSafetyLabelsSeverityScoreSummaryHighMax = 1;
-var getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin = 0;
-var getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax = 1;
-var getTranscriptResponseIabCategoriesResultSummaryMinOne = 0;
-var getTranscriptResponseIabCategoriesResultSummaryMaxOne = 1;
-var getTranscriptResponseSentimentAnalysisResultsItemConfidenceMin = 0;
-var getTranscriptResponseSentimentAnalysisResultsItemConfidenceMax = 1;
-var getTranscriptResponseSpeechThresholdMin = 0;
-var getTranscriptResponseSpeechThresholdMax = 1;
-var getTranscriptResponse = zod3.object({
 id: zod3.string().uuid().describe("The unique identifier of your transcript"),
-
-
-"The status of your transcript. Possible values are queued, processing, completed, or error."
+keyterms_prompt: zod3.array(zod3.string()).optional().describe(
+"Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3 Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.\n"
 ),
 language_code: zod3.enum([
 "en",
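Note: the hunk below makes `language_code` optional and introduces a `language_codes` array for code switching whose description says one value must be `en`; the shipped schema documents that constraint but does not enforce it. A sketch of enforcing it client-side with `.refine()` (an assumption about how a consumer might check it, not part of the package):

import { z } from "zod";

const languageCodesSketch = z.array(z.string()).refine(
  (codes) => codes.includes("en"),
  { message: 'language_codes must include "en"' },
);

console.log(languageCodesSketch.safeParse(["en", "es"]).success); // true
console.log(languageCodesSketch.safeParse(["es"]).success);       // false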
@@ -12803,138 +13285,175 @@ var getTranscriptResponse = zod3.object({
 "cy",
 "yi",
 "yo"
-]).describe(
-"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/
-).or(zod3.string()).optional().describe(
-"The language of your audio file.\nPossible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n"
-),
-language_detection: zod3.boolean().nullish().describe(
-"Whether [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection) is enabled, either true or false"
-),
-language_confidence_threshold: zod3.number().min(getTranscriptResponseLanguageConfidenceThresholdMin).max(getTranscriptResponseLanguageConfidenceThresholdMax).nullable().describe(
-"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\n"
-),
-language_confidence: zod3.number().min(getTranscriptResponseLanguageConfidenceMin).max(getTranscriptResponseLanguageConfidenceMax).nullable().describe(
-"The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence)"
-),
-speech_model: zod3.enum(["best", "slam-1", "universal"]).describe("The speech model to use for the transcription.").or(zod3.null()).describe(
-"The speech model used for the transcription. When `null`, the default model is used."
-),
-text: zod3.string().nullish().describe("The textual transcript of your media file"),
-words: zod3.array(
-zod3.object({
-confidence: zod3.number().min(getTranscriptResponseWordsItemConfidenceMin).max(getTranscriptResponseWordsItemConfidenceMax).describe("The confidence score for the transcript of this word"),
-start: zod3.number().describe("The starting time, in milliseconds, for the word"),
-end: zod3.number().describe("The ending time, in milliseconds, for the word"),
-text: zod3.string().describe("The text of the word"),
-channel: zod3.string().nullish().describe(
-"The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
-),
-speaker: zod3.string().nullable().describe(
-"The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
-)
-})
-).nullish().describe(
-"An array of temporally-sequential word objects, one for each word in the transcript.\nSee [Speech recognition](https://www.assemblyai.com/docs/models/speech-recognition) for more information.\n"
+]).optional().describe(
+"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
 ),
-
-zod3.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+language_codes: zod3.array(
+zod3.enum([
+"en",
+"en_au",
+"en_uk",
+"en_us",
+"es",
+"fr",
+"de",
+"it",
+"pt",
+"nl",
+"af",
+"sq",
+"am",
+"ar",
+"hy",
+"as",
+"az",
+"ba",
+"eu",
+"be",
+"bn",
+"bs",
+"br",
+"bg",
+"my",
+"ca",
+"zh",
+"hr",
+"cs",
+"da",
+"et",
+"fo",
+"fi",
+"gl",
+"ka",
+"el",
+"gu",
+"ht",
+"ha",
+"haw",
+"he",
+"hi",
+"hu",
+"is",
+"id",
+"ja",
+"jw",
+"kn",
+"kk",
+"km",
+"ko",
+"lo",
+"la",
+"lv",
+"ln",
+"lt",
+"lb",
+"mk",
+"mg",
+"ms",
+"ml",
+"mt",
+"mi",
+"mr",
+"mn",
+"ne",
+"no",
+"nn",
+"oc",
+"pa",
+"ps",
+"fa",
+"pl",
+"ro",
+"ru",
+"sa",
+"sr",
+"sn",
+"sd",
+"si",
+"sk",
+"sl",
+"so",
+"su",
+"sw",
+"sv",
+"tl",
+"tg",
+"ta",
+"tt",
+"te",
+"th",
+"bo",
+"tr",
+"tk",
+"uk",
+"ur",
+"uz",
+"vi",
+"cy",
+"yi",
+"yo"
+]).describe(
+"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
+)
 ).nullish().describe(
-"
-),
-confidence: zod3.number().min(getTranscriptResponseConfidenceMin).max(getTranscriptResponseConfidenceMax).nullish().describe(
-"The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
+"The language codes of your audio file. Used for [Code switching](/docs/speech-to-text/pre-recorded-audio/code-switching)\nOne of the values specified must be `en`.\n"
 ),
-
-
-format_text: zod3.boolean().nullish().describe("Whether Text Formatting is enabled, either true or false"),
-disfluencies: zod3.boolean().nullish().describe('Transcribe Filler Words, like "umm", in your media file; can be true or false'),
-multichannel: zod3.boolean().nullish().describe(
-"Whether [Multichannel transcription](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) was enabled in the transcription request, either true or false"
-),
-audio_channels: zod3.number().optional().describe(
-"The number of audio channels in the audio file. This is only present when multichannel is enabled."
+language_confidence: zod3.number().nullable().describe(
+"The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence). See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details."
 ),
-
-"The
-),
-webhook_status_code: zod3.number().nullish().describe(
-"The status code we received from your server when delivering the transcript completed or failed webhook request, if a webhook URL was provided"
+language_confidence_threshold: zod3.number().nullable().describe(
+"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nSee [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n"
 ),
-
-
-"The header name to be sent with the transcript completed or failed webhook requests"
+language_detection: zod3.boolean().nullish().describe(
+"Whether [Automatic language detection](/docs/pre-recorded-audio/automatic-language-detection) is enabled, either true or false"
 ),
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-})
-).describe("A temporally-sequential array of Key Phrases")
-}).describe(
-"An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.\n"
-).or(zod3.null()).optional().describe(
-"An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.\n"
+language_detection_options: zod3.object({
+expected_languages: zod3.array(zod3.string()).optional().describe(
+'List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.'
+),
+fallback_language: zod3.string().default(getTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault).describe(
+'If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n'
+),
+code_switching: zod3.boolean().optional().describe(
+"Whether [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) should be detected.\n"
+),
+code_switching_confidence_threshold: zod3.number().default(
+getTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault
+).describe(
+"The confidence threshold for [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.\n"
+)
+}).optional().describe(
+"Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
 ),
-
-"
+multichannel: zod3.boolean().nullish().describe(
+"Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
 ),
-
-"
+prompt: zod3.string().optional().describe(
+"Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
 ),
-
-
-filter_profanity: zod3.boolean().nullish().describe(
-"Whether [Profanity Filtering](https://www.assemblyai.com/docs/models/speech-recognition#profanity-filtering) is enabled, either true or false"
+punctuate: zod3.boolean().nullish().describe(
+"Whether [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
 ),
 redact_pii: zod3.boolean().describe(
-"Whether [PII Redaction](https://www.assemblyai.com/docs/
+"Whether [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) is enabled, either true or false"
 ),
 redact_pii_audio: zod3.boolean().nullish().describe(
-"Whether a redacted version of the audio file was generated,\neither true or false. See [PII redaction](https://www.assemblyai.com/docs/
+"Whether a redacted version of the audio file was generated,\neither true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
+),
+redact_pii_audio_options: zod3.object({
+return_redacted_no_speech_audio: zod3.boolean().optional().describe(
+"By default, audio redaction provides redacted audio URLs only when speech is detected. However, if your use-case specifically requires redacted audio files even for silent audio files without any dialogue, you can opt to receive these URLs by setting this parameter to `true`."
+),
+override_audio_redaction_method: zod3.enum(["silence"]).optional().describe(
+"Specify the method used to redact audio. By default, redacted audio uses a beep sound. Set to `silence` to replace PII with silence instead of a beep."
+)
+}).optional().describe(
+"The options for PII-redacted audio, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
 ),
 redact_pii_audio_quality: zod3.enum(["mp3", "wav"]).describe(
-"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/
+"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details."
 ).or(zod3.null()).optional().describe(
-"The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/
+"The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
 ),
 redact_pii_policies: zod3.array(
 zod3.enum([
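Note: `redact_pii_audio_options`, added just above, constrains `override_audio_redaction_method` with `zod3.enum(["silence"])`, so "silence" is the only accepted override and anything else fails validation. A local sketch:

import { z } from "zod";

const redactOptionsSketch = z.object({
  return_redacted_no_speech_audio: z.boolean().optional(),
  override_audio_redaction_method: z.enum(["silence"]).optional(),
});

console.log(redactOptionsSketch.safeParse({ override_audio_redaction_method: "silence" }).success); // true
console.log(redactOptionsSketch.safeParse({ override_audio_redaction_method: "beep" }).success);    // false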
@@ -12984,19 +13503,307 @@ var getTranscriptResponse = zod3.object({
|
|
|
12984
13503
|
"zodiac_sign"
|
|
12985
13504
|
]).describe("The type of PII to redact")
|
|
12986
13505
|
).nullish().describe(
|
|
12987
|
-
"The list of PII Redaction policies that were enabled, if PII Redaction is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/
|
|
13506
|
+
"The list of PII Redaction policies that were enabled, if PII Redaction is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more information.\n"
|
|
12988
13507
|
),
|
|
12989
13508
|
redact_pii_sub: zod3.enum(["entity_name", "hash"]).optional().describe(
|
|
12990
|
-
|
|
13509
|
+
"The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
|
|
13510
|
+
),
|
|
13511
|
+
sentiment_analysis: zod3.boolean().nullish().describe(
|
|
13512
|
+
"Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
|
|
13513
|
+
),
|
|
13514
|
+
sentiment_analysis_results: zod3.array(
|
|
13515
|
+
zod3.object({
|
|
13516
|
+
text: zod3.string().describe("The transcript of the sentence"),
|
|
13517
|
+
start: zod3.number().describe("The starting time, in milliseconds, of the sentence"),
|
|
13518
|
+
end: zod3.number().describe("The ending time, in milliseconds, of the sentence"),
|
|
13519
|
+
sentiment: zod3.enum(["POSITIVE", "NEUTRAL", "NEGATIVE"]),
|
|
13520
|
+
confidence: zod3.number().describe(
|
|
13521
|
+
"The confidence score for the detected sentiment of the sentence, from 0 to 1"
|
|
13522
|
+
),
|
|
13523
|
+
channel: zod3.string().nullish().describe(
|
|
13524
|
+
"The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
|
|
13525
|
+
),
|
|
13526
|
+
speaker: zod3.string().nullable().describe(
|
|
13527
|
+
"The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
|
|
13528
|
+
)
|
|
13529
|
+
}).describe("The result of the Sentiment Analysis model")
|
|
13530
|
+
).nullish().describe(
|
|
13531
|
+
"An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
|
|
12991
13532
|
),
|
|
12992
13533
|
speaker_labels: zod3.boolean().nullish().describe(
|
|
12993
|
-
"Whether [Speaker diarization](https://www.assemblyai.com/docs/
|
|
13534
|
+
"Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
|
|
12994
13535
|
),
|
|
12995
13536
|
speakers_expected: zod3.number().nullish().describe(
|
|
12996
|
-
"Tell the speaker label model how many speakers it should attempt to identify. See [
|
|
13537
|
+
"Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
|
|
13538
|
+
),
|
|
13539
|
+
speech_model_used: zod3.string().optional().describe(
|
|
13540
|
+
"The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
|
|
13541
|
+
),
|
|
13542
|
+
speech_models: zod3.array(
|
|
13543
|
+
zod3.string().describe(
|
|
13544
|
+
"The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
|
|
13545
|
+
)
|
|
13546
|
+
).nullish().describe(
|
|
13547
|
+
"List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.\n"
|
|
13548
|
+
),
|
|
13549
|
+
speech_threshold: zod3.number().nullish().describe(
|
|
13550
|
+
"Defaults to null. Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.\n"
|
|
13551
|
+
),
|
|
+speech_understanding: zod3.object({
+request: zod3.object({
+translation: zod3.object({
+target_languages: zod3.array(zod3.string()).describe(
+'List of target language codes (e.g., `["es", "de"]`). See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for supported languages.'
+),
+formal: zod3.boolean().default(getTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault).describe(
+"Use formal language style. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
+),
+match_original_utterance: zod3.boolean().optional().describe(
+"When enabled with Speaker Labels, returns translated text in the utterances array. Each utterance will include a `translated_texts` key containing translations for each target language."
+)
+})
+}).describe(
+"Request body for [Translation](https://www.assemblyai.com/docs/speech-understanding/translation)."
+).or(
+zod3.object({
+speaker_identification: zod3.object({
+speaker_type: zod3.enum(["role", "name"]).describe(
+"Type of speaker identification. See [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification) for details on each type."
+),
+known_values: zod3.array(zod3.string()).optional().describe(
+'Required if speaker_type is "role". Each value must be 35 characters or less.'
+),
+speakers: zod3.array(
+zod3.object({
+role: zod3.string().optional().describe(
+'The role of the speaker. Required when `speaker_type` is "role".'
+),
+name: zod3.string().optional().describe(
+'The name of the speaker. Required when `speaker_type` is "name".'
+),
+description: zod3.string().optional().describe(
+"A description of the speaker to help the model identify them based on conversational context."
+)
+})
+).optional().describe(
+"An array of speaker objects with metadata to improve identification accuracy. Each object should include a `role` or `name` (depending on `speaker_type`) and an optional `description` to help the model identify the speaker. You can also include any additional custom properties (e.g., `company`, `title`) to provide more context. Use this as an alternative to `known_values` when you want to provide additional context about each speaker."
+)
+})
+}).describe(
+"Request body for [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification)."
+)
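As the describe strings above lay out, a speaker_identification request either supplies bare `known_values` or a `speakers` array with per-speaker context. A hypothetical payload of the second kind, with all names and descriptions invented purely for illustration:

    // Hypothetical request body matching the speaker_identification member above.
    const speakerIdentificationRequest = {
      speaker_identification: {
        speaker_type: "name",
        speakers: [
          { name: "Alex", description: "Hosts the call and asks most of the questions" },
          { name: "Sam", description: "Customer describing a billing issue" }
        ]
      }
    };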
+).or(
+zod3.object({
+custom_formatting: zod3.object({
+date: zod3.string().optional().describe(
+'Date format pattern (e.g., `"mm/dd/yyyy"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
+),
+phone_number: zod3.string().optional().describe(
+'Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
+),
+email: zod3.string().optional().describe(
+'Email format pattern (e.g., `"username@domain.com"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
+)
+})
+}).describe(
+"Request body for [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting)."
+)
+).optional(),
+response: zod3.object({
+translation: zod3.object({
+status: zod3.string().optional()
+}).optional()
+}).or(
+zod3.object({
+speaker_identification: zod3.object({
+mapping: zod3.record(zod3.string(), zod3.string()).optional().describe(
+'A mapping of the original generic speaker labels (e.g., "A", "B") to the identified speaker names or roles.'
+),
+status: zod3.string().optional()
+}).optional()
+})
+).or(
+zod3.object({
+custom_formatting: zod3.object({
+mapping: zod3.record(zod3.string(), zod3.string()).optional(),
+formatted_text: zod3.string().optional()
+}).optional()
+})
+).optional()
+}).optional().describe(
+"Speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.\n"
+),
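On the response side, `speech_understanding.response.speaker_identification.mapping` carries the generic-label-to-name mapping, which pairs naturally with the `utterances` array. A sketch of applying it, assuming a parsed transcript object shaped like this schema:

    // Relabel diarized utterances using the response mapping above,
    // e.g. { "A": "Dr. Smith", "B": "Patient" }.
    function relabelUtterances(transcript) {
      const mapping =
        transcript.speech_understanding?.response?.speaker_identification?.mapping ?? {};
      return (transcript.utterances ?? []).map((utterance) => ({
        ...utterance,
        speaker: mapping[utterance.speaker] ?? utterance.speaker
      }));
    }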
+status: zod3.enum(["queued", "processing", "completed", "error"]).describe(
+"The status of your transcript. Possible values are queued, processing, completed, or error."
+),
+summarization: zod3.boolean().describe(
+"Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+),
+summary: zod3.string().nullish().describe(
+"The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
+),
+summary_model: zod3.string().nullish().describe(
+"The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
+),
+summary_type: zod3.string().nullish().describe(
+"The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
+),
+remove_audio_tags: zod3.enum(["all"]).describe(
+"Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
+).or(zod3.null()).optional().describe(
+"Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
+),
+temperature: zod3.number().nullish().describe(
+"The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
+),
+text: zod3.string().nullish().describe("The textual transcript of your media file"),
+throttled: zod3.boolean().nullish().describe(
+"True while a request is throttled and false when a request is no longer throttled"
+),
+utterances: zod3.array(
+zod3.object({
+confidence: zod3.number().describe("The confidence score for the transcript of this utterance"),
+start: zod3.number().describe("The starting time, in milliseconds, of the utterance in the audio file"),
+end: zod3.number().describe("The ending time, in milliseconds, of the utterance in the audio file"),
+text: zod3.string().describe("The text for this utterance"),
+words: zod3.array(
+zod3.object({
+confidence: zod3.number().describe("The confidence score for the transcript of this word"),
+start: zod3.number().describe("The starting time, in milliseconds, for the word"),
+end: zod3.number().describe("The ending time, in milliseconds, for the word"),
+text: zod3.string().describe("The text of the word"),
+channel: zod3.string().nullish().describe(
+"The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
+),
+speaker: zod3.string().nullable().describe(
+"The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+)
+})
+).describe("The words in the utterance."),
+channel: zod3.string().nullish().describe(
+"The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
+),
+speaker: zod3.string().describe(
+'The speaker of this utterance, where each speaker is assigned a sequential capital letter - e.g. "A" for Speaker A, "B" for Speaker B, etc.'
+),
+translated_texts: zod3.record(zod3.string(), zod3.string()).optional().describe(
+'Translations keyed by language code (e.g., `{"es": "Texto traducido", "de": "\xDCbersetzter Text"}`). Only present when `match_original_utterance` is enabled with translation.'
+)
+})
+).nullish().describe(
+"When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
+),
+webhook_auth: zod3.boolean().describe(
+"Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
+),
+webhook_auth_header_name: zod3.string().nullish().describe(
+"The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests"
+),
+webhook_status_code: zod3.number().nullish().describe(
+"The status code we received from your server when delivering the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) request, if a webhook URL was provided"
+),
+webhook_url: zod3.string().nullish().describe(
+"The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.\n"
+),
+words: zod3.array(
+zod3.object({
+confidence: zod3.number().describe("The confidence score for the transcript of this word"),
+start: zod3.number().describe("The starting time, in milliseconds, for the word"),
+end: zod3.number().describe("The ending time, in milliseconds, for the word"),
+text: zod3.string().describe("The text of the word"),
+channel: zod3.string().nullish().describe(
+"The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
+),
+speaker: zod3.string().nullable().describe(
+"The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+)
+})
+).nullish().describe(
+"An array of temporally-sequential word objects, one for each word in the transcript.\n"
+),
+acoustic_model: zod3.string().describe("This parameter does not currently have any functionality attached to it."),
+custom_topics: zod3.boolean().nullish().describe("This parameter does not currently have any functionality attached to it."),
+language_model: zod3.string().describe("This parameter does not currently have any functionality attached to it."),
+speech_model: zod3.string().describe(
+"The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
+).or(zod3.null()).describe(
+"This parameter has been replaced with the `speech_models` parameter, learn more about the `speech_models` parameter [here](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model).\n"
+),
+speed_boost: zod3.boolean().nullish().describe("This parameter does not currently have any functionality attached to it."),
+topics: zod3.array(zod3.string()).optional().describe("This parameter does not currently have any functionality attached to it."),
+translated_texts: zod3.object({
+language_code: zod3.string().optional().describe("Translated text for this language code")
+}).optional().describe(
+"Translated text keyed by language code. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
+)
+}).describe("A transcript object");
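With the transcript schema closed off, a webhook consumer can validate incoming payloads before trusting them. A minimal sketch, assuming the `getTranscriptResponse` schema assembled above is in scope and `payload` is whatever the HTTP handler received:

    // Validate a webhook payload against the transcript schema above.
    function handleTranscriptWebhook(payload) {
      const result = getTranscriptResponse.safeParse(payload);
      if (!result.success) {
        throw new Error(`Invalid transcript payload: ${result.error.message}`);
      }
      // status is "queued" | "processing" | "completed" | "error"
      return result.data.status === "completed" ? result.data.text : null;
    }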
+var deleteTranscriptParams = zod3.object({
+transcript_id: zod3.string().describe("ID of the transcript")
+});
+var deleteTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault = "auto";
+var deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingDefault = false;
+var deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault = 0.3;
+var deleteTranscriptResponseRedactPiiAudioOptionsReturnRedactedNoSpeechAudioDefault = false;
+var deleteTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault = true;
+var deleteTranscriptResponseSpeechUnderstandingRequestTranslationMatchOriginalUtteranceDefault = false;
+var deleteTranscriptResponse = zod3.object({
+audio_channels: zod3.number().optional().describe(
+"The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled."
+),
+audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
+audio_end_at: zod3.number().nullish().describe(
+"The point in time, in milliseconds, in the file at which the transcription was terminated. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
+),
+audio_start_from: zod3.number().nullish().describe(
+"The point in time, in milliseconds, in the file at which the transcription was started. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details."
+),
+audio_url: zod3.string().describe("The URL of the media that was transcribed"),
+auto_chapters: zod3.boolean().nullish().describe(
+"Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible chapter summaries. See the [updated Auto Chapters page](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
+),
+auto_highlights: zod3.boolean().describe(
+"Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false"
+),
+auto_highlights_result: zod3.object({
+status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
+results: zod3.array(
+zod3.object({
+count: zod3.number().describe("The total number of times the key phrase appears in the audio file"),
+rank: zod3.number().describe(
+"The total relevancy to the overall audio file of this key phrase - a greater number means more relevant"
+),
+text: zod3.string().describe("The text itself of the key phrase"),
+timestamps: zod3.array(
+zod3.object({
+start: zod3.number().describe("The start time in milliseconds"),
+end: zod3.number().describe("The end time in milliseconds")
+}).describe("Timestamp containing a start and end property in milliseconds")
+).describe("The timestamp of the of the key phrase")
+})
+).describe("A temporally-sequential array of Key Phrases")
+}).describe(
+"An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+).or(zod3.null()).optional().describe(
+"An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.\n"
+),
+chapters: zod3.array(
+zod3.object({
+gist: zod3.string().describe(
+"An ultra-short summary (just a few words) of the content spoken in the chapter"
+),
+headline: zod3.string().describe("A single sentence summary of the content spoken during the chapter"),
+summary: zod3.string().describe("A one paragraph summary of the content spoken during the chapter"),
+start: zod3.number().describe("The starting time, in milliseconds, for the chapter"),
+end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
+}).describe("Chapter of the audio file")
+).nullish().describe(
+"An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information."
+),
+confidence: zod3.number().nullish().describe(
+"The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
 ),
 content_safety: zod3.boolean().nullish().describe(
-"Whether [Content Moderation](https://www.assemblyai.com/docs/
+"Whether [Content Moderation](https://www.assemblyai.com/docs/content-moderation) is enabled, can be true or false"
 ),
 content_safety_labels: zod3.object({
 status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
@@ -13006,12 +13813,8 @@ var getTranscriptResponse = zod3.object({
 labels: zod3.array(
 zod3.object({
 label: zod3.string().describe("The label of the sensitive topic"),
-confidence: zod3.number().
-
-).max(
-getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemConfidenceMax
-).describe("The confidence score for the topic being discussed, from 0 to 1"),
-severity: zod3.number().min(getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMin).max(getTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax).describe("How severely the topic is discussed in the section, from 0 to 1")
+confidence: zod3.number().describe("The confidence score for the topic being discussed, from 0 to 1"),
+severity: zod3.number().describe("How severely the topic is discussed in the section, from 0 to 1")
 })
 ).describe(
 "An array of safety labels, one per sensitive topic that was detected in the section"
@@ -13024,128 +13827,39 @@ var getTranscriptResponse = zod3.object({
 }).describe("Timestamp containing a start and end property in milliseconds")
 })
 ).describe("An array of results for the Content Moderation model"),
-summary: zod3.record(
-zod3.string(),
-zod3.number().min(getTranscriptResponseContentSafetyLabelsSummaryMinOne).max(getTranscriptResponseContentSafetyLabelsSummaryMaxOne).describe(
-'A confidence score for the presence of the sensitive topic "topic" across the entire audio file'
-)
-).describe(
+summary: zod3.record(zod3.string(), zod3.number()).describe(
 "A summary of the Content Moderation confidence results for the entire audio file"
 ),
 severity_score_summary: zod3.record(
 zod3.string(),
 zod3.object({
-low: zod3.number()
-medium: zod3.number()
-high: zod3.number()
+low: zod3.number(),
+medium: zod3.number(),
+high: zod3.number()
 })
 ).describe(
 "A summary of the Content Moderation severity results for the entire audio file"
 )
 }).describe(
-"An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/
-).or(zod3.null()).optional().describe(
-"An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.\n"
-),
-iab_categories: zod3.boolean().nullish().describe(
-"Whether [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) is enabled, can be true or false"
-),
-iab_categories_result: zod3.object({
-status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
-results: zod3.array(
-zod3.object({
-text: zod3.string().describe("The text in the transcript in which a detected topic occurs"),
-labels: zod3.array(
-zod3.object({
-relevance: zod3.number().min(
-getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMin
-).max(
-getTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax
-).describe("How relevant the detected topic is of a detected topic"),
-label: zod3.string().describe(
-"The IAB taxonomical label for the label of the detected topic, where > denotes supertopic/subtopic relationship"
-)
-})
-).optional().describe("An array of detected topics in the text"),
-timestamp: zod3.object({
-start: zod3.number().describe("The start time in milliseconds"),
-end: zod3.number().describe("The end time in milliseconds")
-}).optional().describe("Timestamp containing a start and end property in milliseconds")
-}).describe("The result of the topic detection model")
-).describe("An array of results for the Topic Detection model"),
-summary: zod3.record(
-zod3.string(),
-zod3.number().min(getTranscriptResponseIabCategoriesResultSummaryMinOne).max(getTranscriptResponseIabCategoriesResultSummaryMaxOne)
-).describe("The overall relevance of topic to the entire audio file")
-}).describe(
-"The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.\n"
+"An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.\n"
 ).or(zod3.null()).optional().describe(
-"
+"An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.\n"
 ),
 custom_spelling: zod3.array(
 zod3.object({
-from: zod3.array(zod3.string()
+from: zod3.array(zod3.string()).describe("Words or phrases to replace"),
 to: zod3.string().describe("Word to replace with")
 }).describe(
 "Object containing words or phrases to replace, and the word or phrase to replace with"
 )
-).nullish().describe("Customize how words are spelled and formatted using to and from values"),
-keyterms_prompt: zod3.array(zod3.string()).optional().describe(
-"Improve accuracy with up to 1000 domain-specific words or phrases (maximum 6 words per phrase).\n"
-),
-prompt: zod3.string().optional().describe("This parameter does not currently have any functionality attached to it."),
-auto_chapters: zod3.boolean().nullish().describe(
-"Whether [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) is enabled, can be true or false"
-),
-chapters: zod3.array(
-zod3.object({
-gist: zod3.string().describe(
-"An ultra-short summary (just a few words) of the content spoken in the chapter"
-),
-headline: zod3.string().describe("A single sentence summary of the content spoken during the chapter"),
-summary: zod3.string().describe("A one paragraph summary of the content spoken during the chapter"),
-start: zod3.number().describe("The starting time, in milliseconds, for the chapter"),
-end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
-}).describe("Chapter of the audio file")
-).nullish().describe("An array of temporally sequential chapters for the audio file"),
-summarization: zod3.boolean().describe(
-"Whether [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled, either true or false"
-),
-summary_type: zod3.string().nullish().describe(
-"The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled"
-),
-summary_model: zod3.string().nullish().describe(
-"The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled\n"
-),
-summary: zod3.string().nullish().describe(
-"The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled"
-),
-custom_topics: zod3.boolean().nullish().describe("Whether custom topics is enabled, either true or false"),
-topics: zod3.array(zod3.string()).optional().describe("The list of custom topics provided if custom topics is enabled"),
-sentiment_analysis: zod3.boolean().nullish().describe(
-"Whether [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) is enabled, can be true or false"
-),
-sentiment_analysis_results: zod3.array(
-zod3.object({
-text: zod3.string().describe("The transcript of the sentence"),
-start: zod3.number().describe("The starting time, in milliseconds, of the sentence"),
-end: zod3.number().describe("The ending time, in milliseconds, of the sentence"),
-sentiment: zod3.enum(["POSITIVE", "NEUTRAL", "NEGATIVE"]),
-confidence: zod3.number().min(getTranscriptResponseSentimentAnalysisResultsItemConfidenceMin).max(getTranscriptResponseSentimentAnalysisResultsItemConfidenceMax).describe(
-"The confidence score for the detected sentiment of the sentence, from 0 to 1"
-),
-channel: zod3.string().nullish().describe(
-"The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
-),
-speaker: zod3.string().nullable().describe(
-"The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
-)
-}).describe("The result of the Sentiment Analysis model")
 ).nullish().describe(
-"
+"Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details."
 ),
-
-
+disfluencies: zod3.boolean().nullish().describe(
+'Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false'
+),
+domain: zod3.string().nullish().describe(
+'The domain-specific model applied to the transcript. When set to `"medical-v1"`, [Medical Mode](https://www.assemblyai.com/docs/pre-recorded-audio/medical-mode) was used to improve accuracy for medical terminology.\n'
 ),
 entities: zod3.array(
 zod3.object({
@@ -13204,61 +13918,49 @@ var getTranscriptResponse = zod3.object({
 )
 }).describe("A detected entity")
 ).nullish().describe(
-"An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/
-),
-speech_threshold: zod3.number().min(getTranscriptResponseSpeechThresholdMin).max(getTranscriptResponseSpeechThresholdMax).nullish().describe(
-"Defaults to null. Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive.\n"
+"An array of results for the Entity Detection model, if it is enabled.\nSee [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.\n"
 ),
-
-"
+entity_detection: zod3.boolean().nullish().describe(
+"Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false"
 ),
 error: zod3.string().optional().describe("Error message of why the transcript failed"),
- … (34 removed lines not captured in this diff view)
-var deleteTranscriptResponseIabCategoriesResultResultsItemLabelsItemRelevanceMax = 1;
-var deleteTranscriptResponseIabCategoriesResultSummaryMinOne = 0;
-var deleteTranscriptResponseIabCategoriesResultSummaryMaxOne = 1;
-var deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMin = 0;
-var deleteTranscriptResponseSentimentAnalysisResultsItemConfidenceMax = 1;
-var deleteTranscriptResponseSpeechThresholdMin = 0;
-var deleteTranscriptResponseSpeechThresholdMax = 1;
-var deleteTranscriptResponse = zod3.object({
+filter_profanity: zod3.boolean().nullish().describe(
+"Whether [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) is enabled, either true or false"
+),
+format_text: zod3.boolean().nullish().describe(
+"Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
+),
+iab_categories: zod3.boolean().nullish().describe(
+"Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false"
+),
+iab_categories_result: zod3.object({
+status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
+results: zod3.array(
+zod3.object({
+text: zod3.string().describe("The text in the transcript in which a detected topic occurs"),
+labels: zod3.array(
+zod3.object({
+relevance: zod3.number().describe("How relevant the detected topic is of a detected topic"),
+label: zod3.string().describe(
+"The IAB taxonomical label for the label of the detected topic, where > denotes supertopic/subtopic relationship"
+)
+})
+).optional().describe("An array of detected topics in the text"),
+timestamp: zod3.object({
+start: zod3.number().describe("The start time in milliseconds"),
+end: zod3.number().describe("The end time in milliseconds")
+}).optional().describe("Timestamp containing a start and end property in milliseconds")
+}).describe("The result of the topic detection model")
+).describe("An array of results for the Topic Detection model"),
+summary: zod3.record(zod3.string(), zod3.number()).describe("The overall relevance of topic to the entire audio file")
+}).describe(
+"The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
+).or(zod3.null()).optional().describe(
+"The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.\n"
+),
 id: zod3.string().uuid().describe("The unique identifier of your transcript"),
-
-
-"The status of your transcript. Possible values are queued, processing, completed, or error."
+keyterms_prompt: zod3.array(zod3.string()).optional().describe(
+"Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3 Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.\n"
 ),
 language_code: zod3.enum([
 "en",
@@ -13363,138 +14065,175 @@ var deleteTranscriptResponse = zod3.object({
 "cy",
 "yi",
 "yo"
-]).describe(
-"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/
-).or(zod3.string()).optional().describe(
-"The language of your audio file.\nPossible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).\nThe default value is 'en_us'.\n"
-),
-language_detection: zod3.boolean().nullish().describe(
-"Whether [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection) is enabled, either true or false"
-),
-language_confidence_threshold: zod3.number().min(deleteTranscriptResponseLanguageConfidenceThresholdMin).max(deleteTranscriptResponseLanguageConfidenceThresholdMax).nullable().describe(
-"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\n"
-),
-language_confidence: zod3.number().min(deleteTranscriptResponseLanguageConfidenceMin).max(deleteTranscriptResponseLanguageConfidenceMax).nullable().describe(
-"The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence)"
-),
-speech_model: zod3.enum(["best", "slam-1", "universal"]).describe("The speech model to use for the transcription.").or(zod3.null()).describe(
-"The speech model used for the transcription. When `null`, the default model is used."
-),
-text: zod3.string().nullish().describe("The textual transcript of your media file"),
-words: zod3.array(
-zod3.object({
-confidence: zod3.number().min(deleteTranscriptResponseWordsItemConfidenceMin).max(deleteTranscriptResponseWordsItemConfidenceMax).describe("The confidence score for the transcript of this word"),
-start: zod3.number().describe("The starting time, in milliseconds, for the word"),
-end: zod3.number().describe("The ending time, in milliseconds, for the word"),
-text: zod3.string().describe("The text of the word"),
-channel: zod3.string().nullish().describe(
-"The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
-),
-speaker: zod3.string().nullable().describe(
-"The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null"
-)
-})
-).nullish().describe(
-"An array of temporally-sequential word objects, one for each word in the transcript.\nSee [Speech recognition](https://www.assemblyai.com/docs/models/speech-recognition) for more information.\n"
+]).optional().describe(
+"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
 ),
-
-zod3.
- … (25 removed lines not captured in this diff view)
+language_codes: zod3.array(
+zod3.enum([
+"en",
+"en_au",
+"en_uk",
+"en_us",
+"es",
+"fr",
+"de",
+"it",
+"pt",
+"nl",
+"af",
+"sq",
+"am",
+"ar",
+"hy",
+"as",
+"az",
+"ba",
+"eu",
+"be",
+"bn",
+"bs",
+"br",
+"bg",
+"my",
+"ca",
+"zh",
+"hr",
+"cs",
+"da",
+"et",
+"fo",
+"fi",
+"gl",
+"ka",
+"el",
+"gu",
+"ht",
+"ha",
+"haw",
+"he",
+"hi",
+"hu",
+"is",
+"id",
+"ja",
+"jw",
+"kn",
+"kk",
+"km",
+"ko",
+"lo",
+"la",
+"lv",
+"ln",
+"lt",
+"lb",
+"mk",
+"mg",
+"ms",
+"ml",
+"mt",
+"mi",
+"mr",
+"mn",
+"ne",
+"no",
+"nn",
+"oc",
+"pa",
+"ps",
+"fa",
+"pl",
+"ro",
+"ru",
+"sa",
+"sr",
+"sn",
+"sd",
+"si",
+"sk",
+"sl",
+"so",
+"su",
+"sw",
+"sv",
+"tl",
+"tg",
+"ta",
+"tt",
+"te",
+"th",
+"bo",
+"tr",
+"tk",
+"uk",
+"ur",
+"uz",
+"vi",
+"cy",
+"yi",
+"yo"
+]).describe(
+"The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).\nThe default value is 'en_us'.\n"
+)
 ).nullish().describe(
-"
-),
-confidence: zod3.number().min(deleteTranscriptResponseConfidenceMin).max(deleteTranscriptResponseConfidenceMax).nullish().describe(
-"The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)"
-),
-audio_duration: zod3.number().nullish().describe("The duration of this transcript object's media file, in seconds"),
-punctuate: zod3.boolean().nullish().describe("Whether Automatic Punctuation is enabled, either true or false"),
-format_text: zod3.boolean().nullish().describe("Whether Text Formatting is enabled, either true or false"),
-disfluencies: zod3.boolean().nullish().describe('Transcribe Filler Words, like "umm", in your media file; can be true or false'),
-multichannel: zod3.boolean().nullish().describe(
-"Whether [Multichannel transcription](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) was enabled in the transcription request, either true or false"
-),
-audio_channels: zod3.number().optional().describe(
-"The number of audio channels in the audio file. This is only present when multichannel is enabled."
+"The language codes of your audio file. Used for [Code switching](/docs/speech-to-text/pre-recorded-audio/code-switching)\nOne of the values specified must be `en`.\n"
 ),
-
-"The
+language_confidence: zod3.number().nullable().describe(
+"The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence). See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details."
 ),
-
-"The
+language_confidence_threshold: zod3.number().nullable().describe(
+"The confidence threshold for the automatically detected language.\nAn error will be returned if the language confidence is below this threshold.\nSee [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n"
 ),
-
-
-"The header name to be sent with the transcript completed or failed webhook requests"
+language_detection: zod3.boolean().nullish().describe(
+"Whether [Automatic language detection](/docs/pre-recorded-audio/automatic-language-detection) is enabled, either true or false"
 ),
- … (17 removed lines not captured in this diff view)
-})
-).describe("A temporally-sequential array of Key Phrases")
-}).describe(
-"An array of results for the Key Phrases model, if it is enabled.\nSee [Key phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.\n"
-).or(zod3.null()).optional().describe(
-"An array of results for the Key Phrases model, if it is enabled.\nSee [Key Phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.\n"
+language_detection_options: zod3.object({
+expected_languages: zod3.array(zod3.string()).optional().describe(
+'List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.'
+),
+fallback_language: zod3.string().default(deleteTranscriptResponseLanguageDetectionOptionsFallbackLanguageDefault).describe(
+'If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.\n'
+),
+code_switching: zod3.boolean().optional().describe(
+"Whether [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) should be detected.\n"
+),
+code_switching_confidence_threshold: zod3.number().default(
+deleteTranscriptResponseLanguageDetectionOptionsCodeSwitchingConfidenceThresholdDefault
+).describe(
+"The confidence threshold for [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.\n"
+)
+}).optional().describe(
+"Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection)."
 ),
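The `language_detection_options` object bundles expected languages, a fallback, and code-switching detection, with the defaults defined by the `deleteTranscriptResponse…Default` constants earlier in this file. A sketch of one such configuration, with the listed languages purely illustrative:

    // Sketch of a language detection configuration matching the
    // language_detection_options shape above.
    const languageDetectionOptions = {
      expected_languages: ["en", "es"],
      fallback_language: "auto",                 // default is "auto"
      code_switching: true,                      // default is false
      code_switching_confidence_threshold: 0.3   // default is 0.3
    };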
-
-"
+multichannel: zod3.boolean().nullish().describe(
+"Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false"
 ),
-
-"
+prompt: zod3.string().optional().describe(
+"Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
 ),
-
-
-filter_profanity: zod3.boolean().nullish().describe(
-"Whether [Profanity Filtering](https://www.assemblyai.com/docs/models/speech-recognition#profanity-filtering) is enabled, either true or false"
+punctuate: zod3.boolean().nullish().describe(
+"Whether [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false"
 ),
 redact_pii: zod3.boolean().describe(
-"Whether [PII Redaction](https://www.assemblyai.com/docs/
+"Whether [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) is enabled, either true or false"
 ),
 redact_pii_audio: zod3.boolean().nullish().describe(
-"Whether a redacted version of the audio file was generated,\neither true or false. See [PII redaction](https://www.assemblyai.com/docs/
+"Whether a redacted version of the audio file was generated,\neither true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
+),
+redact_pii_audio_options: zod3.object({
+return_redacted_no_speech_audio: zod3.boolean().optional().describe(
+"By default, audio redaction provides redacted audio URLs only when speech is detected. However, if your use-case specifically requires redacted audio files even for silent audio files without any dialogue, you can opt to receive these URLs by setting this parameter to `true`."
+),
+override_audio_redaction_method: zod3.enum(["silence"]).optional().describe(
+"Specify the method used to redact audio. By default, redacted audio uses a beep sound. Set to `silence` to replace PII with silence instead of a beep."
+)
+}).optional().describe(
+"The options for PII-redacted audio, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
 ),
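The new `redact_pii_audio_options` object controls how redacted audio is produced: whether URLs are returned even for silent files, and whether the beep is swapped for silence. A sketch of the shape, assuming only the fields shown above:

    // Sketch of PII audio redaction options matching the schema above.
    const redactPiiAudioOptions = {
      return_redacted_no_speech_audio: true,      // also return URLs for silent files
      override_audio_redaction_method: "silence"  // replace PII with silence, not a beep
    };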
 redact_pii_audio_quality: zod3.enum(["mp3", "wav"]).describe(
-"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/
+"Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details."
 ).or(zod3.null()).optional().describe(
-"The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/
+"The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.\n"
 ),
 redact_pii_policies: zod3.array(
 zod3.enum([
@@ -13544,271 +14283,256 @@ var deleteTranscriptResponse = zod3.object({
 "zodiac_sign"
 ]).describe("The type of PII to redact")
 ).nullish().describe(
-"The list of PII Redaction policies that were enabled, if PII Redaction is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/
+"The list of PII Redaction policies that were enabled, if PII Redaction is enabled.\nSee [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more information.\n"
 ),
 redact_pii_sub: zod3.enum(["entity_name", "hash"]).optional().describe(
-
+"The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details."
+),
+sentiment_analysis: zod3.boolean().nullish().describe(
+"Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false"
+),
+sentiment_analysis_results: zod3.array(
+zod3.object({
+text: zod3.string().describe("The transcript of the sentence"),
+start: zod3.number().describe("The starting time, in milliseconds, of the sentence"),
+end: zod3.number().describe("The ending time, in milliseconds, of the sentence"),
+sentiment: zod3.enum(["POSITIVE", "NEUTRAL", "NEGATIVE"]),
+confidence: zod3.number().describe(
+"The confidence score for the detected sentiment of the sentence, from 0 to 1"
+),
+channel: zod3.string().nullish().describe(
+"The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
+),
+speaker: zod3.string().nullable().describe(
+"The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
+)
+}).describe("The result of the Sentiment Analysis model")
+).nullish().describe(
+"An array of results for the Sentiment Analysis model, if it is enabled.\nSee [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.\n"
 ),
 speaker_labels: zod3.boolean().nullish().describe(
-"Whether [Speaker diarization](https://www.assemblyai.com/docs/
+"Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false"
 ),
 speakers_expected: zod3.number().nullish().describe(
-"Tell the speaker label model how many speakers it should attempt to identify. See [
+"Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details."
 ),
-
-"
+speech_model_used: zod3.string().optional().describe(
+"The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
 ),
- … (15 removed lines not captured in this diff view)
-).max(
-deleteTranscriptResponseContentSafetyLabelsResultsItemLabelsItemSeverityMax
-).describe("How severely the topic is discussed in the section, from 0 to 1")
-})
-).describe(
-"An array of safety labels, one per sensitive topic that was detected in the section"
+speech_models: zod3.array(
+zod3.string().describe(
+"The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
+)
+).nullish().describe(
+"List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.\n"
+),
+speech_threshold: zod3.number().nullish().describe(
+"Defaults to null. Reject audio files that contain less than this fraction of speech.\nValid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.\n"
+),
+speech_understanding: zod3.object({
+request: zod3.object({
+translation: zod3.object({
+target_languages: zod3.array(zod3.string()).describe(
+'List of target language codes (e.g., `["es", "de"]`). See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for supported languages.'
 ),
- … (6 removed lines not captured in this diff view)
+formal: zod3.boolean().default(deleteTranscriptResponseSpeechUnderstandingRequestTranslationFormalDefault).describe(
+"Use formal language style. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
+),
+match_original_utterance: zod3.boolean().optional().describe(
+"When enabled with Speaker Labels, returns translated text in the utterances array. Each utterance will include a `translated_texts` key containing translations for each target language."
+)
 })
-).describe(
-
-
-zod3.
-
+}).describe(
+"Request body for [Translation](https://www.assemblyai.com/docs/speech-understanding/translation)."
+).or(
+zod3.object({
+speaker_identification: zod3.object({
+speaker_type: zod3.enum(["role", "name"]).describe(
+"Type of speaker identification. See [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification) for details on each type."
+),
+known_values: zod3.array(zod3.string()).optional().describe(
+'Required if speaker_type is "role". Each value must be 35 characters or less.'
+),
+speakers: zod3.array(
+zod3.object({
+role: zod3.string().optional().describe(
+'The role of the speaker. Required when `speaker_type` is "role".'
+),
+name: zod3.string().optional().describe(
+'The name of the speaker. Required when `speaker_type` is "name".'
+),
+description: zod3.string().optional().describe(
+"A description of the speaker to help the model identify them based on conversational context."
+)
+})
+).optional().describe(
+"An array of speaker objects with metadata to improve identification accuracy. Each object should include a `role` or `name` (depending on `speaker_type`) and an optional `description` to help the model identify the speaker. You can also include any additional custom properties (e.g., `company`, `title`) to provide more context. Use this as an alternative to `known_values` when you want to provide additional context about each speaker."
+)
+})
+}).describe(
+"Request body for [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification)."
 )
-).
- … (4 removed lines not captured in this diff view)
+).or(
+zod3.object({
+custom_formatting: zod3.object({
+date: zod3.string().optional().describe(
+'Date format pattern (e.g., `"mm/dd/yyyy"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
+),
+phone_number: zod3.string().optional().describe(
+'Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
|
|
14383
|
+
),
|
|
14384
|
+
email: zod3.string().optional().describe(
|
|
14385
|
+
'Email format pattern (e.g., `"username@domain.com"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.'
|
|
14386
|
+
)
|
|
14387
|
+
})
|
|
14388
|
+
}).describe(
|
|
14389
|
+
"Request body for [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting)."
|
|
14390
|
+
)
|
|
14391
|
+
).optional(),
|
|
14392
|
+
response: zod3.object({
|
|
14393
|
+
translation: zod3.object({
|
|
14394
|
+
status: zod3.string().optional()
|
|
14395
|
+
}).optional()
|
|
14396
|
+
}).or(
|
|
13601
14397
|
zod3.object({
|
|
13602
|
-
|
|
13603
|
-
|
|
13604
|
-
|
|
14398
|
+
speaker_identification: zod3.object({
|
|
14399
|
+
mapping: zod3.record(zod3.string(), zod3.string()).optional().describe(
|
|
14400
|
+
'A mapping of the original generic speaker labels (e.g., "A", "B") to the identified speaker names or roles.'
|
|
14401
|
+
),
|
|
14402
|
+
status: zod3.string().optional()
|
|
14403
|
+
}).optional()
|
|
13605
14404
|
})
|
|
13606
|
-
).
|
|
13607
|
-
"A summary of the Content Moderation severity results for the entire audio file"
|
|
13608
|
-
)
|
|
13609
|
-
}).describe(
|
|
13610
|
-
"An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.\n"
|
|
13611
|
-
).or(zod3.null()).optional().describe(
|
|
13612
|
-
"An array of results for the Content Moderation model, if it is enabled.\nSee [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.\n"
|
|
13613
|
-
),
|
|
13614
|
-
iab_categories: zod3.boolean().nullish().describe(
|
|
13615
|
-
"Whether [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) is enabled, can be true or false"
|
|
13616
|
-
),
|
|
13617
|
-
iab_categories_result: zod3.object({
|
|
13618
|
-
status: zod3.enum(["success", "unavailable"]).describe("Either success, or unavailable in the rare case that the model failed"),
|
|
13619
|
-
results: zod3.array(
|
|
14405
|
+
).or(
|
|
13620
14406
|
zod3.object({
|
|
13621
|
-
|
|
13622
|
-
|
|
13623
|
-
zod3.
|
|
13624
|
-
|
|
13625
|
-
|
|
13626
|
-
|
|
13627
|
-
|
|
13628
|
-
|
|
13629
|
-
label: zod3.string().describe(
|
|
13630
|
-
"The IAB taxonomical label for the label of the detected topic, where > denotes supertopic/subtopic relationship"
|
|
13631
|
-
)
|
|
13632
|
-
})
|
|
13633
|
-
).optional().describe("An array of detected topics in the text"),
|
|
13634
|
-
timestamp: zod3.object({
|
|
13635
|
-
start: zod3.number().describe("The start time in milliseconds"),
|
|
13636
|
-
end: zod3.number().describe("The end time in milliseconds")
|
|
13637
|
-
}).optional().describe("Timestamp containing a start and end property in milliseconds")
|
|
13638
|
-
}).describe("The result of the topic detection model")
|
|
13639
|
-
).describe("An array of results for the Topic Detection model"),
|
|
13640
|
-
summary: zod3.record(
|
|
13641
|
-
zod3.string(),
|
|
13642
|
-
zod3.number().min(deleteTranscriptResponseIabCategoriesResultSummaryMinOne).max(deleteTranscriptResponseIabCategoriesResultSummaryMaxOne)
|
|
13643
|
-
).describe("The overall relevance of topic to the entire audio file")
|
|
13644
|
-
}).describe(
|
|
13645
|
-
"The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.\n"
|
|
13646
|
-
).or(zod3.null()).optional().describe(
|
|
13647
|
-
"The result of the Topic Detection model, if it is enabled.\nSee [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.\n"
|
|
13648
|
-
),
|
|
13649
|
-
custom_spelling: zod3.array(
|
|
13650
|
-
zod3.object({
|
|
13651
|
-
from: zod3.array(zod3.string().describe("Word or phrase to replace")).describe("Words or phrases to replace"),
|
|
13652
|
-
to: zod3.string().describe("Word to replace with")
|
|
13653
|
-
}).describe(
|
|
13654
|
-
"Object containing words or phrases to replace, and the word or phrase to replace with"
|
|
13655
|
-
)
|
|
13656
|
-
).nullish().describe("Customize how words are spelled and formatted using to and from values"),
|
|
13657
|
-
keyterms_prompt: zod3.array(zod3.string()).optional().describe(
|
|
13658
|
-
"Improve accuracy with up to 1000 domain-specific words or phrases (maximum 6 words per phrase).\n"
|
|
14407
|
+
custom_formatting: zod3.object({
|
|
14408
|
+
mapping: zod3.record(zod3.string(), zod3.string()).optional(),
|
|
14409
|
+
formatted_text: zod3.string().optional()
|
|
14410
|
+
}).optional()
|
|
14411
|
+
})
|
|
14412
|
+
).optional()
|
|
14413
|
+
}).optional().describe(
|
|
14414
|
+
"Speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.\n"
|
|
13659
14415
|
),
|
|
13660
|
-
|
|
13661
|
-
|
|
13662
|
-
"Whether [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) is enabled, can be true or false"
|
|
14416
|
+
status: zod3.enum(["queued", "processing", "completed", "error"]).describe(
|
|
14417
|
+
"The status of your transcript. Possible values are queued, processing, completed, or error."
|
|
13663
14418
|
),
|
|
13664
|
-
chapters: zod3.array(
|
|
13665
|
-
zod3.object({
|
|
13666
|
-
gist: zod3.string().describe(
|
|
13667
|
-
"An ultra-short summary (just a few words) of the content spoken in the chapter"
|
|
13668
|
-
),
|
|
13669
|
-
headline: zod3.string().describe("A single sentence summary of the content spoken during the chapter"),
|
|
13670
|
-
summary: zod3.string().describe("A one paragraph summary of the content spoken during the chapter"),
|
|
13671
|
-
start: zod3.number().describe("The starting time, in milliseconds, for the chapter"),
|
|
13672
|
-
end: zod3.number().describe("The starting time, in milliseconds, for the chapter")
|
|
13673
|
-
}).describe("Chapter of the audio file")
|
|
13674
|
-
).nullish().describe("An array of temporally sequential chapters for the audio file"),
|
|
13675
14419
|
summarization: zod3.boolean().describe(
|
|
13676
|
-
"Whether [Summarization](https://www.assemblyai.com/docs/
|
|
14420
|
+
"Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n\nNote: This parameter is only supported for the Universal-2 model.\n"
|
|
13677
14421
|
),
|
|
13678
|
-
|
|
13679
|
-
"The
|
|
14422
|
+
summary: zod3.string().nullish().describe(
|
|
14423
|
+
"The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
|
|
13680
14424
|
),
|
|
13681
14425
|
summary_model: zod3.string().nullish().describe(
|
|
13682
|
-
"The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/
|
|
14426
|
+
"The Summarization model used to generate the summary,\nif [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details.\n"
|
|
13683
14427
|
),
|
|
13684
|
-
|
|
13685
|
-
"The
|
|
14428
|
+
summary_type: zod3.string().nullish().describe(
|
|
14429
|
+
"The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled. Deprecated - use [LLM Gateway](https://www.assemblyai.com/docs/llm-gateway/overview) instead for more flexible summaries. See the [updated Summarization page](https://www.assemblyai.com/docs/speech-understanding/summarization) for details."
|
|
13686
14430
|
),
|
|
13687
|
-
|
|
13688
|
-
|
|
13689
|
-
|
|
13690
|
-
"Whether [
|
|
14431
|
+
remove_audio_tags: zod3.enum(["all"]).describe(
|
|
14432
|
+
"Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
|
|
14433
|
+
).or(zod3.null()).optional().describe(
|
|
14434
|
+
"Whether [audio event tags](https://www.assemblyai.com/docs/pre-recorded-audio/universal-3-pro#audio-event-tags) were removed from the transcript text.\n\nNote: This parameter is only supported for the Universal-3 Pro model.\n"
|
|
13691
14435
|
),
|
|
13692
|
-
|
|
14436
|
+
temperature: zod3.number().nullish().describe(
|
|
14437
|
+
"The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.\n\nNote: This parameter can only be used with the Universal-3 Pro model.\n"
|
|
14438
|
+
),
|
|
14439
|
+
text: zod3.string().nullish().describe("The textual transcript of your media file"),
|
|
14440
|
+
throttled: zod3.boolean().nullish().describe(
|
|
14441
|
+
"True while a request is throttled and false when a request is no longer throttled"
|
|
14442
|
+
),
|
|
14443
|
+
utterances: zod3.array(
|
|
13693
14444
|
zod3.object({
|
|
13694
|
-
|
|
13695
|
-
start: zod3.number().describe("The starting time, in milliseconds, of the
|
|
13696
|
-
end: zod3.number().describe("The ending time, in milliseconds, of the
|
|
13697
|
-
|
|
13698
|
-
|
|
13699
|
-
|
|
13700
|
-
|
|
14445
|
+
confidence: zod3.number().describe("The confidence score for the transcript of this utterance"),
|
|
14446
|
+
start: zod3.number().describe("The starting time, in milliseconds, of the utterance in the audio file"),
|
|
14447
|
+
end: zod3.number().describe("The ending time, in milliseconds, of the utterance in the audio file"),
|
|
14448
|
+
text: zod3.string().describe("The text for this utterance"),
|
|
14449
|
+
words: zod3.array(
|
|
14450
|
+
zod3.object({
|
|
14451
|
+
confidence: zod3.number().describe("The confidence score for the transcript of this word"),
|
|
14452
|
+
start: zod3.number().describe("The starting time, in milliseconds, for the word"),
|
|
14453
|
+
end: zod3.number().describe("The ending time, in milliseconds, for the word"),
|
|
14454
|
+
text: zod3.string().describe("The text of the word"),
|
|
14455
|
+
channel: zod3.string().nullish().describe(
|
|
14456
|
+
"The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
|
|
14457
|
+
),
|
|
14458
|
+
speaker: zod3.string().nullable().describe(
|
|
14459
|
+
"The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
|
|
14460
|
+
)
|
|
14461
|
+
})
|
|
14462
|
+
).describe("The words in the utterance."),
|
|
13701
14463
|
channel: zod3.string().nullish().describe(
|
|
13702
14464
|
"The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
|
|
13703
14465
|
),
|
|
13704
|
-
speaker: zod3.string().
|
|
13705
|
-
|
|
14466
|
+
speaker: zod3.string().describe(
|
|
14467
|
+
'The speaker of this utterance, where each speaker is assigned a sequential capital letter - e.g. "A" for Speaker A, "B" for Speaker B, etc.'
|
|
14468
|
+
),
|
|
14469
|
+
translated_texts: zod3.record(zod3.string(), zod3.string()).optional().describe(
|
|
14470
|
+
'Translations keyed by language code (e.g., `{"es": "Texto traducido", "de": "\xDCbersetzter Text"}`). Only present when `match_original_utterance` is enabled with translation.'
|
|
13706
14471
|
)
|
|
13707
|
-
})
|
|
14472
|
+
})
|
|
13708
14473
|
).nullish().describe(
|
|
13709
|
-
"
|
|
14474
|
+
"When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.\nSee [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.\n"
|
|
13710
14475
|
),
|
|
13711
|
-
|
|
13712
|
-
"Whether [
|
|
14476
|
+
webhook_auth: zod3.boolean().describe(
|
|
14477
|
+
"Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided"
|
|
13713
14478
|
),
|
|
13714
|
-
|
|
14479
|
+
webhook_auth_header_name: zod3.string().nullish().describe(
|
|
14480
|
+
"The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests"
|
|
14481
|
+
),
|
|
14482
|
+
webhook_status_code: zod3.number().nullish().describe(
|
|
14483
|
+
"The status code we received from your server when delivering the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) request, if a webhook URL was provided"
|
|
14484
|
+
),
|
|
14485
|
+
webhook_url: zod3.string().nullish().describe(
|
|
14486
|
+
"The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.\n"
|
|
14487
|
+
),
|
|
14488
|
+
words: zod3.array(
|
|
13715
14489
|
zod3.object({
|
|
13716
|
-
|
|
13717
|
-
|
|
13718
|
-
|
|
13719
|
-
|
|
13720
|
-
|
|
13721
|
-
"
|
|
13722
|
-
"credit_card_number",
|
|
13723
|
-
"date",
|
|
13724
|
-
"date_interval",
|
|
13725
|
-
"date_of_birth",
|
|
13726
|
-
"drivers_license",
|
|
13727
|
-
"drug",
|
|
13728
|
-
"duration",
|
|
13729
|
-
"email_address",
|
|
13730
|
-
"event",
|
|
13731
|
-
"filename",
|
|
13732
|
-
"gender_sexuality",
|
|
13733
|
-
"healthcare_number",
|
|
13734
|
-
"injury",
|
|
13735
|
-
"ip_address",
|
|
13736
|
-
"language",
|
|
13737
|
-
"location",
|
|
13738
|
-
"marital_status",
|
|
13739
|
-
"medical_condition",
|
|
13740
|
-
"medical_process",
|
|
13741
|
-
"money_amount",
|
|
13742
|
-
"nationality",
|
|
13743
|
-
"number_sequence",
|
|
13744
|
-
"occupation",
|
|
13745
|
-
"organization",
|
|
13746
|
-
"passport_number",
|
|
13747
|
-
"password",
|
|
13748
|
-
"person_age",
|
|
13749
|
-
"person_name",
|
|
13750
|
-
"phone_number",
|
|
13751
|
-
"physical_attribute",
|
|
13752
|
-
"political_affiliation",
|
|
13753
|
-
"religion",
|
|
13754
|
-
"statistics",
|
|
13755
|
-
"time",
|
|
13756
|
-
"url",
|
|
13757
|
-
"us_social_security_number",
|
|
13758
|
-
"username",
|
|
13759
|
-
"vehicle_id",
|
|
13760
|
-
"zodiac_sign"
|
|
13761
|
-
]).describe("The type of entity for the detected entity"),
|
|
13762
|
-
text: zod3.string().describe("The text for the detected entity"),
|
|
13763
|
-
start: zod3.number().describe(
|
|
13764
|
-
"The starting time, in milliseconds, at which the detected entity appears in the audio file"
|
|
14490
|
+
confidence: zod3.number().describe("The confidence score for the transcript of this word"),
|
|
14491
|
+
start: zod3.number().describe("The starting time, in milliseconds, for the word"),
|
|
14492
|
+
end: zod3.number().describe("The ending time, in milliseconds, for the word"),
|
|
14493
|
+
text: zod3.string().describe("The text of the word"),
|
|
14494
|
+
channel: zod3.string().nullish().describe(
|
|
14495
|
+
"The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
|
|
13765
14496
|
),
|
|
13766
|
-
|
|
13767
|
-
"The
|
|
14497
|
+
speaker: zod3.string().nullable().describe(
|
|
14498
|
+
"The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
|
|
13768
14499
|
)
|
|
13769
|
-
})
|
|
14500
|
+
})
|
|
13770
14501
|
).nullish().describe(
|
|
13771
|
-
"An array of
|
|
13772
|
-
),
|
|
13773
|
-
|
|
13774
|
-
|
|
13775
|
-
),
|
|
13776
|
-
|
|
13777
|
-
"
|
|
13778
|
-
)
|
|
13779
|
-
|
|
13780
|
-
|
|
13781
|
-
|
|
14502
|
+
"An array of temporally-sequential word objects, one for each word in the transcript.\n"
|
|
14503
|
+
),
|
|
14504
|
+
acoustic_model: zod3.string().describe("This parameter does not currently have any functionality attached to it."),
|
|
14505
|
+
custom_topics: zod3.boolean().nullish().describe("This parameter does not currently have any functionality attached to it."),
|
|
14506
|
+
language_model: zod3.string().describe("This parameter does not currently have any functionality attached to it."),
|
|
14507
|
+
speech_model: zod3.string().describe(
|
|
14508
|
+
"The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models."
|
|
14509
|
+
).or(zod3.null()).describe(
|
|
14510
|
+
"This parameter has been replaced with the `speech_models` parameter, learn more about the `speech_models` parameter [here](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model).\n"
|
|
14511
|
+
),
|
|
14512
|
+
speed_boost: zod3.boolean().nullish().describe("This parameter does not currently have any functionality attached to it."),
|
|
14513
|
+
topics: zod3.array(zod3.string()).optional().describe("This parameter does not currently have any functionality attached to it."),
|
|
14514
|
+
translated_texts: zod3.object({
|
|
14515
|
+
language_code: zod3.string().optional().describe("Translated text for this language code")
|
|
14516
|
+
}).optional().describe(
|
|
14517
|
+
"Translated text keyed by language code. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details."
|
|
14518
|
+
)
|
|
13782
14519
|
}).describe("A transcript object");
|
|
13783
|
-
var getSubtitlesParams = zod3.object({
|
|
13784
|
-
transcript_id: zod3.string().describe("ID of the transcript"),
|
|
13785
|
-
subtitle_format: zod3.enum(["srt", "vtt"]).describe("The format of the captions")
|
|
13786
|
-
});
|
|
13787
|
-
var getSubtitlesQueryParams = zod3.object({
|
|
13788
|
-
chars_per_caption: zod3.number().optional().describe("The maximum number of characters per caption")
|
|
13789
|
-
});
|
|
13790
14520
|
var getTranscriptSentencesParams = zod3.object({
|
|
13791
14521
|
transcript_id: zod3.string().describe("ID of the transcript")
|
|
13792
14522
|
});
|
|
13793
|
-
var getTranscriptSentencesResponseConfidenceMin = 0;
|
|
13794
|
-
var getTranscriptSentencesResponseConfidenceMax = 1;
|
|
13795
|
-
var getTranscriptSentencesResponseSentencesItemConfidenceMin = 0;
|
|
13796
|
-
var getTranscriptSentencesResponseSentencesItemConfidenceMax = 1;
|
|
13797
|
-
var getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMin = 0;
|
|
13798
|
-
var getTranscriptSentencesResponseSentencesItemWordsItemConfidenceMax = 1;
|
|
13799
14523
|
var getTranscriptSentencesResponse = zod3.object({
|
|
13800
14524
|
id: zod3.string().uuid().describe("The unique identifier for the transcript"),
|
|
13801
|
-
confidence: zod3.number().
|
|
14525
|
+
confidence: zod3.number().describe("The confidence score for the transcript"),
|
|
13802
14526
|
audio_duration: zod3.number().describe("The duration of the audio file in seconds"),
|
|
13803
14527
|
sentences: zod3.array(
|
|
13804
14528
|
zod3.object({
|
|
13805
14529
|
text: zod3.string().describe("The transcript of the sentence"),
|
|
13806
14530
|
start: zod3.number().describe("The starting time, in milliseconds, for the sentence"),
|
|
13807
14531
|
end: zod3.number().describe("The ending time, in milliseconds, for the sentence"),
|
|
13808
|
-
confidence: zod3.number().
|
|
14532
|
+
confidence: zod3.number().describe("The confidence score for the transcript of this sentence"),
|
|
13809
14533
|
words: zod3.array(
|
|
13810
14534
|
zod3.object({
|
|
13811
|
-
confidence: zod3.number().
|
|
14535
|
+
confidence: zod3.number().describe("The confidence score for the transcript of this word"),
|
|
13812
14536
|
start: zod3.number().describe("The starting time, in milliseconds, for the word"),
|
|
13813
14537
|
end: zod3.number().describe("The ending time, in milliseconds, for the word"),
|
|
13814
14538
|
text: zod3.string().describe("The text of the word"),
|
|
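The speech_understanding block added above accepts one of three request shapes: translation, speaker identification, or custom formatting. A minimal usage sketch of the translation variant follows; the schema fragment is re-declared locally for illustration, so the variable names are assumptions rather than the package's exports, and the default value of formal (false) is assumed since the actual default constant's value is not visible in this diff.

import { z } from "zod";

// Local mirror of the translation request shape added in this hunk
// (illustrative names, not the package's exported identifiers).
const translationRequest = z.object({
  translation: z.object({
    target_languages: z.array(z.string()),
    formal: z.boolean().default(false), // assumed default
    match_original_utterance: z.boolean().optional()
  })
});

// Passes: formal falls back to its default, the other fields are supplied.
translationRequest.parse({
  translation: { target_languages: ["es", "de"], match_original_utterance: true }
});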
@@ -13816,7 +14540,7 @@ var getTranscriptSentencesResponse = zod3.object({
 "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
 ),
 speaker: zod3.string().nullable().describe(
-"The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/
+"The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
 )
 })
 ).describe("An array of words in the sentence"),
@@ -13824,7 +14548,7 @@ var getTranscriptSentencesResponse = zod3.object({
 "The channel of the sentence. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
 ),
 speaker: zod3.string().nullable().describe(
-"The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/
+"The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
 )
 })
 ).describe("An array of sentences in the transcript")
@@ -13832,25 +14556,19 @@ var getTranscriptSentencesResponse = zod3.object({
 var getTranscriptParagraphsParams = zod3.object({
 transcript_id: zod3.string().describe("ID of the transcript")
 });
-var getTranscriptParagraphsResponseConfidenceMin = 0;
-var getTranscriptParagraphsResponseConfidenceMax = 1;
-var getTranscriptParagraphsResponseParagraphsItemConfidenceMin = 0;
-var getTranscriptParagraphsResponseParagraphsItemConfidenceMax = 1;
-var getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMin = 0;
-var getTranscriptParagraphsResponseParagraphsItemWordsItemConfidenceMax = 1;
 var getTranscriptParagraphsResponse = zod3.object({
 id: zod3.string().uuid().describe("The unique identifier of your transcript"),
-confidence: zod3.number().
+confidence: zod3.number().describe("The confidence score for the transcript"),
 audio_duration: zod3.number().describe("The duration of the audio file in seconds"),
 paragraphs: zod3.array(
 zod3.object({
 text: zod3.string().describe("The transcript of the paragraph"),
 start: zod3.number().describe("The starting time, in milliseconds, of the paragraph"),
 end: zod3.number().describe("The ending time, in milliseconds, of the paragraph"),
-confidence: zod3.number().
+confidence: zod3.number().describe("The confidence score for the transcript of this paragraph"),
 words: zod3.array(
 zod3.object({
-confidence: zod3.number().
+confidence: zod3.number().describe("The confidence score for the transcript of this word"),
 start: zod3.number().describe("The starting time, in milliseconds, for the word"),
 end: zod3.number().describe("The ending time, in milliseconds, for the word"),
 text: zod3.string().describe("The text of the word"),
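Dropping the *ConfidenceMin/*ConfidenceMax constants above (and the matching sentence-response constants in the previous hunk) removes the .min(0).max(1) clamps they presumably fed, so these response schemas no longer reject out-of-range confidence values at parse time. A hedged sketch of the behavioral difference, with illustrative identifiers:

import { z } from "zod";

const confidenceOld = z.number().min(0).max(1); // 0.8.3 shape, with bounds
const confidenceNew = z.number();               // 0.8.4 shape, bounds removed

console.log(confidenceOld.safeParse(1.2).success); // false - rejected by .max(1)
console.log(confidenceNew.safeParse(1.2).success); // true - any number now parses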
@@ -13858,13 +14576,28 @@ var getTranscriptParagraphsResponse = zod3.object({
 "The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially."
 ),
 speaker: zod3.string().nullable().describe(
-"The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/
+"The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null"
 )
 })
 ).describe("An array of words in the paragraph")
 })
 ).describe("An array of paragraphs in the transcript")
 });
+var getSubtitlesParams = zod3.object({
+transcript_id: zod3.string().describe("ID of the transcript"),
+subtitle_format: zod3.enum(["srt", "vtt"]).describe("The format of the captions")
+});
+var getSubtitlesQueryParams = zod3.object({
+chars_per_caption: zod3.number().optional().describe("The maximum number of characters per caption")
+});
+var getSubtitlesResponse = zod3.object({});
+var getRedactedAudioParams = zod3.object({
+transcript_id: zod3.string().describe("ID of the transcript")
+});
+var getRedactedAudioResponse = zod3.object({
+status: zod3.enum(["redacted_audio_ready"]).describe("The status of the redacted audio"),
+redacted_audio_url: zod3.string().describe("The URL of the redacted audio file")
+});
 var wordSearchParams = zod3.object({
 transcript_id: zod3.string().describe("ID of the transcript")
 });
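This hunk relocates the subtitle schemas and introduces getSubtitlesResponse, getRedactedAudioParams, and getRedactedAudioResponse at this position. A minimal parse sketch for the redacted-audio response, re-declaring the fragment locally (the URL is a placeholder):

import { z } from "zod";

const getRedactedAudioResponse = z.object({
  status: z.enum(["redacted_audio_ready"]).describe("The status of the redacted audio"),
  redacted_audio_url: z.string().describe("The URL of the redacted audio file")
});

// Validates a payload announcing that the redacted file is ready.
getRedactedAudioResponse.parse({
  status: "redacted_audio_ready",
  redacted_audio_url: "https://example.com/redacted.mp3" // placeholder URL
});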
@@ -13881,7 +14614,7 @@ var wordSearchResponse = zod3.object({
 text: zod3.string().describe("The matched word"),
 count: zod3.number().describe("The total amount of times the word is in the transcript"),
 timestamps: zod3.array(
-zod3.array(zod3.number()
+zod3.array(zod3.number()).describe(
 "An array of timestamps structured as [`start_time`, `end_time`] in milliseconds"
 )
 ).describe("An array of timestamps"),
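The one-line change above rewrites the timestamps item schema, so in 0.8.4 each word-search match validates as an array of [start_time, end_time] millisecond pairs. Sketch, with the fragment re-declared locally under an illustrative name:

import { z } from "zod";

const timestamps = z.array(
  z.array(z.number()).describe(
    "An array of timestamps structured as [`start_time`, `end_time`] in milliseconds"
  )
).describe("An array of timestamps");

// Two matches of the searched word, as start/end millisecond pairs.
timestamps.parse([[1200, 1740], [5230, 5890]]);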
@@ -13891,205 +14624,6 @@ var wordSearchResponse = zod3.object({
 })
 ).describe("The matches of the search")
 });
-var getRedactedAudioParams = zod3.object({
-transcript_id: zod3.string().describe("ID of the transcript")
-});
-var getRedactedAudioResponse = zod3.object({
-status: zod3.enum(["redacted_audio_ready"]).describe("The status of the redacted audio"),
-redacted_audio_url: zod3.string().describe("The URL of the redacted audio file")
-});
-var createTemporaryTokenBodyExpiresInMin = 60;
-var createTemporaryTokenBody = zod3.object({
-expires_in: zod3.number().min(createTemporaryTokenBodyExpiresInMin).describe("The amount of time until the token expires in seconds")
-});
-var createTemporaryTokenResponse = zod3.object({
-token: zod3.string().describe("The temporary authentication token for Streaming Speech-to-Text")
-});
-var lemurTaskBodyFinalModelDefault = "default";
-var lemurTaskBodyMaxOutputSizeDefault = 2e3;
-var lemurTaskBodyTemperatureDefault = 0;
-var lemurTaskBodyTemperatureMin = 0;
-var lemurTaskBodyTemperatureMax = 1;
-var lemurTaskBody = zod3.object({
-prompt: zod3.string().describe(
-"Your text to prompt the model to produce a desired output, including any context you want to pass into the model."
-)
-}).and(
-zod3.object({
-transcript_ids: zod3.array(zod3.string().uuid()).optional().describe(
-"A list of completed transcripts with text. Up to a maximum of 100 hours of audio.\nUse either transcript_ids or input_text as input into LeMUR.\n"
-),
-input_text: zod3.string().optional().describe(
-"Custom formatted transcript data. Maximum size is the context limit of the selected model.\nUse either transcript_ids or input_text as input into LeMUR.\n"
-),
-context: zod3.string().or(zod3.record(zod3.string(), zod3.any())).optional().describe("Context to provide the model. This can be a string or a free-form JSON value."),
-final_model: zod3.enum([
-"anthropic/claude-3-5-sonnet",
-"anthropic/claude-3-opus",
-"anthropic/claude-3-haiku"
-]).describe("The model that is used for the final prompt after compression is performed.\n").or(zod3.string()).describe("The model that is used for the final prompt after compression is performed.\n"),
-max_output_size: zod3.number().default(lemurTaskBodyMaxOutputSizeDefault).describe("Max output size in tokens."),
-temperature: zod3.number().min(lemurTaskBodyTemperatureMin).max(lemurTaskBodyTemperatureMax).optional().describe(
-"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n"
-)
-})
-);
-var lemurTaskResponseUsageInputTokensMin = 0;
-var lemurTaskResponseUsageOutputTokensMin = 0;
-var lemurTaskResponse = zod3.object({
-response: zod3.string().describe("The response generated by LeMUR.")
-}).and(
-zod3.object({
-request_id: zod3.string().uuid().describe("The ID of the LeMUR request"),
-usage: zod3.object({
-input_tokens: zod3.number().min(lemurTaskResponseUsageInputTokensMin).describe("The number of input tokens used by the model"),
-output_tokens: zod3.number().min(lemurTaskResponseUsageOutputTokensMin).describe("The number of output tokens generated by the model")
-}).describe("The usage numbers for the LeMUR request")
-})
-);
-var lemurSummaryBodyFinalModelDefault = "default";
-var lemurSummaryBodyMaxOutputSizeDefault = 2e3;
-var lemurSummaryBodyTemperatureDefault = 0;
-var lemurSummaryBodyTemperatureMin = 0;
-var lemurSummaryBodyTemperatureMax = 1;
-var lemurSummaryBody = zod3.object({
-transcript_ids: zod3.array(zod3.string().uuid()).optional().describe(
-"A list of completed transcripts with text. Up to a maximum of 100 hours of audio.\nUse either transcript_ids or input_text as input into LeMUR.\n"
-),
-input_text: zod3.string().optional().describe(
-"Custom formatted transcript data. Maximum size is the context limit of the selected model.\nUse either transcript_ids or input_text as input into LeMUR.\n"
-),
-context: zod3.string().or(zod3.record(zod3.string(), zod3.any())).optional().describe("Context to provide the model. This can be a string or a free-form JSON value."),
-final_model: zod3.enum(["anthropic/claude-3-5-sonnet", "anthropic/claude-3-opus", "anthropic/claude-3-haiku"]).describe("The model that is used for the final prompt after compression is performed.\n").or(zod3.string()).describe("The model that is used for the final prompt after compression is performed.\n"),
-max_output_size: zod3.number().default(lemurSummaryBodyMaxOutputSizeDefault).describe("Max output size in tokens."),
-temperature: zod3.number().min(lemurSummaryBodyTemperatureMin).max(lemurSummaryBodyTemperatureMax).optional().describe(
-"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n"
-)
-}).and(
-zod3.object({
-answer_format: zod3.string().optional().describe(
-'How you want the summary to be returned. This can be any text. Examples: "TLDR", "bullet points"\n'
-)
-})
-);
-var lemurSummaryResponseUsageInputTokensMin = 0;
-var lemurSummaryResponseUsageOutputTokensMin = 0;
-var lemurSummaryResponse = zod3.object({
-response: zod3.string().describe("The response generated by LeMUR.")
-}).and(
-zod3.object({
-request_id: zod3.string().uuid().describe("The ID of the LeMUR request"),
-usage: zod3.object({
-input_tokens: zod3.number().min(lemurSummaryResponseUsageInputTokensMin).describe("The number of input tokens used by the model"),
-output_tokens: zod3.number().min(lemurSummaryResponseUsageOutputTokensMin).describe("The number of output tokens generated by the model")
-}).describe("The usage numbers for the LeMUR request")
-})
-);
-var lemurQuestionAnswerBodyFinalModelDefault = "default";
-var lemurQuestionAnswerBodyMaxOutputSizeDefault = 2e3;
-var lemurQuestionAnswerBodyTemperatureDefault = 0;
-var lemurQuestionAnswerBodyTemperatureMin = 0;
-var lemurQuestionAnswerBodyTemperatureMax = 1;
-var lemurQuestionAnswerBody = zod3.object({
-transcript_ids: zod3.array(zod3.string().uuid()).optional().describe(
-"A list of completed transcripts with text. Up to a maximum of 100 hours of audio.\nUse either transcript_ids or input_text as input into LeMUR.\n"
-),
-input_text: zod3.string().optional().describe(
-"Custom formatted transcript data. Maximum size is the context limit of the selected model.\nUse either transcript_ids or input_text as input into LeMUR.\n"
-),
-context: zod3.string().or(zod3.record(zod3.string(), zod3.any())).optional().describe("Context to provide the model. This can be a string or a free-form JSON value."),
-final_model: zod3.enum(["anthropic/claude-3-5-sonnet", "anthropic/claude-3-opus", "anthropic/claude-3-haiku"]).describe("The model that is used for the final prompt after compression is performed.\n").or(zod3.string()).describe("The model that is used for the final prompt after compression is performed.\n"),
-max_output_size: zod3.number().default(lemurQuestionAnswerBodyMaxOutputSizeDefault).describe("Max output size in tokens."),
-temperature: zod3.number().min(lemurQuestionAnswerBodyTemperatureMin).max(lemurQuestionAnswerBodyTemperatureMax).optional().describe(
-"The temperature to use for the model.\nHigher values result in answers that are more creative, lower values are more conservative.\nCan be any value between 0.0 and 1.0 inclusive.\n"
-)
-}).and(
-zod3.object({
-questions: zod3.array(
-zod3.object({
-question: zod3.string().describe(
-"The question you wish to ask. For more complex questions use default model."
-),
-context: zod3.string().or(zod3.record(zod3.string(), zod3.any())).optional().describe(
-"Any context about the transcripts you wish to provide. This can be a string or any object."
-),
-answer_format: zod3.string().optional().describe(
-`How you want the answer to be returned. This can be any text. Can't be used with answer_options. Examples: "short sentence", "bullet points"
-`
-),
-answer_options: zod3.array(zod3.string()).optional().describe(
-`What discrete options to return. Useful for precise responses. Can't be used with answer_format. Example: ["Yes", "No"]
-`
-)
-})
-).describe("A list of questions to ask")
-})
-);
-var lemurQuestionAnswerResponseUsageInputTokensMin = 0;
-var lemurQuestionAnswerResponseUsageOutputTokensMin = 0;
-var lemurQuestionAnswerResponse = zod3.object({
-request_id: zod3.string().uuid().describe("The ID of the LeMUR request"),
-usage: zod3.object({
-input_tokens: zod3.number().min(lemurQuestionAnswerResponseUsageInputTokensMin).describe("The number of input tokens used by the model"),
-output_tokens: zod3.number().min(lemurQuestionAnswerResponseUsageOutputTokensMin).describe("The number of output tokens generated by the model")
-}).describe("The usage numbers for the LeMUR request")
-}).and(
-zod3.object({
-response: zod3.array(
-zod3.object({
-question: zod3.string().describe("The question for LeMUR to answer"),
-answer: zod3.string().describe("The answer generated by LeMUR")
-}).describe("An answer generated by LeMUR and its question")
-).describe("The answers generated by LeMUR and their questions")
-})
-);
-var getLemurResponseParams = zod3.object({
-request_id: zod3.string().describe(
-"The ID of the LeMUR request you previously made.\nThis would be found in the response of the original request.\n"
-)
-});
-var getLemurResponseResponseUsageInputTokensMin = 0;
-var getLemurResponseResponseUsageOutputTokensMin = 0;
-var getLemurResponseResponseUsageInputTokensMinOne = 0;
-var getLemurResponseResponseUsageOutputTokensMinOne = 0;
-var getLemurResponseResponse = zod3.object({
-response: zod3.string().describe("The response generated by LeMUR.")
-}).and(
-zod3.object({
-request_id: zod3.string().uuid().describe("The ID of the LeMUR request"),
-usage: zod3.object({
-input_tokens: zod3.number().min(getLemurResponseResponseUsageInputTokensMin).describe("The number of input tokens used by the model"),
-output_tokens: zod3.number().min(getLemurResponseResponseUsageOutputTokensMin).describe("The number of output tokens generated by the model")
-}).describe("The usage numbers for the LeMUR request")
-})
-).or(
-zod3.object({
-request_id: zod3.string().uuid().describe("The ID of the LeMUR request"),
-usage: zod3.object({
-input_tokens: zod3.number().min(getLemurResponseResponseUsageInputTokensMinOne).describe("The number of input tokens used by the model"),
-output_tokens: zod3.number().min(getLemurResponseResponseUsageOutputTokensMinOne).describe("The number of output tokens generated by the model")
-}).describe("The usage numbers for the LeMUR request")
-}).and(
-zod3.object({
-response: zod3.array(
-zod3.object({
-question: zod3.string().describe("The question for LeMUR to answer"),
-answer: zod3.string().describe("The answer generated by LeMUR")
-}).describe("An answer generated by LeMUR and its question")
-).describe("The answers generated by LeMUR and their questions")
-})
-)
-);
-var purgeLemurRequestDataParams = zod3.object({
-request_id: zod3.string().describe(
-"The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request."
-)
-});
-var purgeLemurRequestDataResponse = zod3.object({
-request_id: zod3.string().uuid().describe("The ID of the deletion request of the LeMUR request"),
-request_id_to_purge: zod3.string().uuid().describe("The ID of the LeMUR request to purge the data for"),
-deleted: zod3.boolean().describe("Whether the request data was deleted")
-});
 
 // src/generated/assemblyai/streaming-types.zod.ts
 import { z as zod4 } from "zod";
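This hunk deletes the LeMUR request/response schemas, the temporary-token schemas, and the duplicate redacted-audio definitions (the latter now live earlier in the file, per an earlier hunk). Code that consumed the removed createTemporaryToken export (also dropped from the export list in the final hunk of this file) can feature-detect it; a hedged sketch, where the package import path is an assumption:

// Feature-detect the export removed in this release; the import path
// "voice-router-dev" is assumed, not confirmed by this diff.
import * as sdk from "voice-router-dev";

if (!("createTemporaryToken" in sdk)) {
  console.warn("createTemporaryToken was removed in 0.8.4; update the streaming auth flow.");
}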
@@ -14104,25 +14638,37 @@ var streamingTranscriberParams = zod4.object({
 enableExtraSessionInformation: zod4.boolean().optional().describe(
 "Set to true to receive the SessionInformation message before the session ends. Defaults to false."
 ),
+domain: zod4.string().optional().describe(
+'Enable domain-specific transcription models to improve accuracy for specialized terminology. Set to `"medical-v1"` to enable [Medical Mode](https://www.assemblyai.com/docs/streaming/medical-mode) for improved accuracy of medical terms such as medications, procedures, conditions, and dosages. Supported languages: English (`en`), Spanish (`es`), German (`de`), French (`fr`). If used with an unsupported language, the parameter is ignored and a warning is returned.'
+),
 endOfTurnConfidenceThreshold: zod4.number().optional().describe("From SDK v3"),
 minEndOfTurnSilenceWhenConfident: zod4.number().optional().describe("From SDK v3"),
+minTurnSilence: zod4.number().optional().describe("From SDK v3"),
 maxTurnSilence: zod4.number().optional().describe("From SDK v3"),
 vadThreshold: zod4.number().optional().describe("From SDK v3"),
 formatTurns: zod4.boolean().optional().describe("From SDK v3"),
 filterProfanity: zod4.boolean().optional().describe("From SDK v3"),
 keyterms: zod4.array(zod4.string()).optional().describe("From SDK v3"),
 keytermsPrompt: zod4.array(zod4.string()).optional().describe("From SDK v3"),
-
+prompt: zod4.string().optional().describe("From SDK v3"),
+speechModel: zod4.enum(["universal-streaming-english", "universal-streaming-multilingual"]).describe("From SDK v3"),
 languageDetection: zod4.boolean().optional().describe("From SDK v3"),
-inactivityTimeout: zod4.number().optional().describe("From SDK v3")
+inactivityTimeout: zod4.number().optional().describe("From SDK v3"),
+speakerLabels: zod4.boolean().optional().describe("From SDK v3"),
+maxSpeakers: zod4.number().optional().describe("From SDK v3"),
+llmGateway: zod4.unknown().optional().describe("From SDK v3")
 });
 var streamingUpdateConfigParams = zod4.object({
 end_utterance_silence_threshold: zod4.number().min(0).max(2e4).optional().describe("The duration threshold in milliseconds"),
 end_of_turn_confidence_threshold: zod4.number().optional().describe("From SDK v3"),
 min_end_of_turn_silence_when_confident: zod4.number().optional().describe("From SDK v3"),
+min_turn_silence: zod4.number().optional().describe("From SDK v3"),
 max_turn_silence: zod4.number().optional().describe("From SDK v3"),
 vad_threshold: zod4.number().optional().describe("From SDK v3"),
-format_turns: zod4.boolean().optional().describe("From SDK v3")
+format_turns: zod4.boolean().optional().describe("From SDK v3"),
+keyterms_prompt: zod4.array(zod4.string()).optional().describe("From SDK v3"),
+prompt: zod4.string().optional().describe("From SDK v3"),
+filter_profanity: zod4.boolean().optional().describe("From SDK v3")
 });
 
 // src/generated/gladia/api/gladiaControlAPI.zod.ts
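The new streaming parameters above (domain, minTurnSilence, prompt, speechModel, speakerLabels, maxSpeakers, llmGateway) can be exercised as follows. The schema fragment is re-declared locally with an illustrative name; note that speechModel is declared without .optional() in this diff, so it is required as written:

import { z as zod4 } from "zod";

// Local fragment of streamingTranscriberParams, property names copied from the diff.
const params = zod4.object({
  domain: zod4.string().optional(),
  speechModel: zod4.enum(["universal-streaming-english", "universal-streaming-multilingual"]),
  speakerLabels: zod4.boolean().optional(),
  maxSpeakers: zod4.number().optional()
});

// Enables Medical Mode on a multilingual streaming session with diarization.
params.parse({
  domain: "medical-v1",
  speechModel: "universal-streaming-multilingual",
  speakerLabels: true,
  maxSpeakers: 2
});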
@@ -40145,7 +40691,6 @@ export {
 createOpenAIWhisperAdapter,
 createSonioxAdapter,
 createSpeechmaticsAdapter,
-createTemporaryToken,
 createTranscript,
 createTranscription,
 createVoiceRouter,