voice-router-dev 0.8.2 → 0.8.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +211 -0
- package/dist/constants.d.mts +33 -3
- package/dist/constants.d.ts +33 -3
- package/dist/constants.js +14 -1
- package/dist/constants.mjs +13 -1
- package/dist/{field-configs-DN2_WrYr.d.mts → field-configs-t_lVCkE5.d.mts} +1194 -1194
- package/dist/{field-configs-DN2_WrYr.d.ts → field-configs-t_lVCkE5.d.ts} +1194 -1194
- package/dist/field-configs.d.mts +1 -1
- package/dist/field-configs.d.ts +1 -1
- package/dist/field-configs.js +8 -8
- package/dist/field-configs.mjs +8 -8
- package/dist/index.d.mts +651 -1628
- package/dist/index.d.ts +651 -1628
- package/dist/index.js +203 -51
- package/dist/index.mjs +200 -51
- package/dist/{provider-metadata-BnkedpXm.d.mts → provider-metadata-MDUUEuqF.d.mts} +4 -4
- package/dist/{provider-metadata-DbsSGAO7.d.ts → provider-metadata-_gUWlRXS.d.ts} +4 -4
- package/dist/provider-metadata.d.mts +1 -1
- package/dist/provider-metadata.d.ts +1 -1
- package/dist/{speechToTextChunkResponseModel-DExUFZT3.d.ts → speechToTextChunkResponseModel-DjL2ncnf.d.ts} +1147 -10
- package/dist/{speechToTextChunkResponseModel-3IUnJXKx.d.mts → speechToTextChunkResponseModel-DvIT4xai.d.mts} +1147 -10
- package/dist/webhooks.d.mts +234 -215
- package/dist/webhooks.d.ts +234 -215
- package/package.json +1 -1
package/dist/index.d.mts
CHANGED
@@ -1,10 +1,10 @@
-
import { T as TranscribeOptions, U as UnifiedTranscriptResponse, S as StreamingOptions, a as StreamingCallbacks, b as StreamingSession, c as StreamEvent, L as ListTranscriptsOptions, G as GladiaStreamingOptions, D as DeepgramStreamingOptions, A as AssemblyAIStreamingOptions, F as FileResponse, d as StreamingSupportedEncodingEnum, e as StreamingSupportedBitDepthEnum, f as StreamingSupportedSampleRateEnum, g as StreamingSupportedModels, h as LanguageConfig, P as PreProcessingConfig, R as RealtimeProcessingConfig, i as PostProcessingConfig, M as MessagesConfig, C as CallbackConfig, j as TranscriptionMetadataDTO, k as TranscriptionDTO, l as TranslationDTO, m as SummarizationDTO, N as NamedEntityRecognitionDTO, n as SentimentAnalysisDTO, o as ChapterizationDTO, p as PreRecordedResponse, q as UtteranceDTO, r as TranscriptionLanguageCodeEnum, s as TranslationLanguageCodeEnum, W as WordDTO, t as StreamingSupportedRegions, u as AddonErrorDTO, v as AudioToLlmDTO, w as AudioToLlmDTOError, x as AudioToLlmDTOResults, y as AudioToLlmListConfigDTO, z as AudioToLlmListDTO, B as AudioToLlmListDTOError, E as AudioToLlmResultDTO, H as CallbackConfigDto, I as CallbackMethodEnum, J as CallbackTranscriptionErrorPayload, K as CallbackTranscriptionErrorPayloadCustomMetadata, O as CallbackTranscriptionErrorPayloadEvent, Q as CallbackTranscriptionSuccessPayload, V as CallbackTranscriptionSuccessPayloadCustomMetadata, X as CallbackTranscriptionSuccessPayloadEvent, Y as ChapterizationDTOError, Z as ChapterizationDTOResults, _ as CodeSwitchingConfigDTO, $ as CustomSpellingConfigDTO, a0 as CustomSpellingConfigDTOSpellingDictionary, a1 as CustomVocabularyConfigDTO, a2 as CustomVocabularyConfigDTOVocabularyItem, a3 as CustomVocabularyEntryDTO, a4 as DiarizationConfigDTO, a5 as DiarizationDTO, a6 as DiarizationDTOError, a7 as DisplayModeDTO, a8 as DisplayModeDTOError, a9 as ErrorDTO, aa as InitTranscriptionRequest, ab as InitTranscriptionRequestCustomMetadata, ac as ModerationDTO, ad as ModerationDTOError, ae as NamedEntityRecognitionDTOError, af as NamesConsistencyDTO, ag as NamesConsistencyDTOError, ah as PreRecordedRequestParamsResponse, ai as PreRecordedResponseCustomMetadata, aj as PreRecordedResponseFile, ak as PreRecordedResponseKind, al as PreRecordedResponsePostSessionMetadata, am as PreRecordedResponseRequestParams, an as PreRecordedResponseResult, ao as PreRecordedResponseStatus, ap as SentencesDTO, aq as SentencesDTOError, ar as SentimentAnalysisDTOError, as as SpeakerReidentificationDTO, at as SpeakerReidentificationDTOError, au as StreamingRequest, av as StreamingRequestCustomMetadata, aw as StructuredDataExtractionConfigDTO, ax as StructuredDataExtractionDTO, ay as StructuredDataExtractionDTOError, az as SubtitleDTO, aA as SubtitlesConfigDTO, aB as SubtitlesFormatEnum, aC as SubtitlesStyleEnum, aD as SummarizationConfigDTO, aE as SummarizationDTOError, aF as SummaryTypesEnum, aG as TranscriptionControllerListV2KindItem, aH as TranscriptionControllerListV2Params, aI as TranscriptionControllerListV2StatusItem, aJ as TranscriptionResultDTO, aK as TranslationConfigDTO, aL as TranslationDTOError, aM as TranslationModelEnum, aN as TranslationResultDTO, aO as TranslationResultDTOError, aP as TranscriptOptionalParams, aQ as TranscriptStatus, aR as TranscriptWord, aS as AudioIntelligenceModelStatus, aT as AutoHighlightResult, aU as AutoHighlightsResult, aV as Chapter$1, aW as ContentSafetyLabel, aX as ContentSafetyLabelResult, aY as ContentSafetyLabelsResult, aZ as ContentSafetyLabelsResultSeverityScoreSummary, a_ 
as ContentSafetyLabelsResultSummary, a$ as Entity, b0 as EntityType, b1 as ListTranscriptsParams, b2 as PiiPolicy, b3 as RedactPiiAudioQuality, b4 as RedactedAudioNotification, b5 as RedactedAudioResponse, b6 as RedactedAudioStatus, b7 as Sentiment, b8 as SentimentAnalysisResult$2, b9 as SentimentAnalysisResultChannel, ba as SentimentAnalysisResultSpeaker, bb as SeverityScoreSummary, bc as SpeechModel, bd as SubstitutionPolicy, be as SummaryModel, bf as SummaryType, bg as Timestamp, bh as TopicDetectionModelResult, bi as TopicDetectionModelResultSummary, bj as TopicDetectionResult$1, bk as TopicDetectionResultLabelsItem, bl as Transcript, bm as TranscriptAudioDuration, bn as TranscriptAudioEndAt, bo as TranscriptAudioStartFrom, bp as TranscriptAutoChapters, bq as TranscriptAutoHighlightsResult, br as TranscriptBoostParam, bs as TranscriptBoostParamProperty, bt as TranscriptChapters, bu as TranscriptConfidence, bv as TranscriptContentSafety, bw as TranscriptContentSafetyLabels, bx as TranscriptCustomSpelling, by as TranscriptCustomSpellingProperty, bz as TranscriptCustomTopics, bA as TranscriptDisfluencies, bB as TranscriptEntities, bC as TranscriptEntityDetection, bD as TranscriptFilterProfanity, bE as TranscriptFormatText, bF as TranscriptIabCategories, bG as TranscriptIabCategoriesResult, bH as TranscriptLanguageCode, bI as TranscriptLanguageCodeProperty, bJ as TranscriptLanguageConfidence, bK as TranscriptLanguageConfidenceThreshold, bL as TranscriptLanguageDetection, bM as TranscriptMultichannel, bN as TranscriptOptionalParamsLanguageCode, bO as TranscriptOptionalParamsLanguageCodeOneOf, bP as TranscriptOptionalParamsRedactPiiSub, bQ as TranscriptOptionalParamsSpeakersExpected, bR as TranscriptOptionalParamsSpeechModel, bS as TranscriptOptionalParamsSpeechThreshold, bT as TranscriptOptionalParamsWebhookAuthHeaderName, bU as TranscriptOptionalParamsWebhookAuthHeaderValue, bV as TranscriptPunctuate, bW as TranscriptReadyNotification, bX as TranscriptReadyStatus, bY as TranscriptRedactPiiAudio, bZ as TranscriptRedactPiiAudioQuality, b_ as TranscriptRedactPiiPolicies, b$ as TranscriptSentimentAnalysis, c0 as TranscriptSentimentAnalysisResults, c1 as TranscriptSpeakerLabels, c2 as TranscriptSpeakersExpected, c3 as TranscriptSpeechModel, c4 as TranscriptSpeechThreshold, c5 as TranscriptSpeedBoost, c6 as TranscriptSummary, c7 as TranscriptSummaryModel, c8 as TranscriptSummaryType, c9 as TranscriptText, ca as TranscriptThrottled, cb as TranscriptUtterance, cc as TranscriptUtteranceChannel, cd as TranscriptUtterances, ce as TranscriptWebhookAuthHeaderName, cf as TranscriptWebhookNotification, cg as TranscriptWebhookStatusCode, ch as TranscriptWebhookUrl, ci as TranscriptWordChannel, cj as TranscriptWordSpeaker, ck as TranscriptWords, cl as StreamingUpdateConfiguration, cm as Transcription$1, cn as Status, co as EntityError, cp as EntityReference, cq as DiarizationProperties, cr as DiarizationSpeakersProperties, cs as LanguageIdentificationMode, ct as LanguageIdentificationProperties, cu as LanguageIdentificationPropertiesSpeechModelMapping, cv as ProfanityFilterMode, cw as PunctuationMode, cx as TranscriptionCustomProperties, cy as TranscriptionLinks, cz as TranscriptionProperties, cA as TranscriptTextUsageTokens, cB as TranscriptionSegment, cC as RealtimeSessionCreateRequestGAModel, cD as RealtimeTranscriptionSessionCreateRequestTurnDetectionType, cE as RealtimeTranscriptionSessionCreateRequestInputAudioFormat, cF as AudioResponseFormat, cG as CreateTranscription200One, cH as 
CreateTranscriptionRequest, cI as CreateTranscriptionRequestModel, cJ as CreateTranscriptionRequestStream, cK as CreateTranscriptionRequestTimestampGranularitiesItem, cL as CreateTranscriptionResponseDiarizedJson, cM as CreateTranscriptionResponseDiarizedJsonTask, cN as CreateTranscriptionResponseDiarizedJsonUsage, cO as CreateTranscriptionResponseJson, cP as CreateTranscriptionResponseJsonLogprobsItem, cQ as CreateTranscriptionResponseJsonUsage, cR as CreateTranscriptionResponseVerboseJson, cS as TranscriptTextUsageDuration, cT as TranscriptTextUsageDurationType, cU as TranscriptTextUsageTokensInputTokenDetails, cV as TranscriptTextUsageTokensType, cW as TranscriptionChunkingStrategy, cX as TranscriptionChunkingStrategyAnyOf, cY as TranscriptionDiarizedSegment, cZ as TranscriptionDiarizedSegmentType, c_ as TranscriptionInclude, c$ as TranscriptionWord, d0 as VadConfig, d1 as VadConfigType, d2 as ListenV1Response, d3 as ManageV1FilterAccessorParameter, d4 as ManageV1FilterDeploymentParameter, d5 as ManageV1LimitParameter, d6 as ManageV1PageParameter, d7 as ManageV1FilterEndpointParameter, d8 as ManageV1FilterMethodParameter, d9 as SharedTopics, da as SharedIntents, db as SharedSentiments, dc as SharedCallbackParameter, dd as SharedCallbackMethodParameter, de as SharedSentimentParameter, df as SharedSummarizeParameter, dg as SharedTagParameter, dh as SharedTopicsParameter, di as SharedCustomTopicParameter, dj as SharedCustomTopicModeParameter, dk as SharedIntentsParameter, dl as SharedCustomIntentParameter, dm as SharedCustomIntentModeParameter, dn as SharedMipOptOutParameter, dp as ListenV1DetectEntitiesParameter, dq as ListenV1DetectLanguageParameter, dr as ListenV1DiarizeParameter, ds as ListenV1DictationParameter, dt as ListenV1EncodingParameter, du as ListenV1FillerWordsParameter, dv as ListenV1KeytermParameter, dw as ListenV1KeywordsParameter, dx as ListenV1LanguageParameter, dy as ListenV1MeasurementsParameter, dz as ListenV1MediaTranscribeParams, dA as ListenV1ModelParameter, dB as ListenV1MultichannelParameter, dC as ListenV1NumeralsParameter, dD as ListenV1ParagraphsParameter, dE as ListenV1ProfanityFilterParameter, dF as ListenV1PunctuateParameter, dG as ListenV1RedactParameter, dH as ListenV1RedactParameterOneOfItem, dI as ListenV1ReplaceParameter, dJ as ListenV1ResponseMetadata, dK as ListenV1ResponseMetadataIntentsInfo, dL as ListenV1ResponseMetadataModelInfo, dM as ListenV1ResponseMetadataSentimentInfo, dN as ListenV1ResponseMetadataSummaryInfo, dO as ListenV1ResponseMetadataTopicsInfo, dP as ListenV1ResponseResults, dQ as ListenV1ResponseResultsChannels, dR as ListenV1ResponseResultsChannelsItem, dS as ListenV1ResponseResultsChannelsItemAlternativesItem, dT as ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem, dU as ListenV1ResponseResultsChannelsItemAlternativesItemParagraphs, dV as ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItem, dW as ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItem, dX as ListenV1ResponseResultsChannelsItemAlternativesItemSummariesItem, dY as ListenV1ResponseResultsChannelsItemAlternativesItemTopicsItem, dZ as ListenV1ResponseResultsChannelsItemAlternativesItemWordsItem, d_ as ListenV1ResponseResultsChannelsItemSearchItem, d$ as ListenV1ResponseResultsChannelsItemSearchItemHitsItem, e0 as ListenV1ResponseResultsSummary, e1 as ListenV1ResponseResultsUtterances, e2 as ListenV1ResponseResultsUtterancesItem, e3 as ListenV1ResponseResultsUtterancesItemWordsItem, e4 as 
ListenV1SearchParameter, e5 as ListenV1SmartFormatParameter, e6 as ListenV1UttSplitParameter, e7 as ListenV1UtterancesParameter, e8 as ListenV1VersionParameter, e9 as ManageV1EndDateTimeParameter, ea as ManageV1FilterRequestIdParameter, eb as ManageV1FilterStatusParameter, ec as ManageV1ProjectsRequestsListParams, ed as ManageV1StartDateTimeParameter, ee as SharedExtraParameter, ef as SharedIntentsResults, eg as SharedIntentsResultsIntents, eh as SharedIntentsResultsIntentsSegmentsItem, ei as SharedIntentsResultsIntentsSegmentsItemIntentsItem, ej as SharedSentimentsAverage, ek as SharedSentimentsSegmentsItem, el as SharedTopicsResults, em as SharedTopicsResultsTopics, en as SharedTopicsResultsTopicsSegmentsItem, eo as SharedTopicsResultsTopicsSegmentsItemTopicsItem, ep as SpeechToTextChunkResponseModel, eq as AdditionalFormatResponseModel, er as AdditionalFormats, es as BodySpeechToTextV1SpeechToTextPost, et as BodySpeechToTextV1SpeechToTextPostCloudStorageUrl, eu as BodySpeechToTextV1SpeechToTextPostDiarizationThreshold, ev as BodySpeechToTextV1SpeechToTextPostEntityDetection, ew as BodySpeechToTextV1SpeechToTextPostFile, ex as BodySpeechToTextV1SpeechToTextPostFileFormat, ey as BodySpeechToTextV1SpeechToTextPostLanguageCode, ez as BodySpeechToTextV1SpeechToTextPostModelId, eA as BodySpeechToTextV1SpeechToTextPostNumSpeakers, eB as BodySpeechToTextV1SpeechToTextPostSeed, eC as BodySpeechToTextV1SpeechToTextPostTemperature, eD as BodySpeechToTextV1SpeechToTextPostTimestampsGranularity, eE as BodySpeechToTextV1SpeechToTextPostWebhookId, eF as BodySpeechToTextV1SpeechToTextPostWebhookMetadata, eG as BodySpeechToTextV1SpeechToTextPostWebhookMetadataAnyOf, eH as DetectedEntity, eI as DocxExportOptions, eJ as DocxExportOptionsFormat, eK as DocxExportOptionsMaxSegmentChars, eL as DocxExportOptionsMaxSegmentDurationS, eM as DocxExportOptionsSegmentOnSilenceLongerThanS, eN as ExportOptions, eO as HtmlExportOptions, eP as HtmlExportOptionsFormat, eQ as HtmlExportOptionsMaxSegmentChars, eR as HtmlExportOptionsMaxSegmentDurationS, eS as HtmlExportOptionsSegmentOnSilenceLongerThanS, eT as PdfExportOptions, eU as PdfExportOptionsFormat, eV as PdfExportOptionsMaxSegmentChars, eW as PdfExportOptionsMaxSegmentDurationS, eX as PdfExportOptionsSegmentOnSilenceLongerThanS, eY as SegmentedJsonExportOptions, eZ as SegmentedJsonExportOptionsFormat, e_ as SegmentedJsonExportOptionsMaxSegmentChars, e$ as SegmentedJsonExportOptionsMaxSegmentDurationS, f0 as SegmentedJsonExportOptionsSegmentOnSilenceLongerThanS, f1 as SpeechToTextCharacterResponseModel, f2 as SpeechToTextCharacterResponseModelEnd, f3 as SpeechToTextCharacterResponseModelStart, f4 as SpeechToTextChunkResponseModelAdditionalFormats, f5 as SpeechToTextChunkResponseModelAdditionalFormatsAnyOfItem, f6 as SpeechToTextChunkResponseModelChannelIndex, f7 as SpeechToTextChunkResponseModelEntities, f8 as SpeechToTextChunkResponseModelTranscriptionId, f9 as SpeechToTextWordResponseModel, fa as SpeechToTextWordResponseModelCharacters, fb as SpeechToTextWordResponseModelEnd, fc as SpeechToTextWordResponseModelSpeakerId, fd as SpeechToTextWordResponseModelStart, fe as SpeechToTextWordResponseModelType, ff as SrtExportOptions, fg as SrtExportOptionsFormat, fh as SrtExportOptionsMaxCharactersPerLine, fi as SrtExportOptionsMaxSegmentChars, fj as SrtExportOptionsMaxSegmentDurationS, fk as SrtExportOptionsSegmentOnSilenceLongerThanS, fl as TxtExportOptions, fm as TxtExportOptionsFormat, fn as TxtExportOptionsMaxCharactersPerLine, fo as TxtExportOptionsMaxSegmentChars, 
fp as TxtExportOptionsMaxSegmentDurationS, fq as TxtExportOptionsSegmentOnSilenceLongerThanS } from './speechToTextChunkResponseModel-3IUnJXKx.mjs';
-export {
-import { T as TranscriptionProvider, P as ProviderCapabilities, A as AudioInput } from './provider-metadata-
-export { k as AllLanguageCodes, o as AllProviders, a as AssemblyAICapabilities, i as AssemblyAILanguageCodes, b as AzureCapabilities, B as BatchOnlyProviderType, q as BatchOnlyProviders, C as CapabilityKeys, f as CapabilityLabels, D as DeepgramCapabilities, j as DeepgramLanguageCodes, E as ElevenLabsCapabilities, G as GladiaCapabilities, h as GladiaLanguageCodes, g as LanguageCode, L as LanguageLabels, O as OpenAICapabilities, d as ProviderCapabilitiesMap, l as ProviderDisplayNames, n as ProviderDocs, m as ProviderWebsites, c as SonioxCapabilities, S as SpeechmaticsCapabilities, e as StreamingProviderType, p as StreamingProviders } from './provider-metadata-
-import { DeepgramRegionType, SpeechmaticsRegionType, SonioxModelCode, SonioxRegionType, ElevenLabsModelCode, ElevenLabsRegionType } from './constants.mjs';
+
import { T as TranscribeOptions, U as UnifiedTranscriptResponse, S as StreamingOptions, a as StreamingCallbacks, b as StreamingSession, c as StreamEvent, L as ListTranscriptsOptions, G as GladiaStreamingOptions, D as DeepgramStreamingOptions, A as AssemblyAIStreamingOptions, F as FileResponse, d as StreamingSupportedEncodingEnum, e as StreamingSupportedBitDepthEnum, f as StreamingSupportedSampleRateEnum, g as StreamingSupportedModels, h as LanguageConfig, P as PreProcessingConfig, R as RealtimeProcessingConfig, i as PostProcessingConfig, M as MessagesConfig, C as CallbackConfig, j as TranscriptionMetadataDTO, k as TranscriptionDTO, l as TranslationDTO, m as SummarizationDTO, N as NamedEntityRecognitionDTO, n as SentimentAnalysisDTO, o as ChapterizationDTO, p as PreRecordedResponse, q as UtteranceDTO, r as TranscriptionLanguageCodeEnum, s as TranslationLanguageCodeEnum, W as WordDTO, t as StreamingSupportedRegions, u as AddonErrorDTO, v as AudioToLlmDTO, w as AudioToLlmDTOError, x as AudioToLlmDTOResults, y as AudioToLlmListConfigDTO, z as AudioToLlmListDTO, B as AudioToLlmListDTOError, E as AudioToLlmResultDTO, H as CallbackConfigDto, I as CallbackMethodEnum, J as CallbackTranscriptionErrorPayload, K as CallbackTranscriptionErrorPayloadCustomMetadata, O as CallbackTranscriptionErrorPayloadEvent, Q as CallbackTranscriptionSuccessPayload, V as CallbackTranscriptionSuccessPayloadCustomMetadata, X as CallbackTranscriptionSuccessPayloadEvent, Y as ChapterizationDTOError, Z as ChapterizationDTOResults, _ as CodeSwitchingConfigDTO, $ as CustomSpellingConfigDTO, a0 as CustomSpellingConfigDTOSpellingDictionary, a1 as CustomVocabularyConfigDTO, a2 as CustomVocabularyConfigDTOVocabularyItem, a3 as CustomVocabularyEntryDTO, a4 as DiarizationConfigDTO, a5 as DiarizationDTO, a6 as DiarizationDTOError, a7 as DisplayModeDTO, a8 as DisplayModeDTOError, a9 as ErrorDTO, aa as InitTranscriptionRequest, ab as InitTranscriptionRequestCustomMetadata, ac as ModerationDTO, ad as ModerationDTOError, ae as NamedEntityRecognitionDTOError, af as NamesConsistencyDTO, ag as NamesConsistencyDTOError, ah as PreRecordedRequestParamsResponse, ai as PreRecordedResponseCustomMetadata, aj as PreRecordedResponseFile, ak as PreRecordedResponseKind, al as PreRecordedResponsePostSessionMetadata, am as PreRecordedResponseRequestParams, an as PreRecordedResponseResult, ao as PreRecordedResponseStatus, ap as SentencesDTO, aq as SentencesDTOError, ar as SentimentAnalysisDTOError, as as SpeakerReidentificationDTO, at as SpeakerReidentificationDTOError, au as StreamingRequest, av as StreamingRequestCustomMetadata, aw as StructuredDataExtractionConfigDTO, ax as StructuredDataExtractionDTO, ay as StructuredDataExtractionDTOError, az as SubtitleDTO, aA as SubtitlesConfigDTO, aB as SubtitlesFormatEnum, aC as SubtitlesStyleEnum, aD as SummarizationConfigDTO, aE as SummarizationDTOError, aF as SummaryTypesEnum, aG as TranscriptionControllerListV2KindItem, aH as TranscriptionControllerListV2Params, aI as TranscriptionControllerListV2StatusItem, aJ as TranscriptionResultDTO, aK as TranslationConfigDTO, aL as TranslationDTOError, aM as TranslationModelEnum, aN as TranslationResultDTO, aO as TranslationResultDTOError, aP as TranscriptOptionalParams, aQ as TranscriptStatus, aR as TranscriptWord, aS as AudioIntelligenceModelStatus, aT as AutoHighlightResult, aU as AutoHighlightsResult, aV as Chapter, aW as ContentSafetyLabel, aX as ContentSafetyLabelResult, aY as ContentSafetyLabelsResult, aZ as ContentSafetyLabelsResultSeverityScoreSummary, a_ as 
ContentSafetyLabelsResultSummary, a$ as Entity, b0 as EntityType, b1 as ListTranscriptsParams, b2 as PiiPolicy, b3 as RedactPiiAudioQuality, b4 as RedactedAudioNotification, b5 as RedactedAudioResponse, b6 as RedactedAudioStatus, b7 as Sentiment, b8 as SentimentAnalysisResult$1, b9 as SentimentAnalysisResultChannel, ba as SentimentAnalysisResultSpeaker, bb as SeverityScoreSummary, bc as SpeechModel, bd as SubstitutionPolicy, be as SummaryModel, bf as SummaryType, bg as Timestamp, bh as TopicDetectionModelResult, bi as TopicDetectionModelResultSummary, bj as TopicDetectionResult, bk as TopicDetectionResultLabelsItem, bl as Transcript, bm as TranscriptAudioDuration, bn as TranscriptAudioEndAt, bo as TranscriptAudioStartFrom, bp as TranscriptAutoChapters, bq as TranscriptAutoHighlightsResult, br as TranscriptBoostParam, bs as TranscriptBoostParamProperty, bt as TranscriptChapters, bu as TranscriptConfidence, bv as TranscriptContentSafety, bw as TranscriptContentSafetyLabels, bx as TranscriptCustomSpelling, by as TranscriptCustomSpellingProperty, bz as TranscriptCustomTopics, bA as TranscriptDisfluencies, bB as TranscriptEntities, bC as TranscriptEntityDetection, bD as TranscriptFilterProfanity, bE as TranscriptFormatText, bF as TranscriptIabCategories, bG as TranscriptIabCategoriesResult, bH as TranscriptLanguageCode, bI as TranscriptLanguageCodeProperty, bJ as TranscriptLanguageConfidence, bK as TranscriptLanguageConfidenceThreshold, bL as TranscriptLanguageDetection, bM as TranscriptMultichannel, bN as TranscriptOptionalParamsLanguageCode, bO as TranscriptOptionalParamsLanguageCodeOneOf, bP as TranscriptOptionalParamsRedactPiiSub, bQ as TranscriptOptionalParamsSpeakersExpected, bR as TranscriptOptionalParamsSpeechModel, bS as TranscriptOptionalParamsSpeechThreshold, bT as TranscriptOptionalParamsWebhookAuthHeaderName, bU as TranscriptOptionalParamsWebhookAuthHeaderValue, bV as TranscriptPunctuate, bW as TranscriptReadyNotification, bX as TranscriptReadyStatus, bY as TranscriptRedactPiiAudio, bZ as TranscriptRedactPiiAudioQuality, b_ as TranscriptRedactPiiPolicies, b$ as TranscriptSentimentAnalysis, c0 as TranscriptSentimentAnalysisResults, c1 as TranscriptSpeakerLabels, c2 as TranscriptSpeakersExpected, c3 as TranscriptSpeechModel, c4 as TranscriptSpeechThreshold, c5 as TranscriptSpeedBoost, c6 as TranscriptSummary, c7 as TranscriptSummaryModel, c8 as TranscriptSummaryType, c9 as TranscriptText, ca as TranscriptThrottled, cb as TranscriptUtterance, cc as TranscriptUtteranceChannel, cd as TranscriptUtterances, ce as TranscriptWebhookAuthHeaderName, cf as TranscriptWebhookNotification, cg as TranscriptWebhookStatusCode, ch as TranscriptWebhookUrl, ci as TranscriptWordChannel, cj as TranscriptWordSpeaker, ck as TranscriptWords, cl as StreamingUpdateConfiguration, cm as Transcription$1, cn as EntityError, co as Status, cp as EntityReference, cq as DiarizationProperties, cr as DiarizationSpeakersProperties, cs as LanguageIdentificationMode, ct as LanguageIdentificationProperties, cu as LanguageIdentificationPropertiesSpeechModelMapping, cv as ProfanityFilterMode, cw as PunctuationMode, cx as TranscriptionCustomProperties, cy as TranscriptionLinks, cz as TranscriptionProperties, cA as TranscriptTextUsageTokens, cB as TranscriptionSegment, cC as RealtimeSessionCreateRequestGAModel, cD as RealtimeTranscriptionSessionCreateRequestTurnDetectionType, cE as RealtimeTranscriptionSessionCreateRequestInputAudioFormat, cF as AudioResponseFormat, cG as CreateTranscription200One, cH as 
CreateTranscriptionRequest, cI as CreateTranscriptionRequestModel, cJ as CreateTranscriptionRequestStream, cK as CreateTranscriptionRequestTimestampGranularitiesItem, cL as CreateTranscriptionResponseDiarizedJson, cM as CreateTranscriptionResponseDiarizedJsonTask, cN as CreateTranscriptionResponseDiarizedJsonUsage, cO as CreateTranscriptionResponseJson, cP as CreateTranscriptionResponseJsonLogprobsItem, cQ as CreateTranscriptionResponseJsonUsage, cR as CreateTranscriptionResponseVerboseJson, cS as TranscriptTextUsageDuration, cT as TranscriptTextUsageDurationType, cU as TranscriptTextUsageTokensInputTokenDetails, cV as TranscriptTextUsageTokensType, cW as TranscriptionChunkingStrategy, cX as TranscriptionChunkingStrategyAnyOf, cY as TranscriptionDiarizedSegment, cZ as TranscriptionDiarizedSegmentType, c_ as TranscriptionInclude, c$ as TranscriptionWord, d0 as VadConfig, d1 as VadConfigType, d2 as ListenV1Response, d3 as ManageV1FilterAccessorParameter, d4 as ManageV1FilterDeploymentParameter, d5 as ManageV1LimitParameter, d6 as ManageV1PageParameter, d7 as ManageV1FilterEndpointParameter, d8 as ManageV1FilterMethodParameter, d9 as SharedTopics, da as SharedIntents, db as SharedSentiments, dc as SharedCallbackParameter, dd as SharedCallbackMethodParameter, de as SharedSentimentParameter, df as SharedSummarizeParameter, dg as SharedTagParameter, dh as SharedTopicsParameter, di as SharedCustomTopicParameter, dj as SharedCustomTopicModeParameter, dk as SharedIntentsParameter, dl as SharedCustomIntentParameter, dm as SharedCustomIntentModeParameter, dn as SharedMipOptOutParameter, dp as ListenV1DetectEntitiesParameter, dq as ListenV1DetectLanguageParameter, dr as ListenV1DiarizeParameter, ds as ListenV1DictationParameter, dt as ListenV1EncodingParameter, du as ListenV1FillerWordsParameter, dv as ListenV1KeytermParameter, dw as ListenV1KeywordsParameter, dx as ListenV1LanguageParameter, dy as ListenV1MeasurementsParameter, dz as ListenV1MediaTranscribeParams, dA as ListenV1ModelParameter, dB as ListenV1MultichannelParameter, dC as ListenV1NumeralsParameter, dD as ListenV1ParagraphsParameter, dE as ListenV1ProfanityFilterParameter, dF as ListenV1PunctuateParameter, dG as ListenV1RedactParameter, dH as ListenV1RedactParameterOneOfItem, dI as ListenV1ReplaceParameter, dJ as ListenV1ResponseMetadata, dK as ListenV1ResponseMetadataIntentsInfo, dL as ListenV1ResponseMetadataModelInfo, dM as ListenV1ResponseMetadataSentimentInfo, dN as ListenV1ResponseMetadataSummaryInfo, dO as ListenV1ResponseMetadataTopicsInfo, dP as ListenV1ResponseResults, dQ as ListenV1ResponseResultsChannels, dR as ListenV1ResponseResultsChannelsItem, dS as ListenV1ResponseResultsChannelsItemAlternativesItem, dT as ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem, dU as ListenV1ResponseResultsChannelsItemAlternativesItemParagraphs, dV as ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItem, dW as ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItem, dX as ListenV1ResponseResultsChannelsItemAlternativesItemSummariesItem, dY as ListenV1ResponseResultsChannelsItemAlternativesItemTopicsItem, dZ as ListenV1ResponseResultsChannelsItemAlternativesItemWordsItem, d_ as ListenV1ResponseResultsChannelsItemSearchItem, d$ as ListenV1ResponseResultsChannelsItemSearchItemHitsItem, e0 as ListenV1ResponseResultsSummary, e1 as ListenV1ResponseResultsUtterances, e2 as ListenV1ResponseResultsUtterancesItem, e3 as ListenV1ResponseResultsUtterancesItemWordsItem, e4 as 
ListenV1SearchParameter, e5 as ListenV1SmartFormatParameter, e6 as ListenV1UttSplitParameter, e7 as ListenV1UtterancesParameter, e8 as ListenV1VersionParameter, e9 as ManageV1EndDateTimeParameter, ea as ManageV1FilterRequestIdParameter, eb as ManageV1FilterStatusParameter, ec as ManageV1ProjectsRequestsListParams, ed as ManageV1StartDateTimeParameter, ee as SharedExtraParameter, ef as SharedIntentsResults, eg as SharedIntentsResultsIntents, eh as SharedIntentsResultsIntentsSegmentsItem, ei as SharedIntentsResultsIntentsSegmentsItemIntentsItem, ej as SharedSentimentsAverage, ek as SharedSentimentsSegmentsItem, el as SharedTopicsResults, em as SharedTopicsResultsTopics, en as SharedTopicsResultsTopicsSegmentsItem, eo as SharedTopicsResultsTopicsSegmentsItemTopicsItem, ep as JobType, eq as AlignmentConfig, er as TranscriptionConfig, es as TrackingData, et as OutputConfig, eu as OperatingPoint, ev as AudioEventItem, ew as AudioEventSummary, ex as AudioEventSummaryItem, ey as AutoChaptersResult, ez as AutoChaptersResultError, eA as AutoChaptersResultErrorType, eB as Chapter$1, eC as JobInfo, eD as LanguageIdentificationResult, eE as LanguageIdentificationResultAlternative, eF as LanguageIdentificationResultError, eG as LanguageIdentificationResultItem, eH as LanguagePackInfo, eI as LanguagePackInfoWritingDirection, eJ as OutputConfigSrtOverrides, eK as RecognitionAlternative, eL as RecognitionDisplay, eM as RecognitionDisplayDirection, eN as RecognitionMetadata, eO as RecognitionResult, eP as RecognitionResultAttachesTo, eQ as RecognitionResultType, eR as RetrieveTranscriptResponse, eS as RetrieveTranscriptResponseAudioEventSummary, eT as RetrieveTranscriptResponseAudioEventSummaryChannels, eU as RetrieveTranscriptResponseTranslations, eV as SentimentAnalysisError, eW as SentimentAnalysisErrorType, eX as SentimentAnalysisResult$2, eY as SentimentAnalysisResultSentimentAnalysis, eZ as SentimentChannelSummary, e_ as SentimentSegment, e$ as SentimentSpeakerSummary, f0 as SentimentSummary, f1 as SentimentSummaryDetail, f2 as SpokenFormRecognitionResult, f3 as SpokenFormRecognitionResultType, f4 as SummarizationError, f5 as SummarizationErrorType, f6 as SummarizationResult, f7 as TopicDetectionError, f8 as TopicDetectionErrorType, f9 as TopicDetectionResult$1, fa as TopicDetectionSegment, fb as TopicDetectionSegmentTopic, fc as TopicDetectionSummary, fd as TopicDetectionSummaryOverall, fe as TrackingDataDetails, ff as TranscriptionConfigAdditionalVocabItem, fg as TranscriptionConfigDiarization, fh as TranscriptionConfigMaxDelayMode, fi as TranscriptionConfigPunctuationOverrides, fj as TranscriptionConfigSpeakerDiarizationConfig, fk as TranscriptionConfigTranscriptFilteringConfig, fl as TranscriptionConfigTranscriptFilteringConfigReplacementsItem, fm as TranslationError, fn as TranslationErrorType, fo as TranslationSentence, fp as WrittenFormRecognitionResult, fq as WrittenFormRecognitionResultType, fr as SpeechToTextChunkResponseModel, fs as AdditionalFormatResponseModel, ft as AdditionalFormats, fu as BodySpeechToTextV1SpeechToTextPost, fv as BodySpeechToTextV1SpeechToTextPostCloudStorageUrl, fw as BodySpeechToTextV1SpeechToTextPostDiarizationThreshold, fx as BodySpeechToTextV1SpeechToTextPostEntityDetection, fy as BodySpeechToTextV1SpeechToTextPostFile, fz as BodySpeechToTextV1SpeechToTextPostFileFormat, fA as BodySpeechToTextV1SpeechToTextPostLanguageCode, fB as BodySpeechToTextV1SpeechToTextPostModelId, fC as BodySpeechToTextV1SpeechToTextPostNumSpeakers, fD as 
BodySpeechToTextV1SpeechToTextPostSeed, fE as BodySpeechToTextV1SpeechToTextPostTemperature, fF as BodySpeechToTextV1SpeechToTextPostTimestampsGranularity, fG as BodySpeechToTextV1SpeechToTextPostWebhookId, fH as BodySpeechToTextV1SpeechToTextPostWebhookMetadata, fI as BodySpeechToTextV1SpeechToTextPostWebhookMetadataAnyOf, fJ as DetectedEntity, fK as DocxExportOptions, fL as DocxExportOptionsFormat, fM as DocxExportOptionsMaxSegmentChars, fN as DocxExportOptionsMaxSegmentDurationS, fO as DocxExportOptionsSegmentOnSilenceLongerThanS, fP as ExportOptions, fQ as HtmlExportOptions, fR as HtmlExportOptionsFormat, fS as HtmlExportOptionsMaxSegmentChars, fT as HtmlExportOptionsMaxSegmentDurationS, fU as HtmlExportOptionsSegmentOnSilenceLongerThanS, fV as PdfExportOptions, fW as PdfExportOptionsFormat, fX as PdfExportOptionsMaxSegmentChars, fY as PdfExportOptionsMaxSegmentDurationS, fZ as PdfExportOptionsSegmentOnSilenceLongerThanS, f_ as SegmentedJsonExportOptions, f$ as SegmentedJsonExportOptionsFormat, g0 as SegmentedJsonExportOptionsMaxSegmentChars, g1 as SegmentedJsonExportOptionsMaxSegmentDurationS, g2 as SegmentedJsonExportOptionsSegmentOnSilenceLongerThanS, g3 as SpeechToTextCharacterResponseModel, g4 as SpeechToTextCharacterResponseModelEnd, g5 as SpeechToTextCharacterResponseModelStart, g6 as SpeechToTextChunkResponseModelAdditionalFormats, g7 as SpeechToTextChunkResponseModelAdditionalFormatsAnyOfItem, g8 as SpeechToTextChunkResponseModelChannelIndex, g9 as SpeechToTextChunkResponseModelEntities, ga as SpeechToTextChunkResponseModelTranscriptionId, gb as SpeechToTextWordResponseModel, gc as SpeechToTextWordResponseModelCharacters, gd as SpeechToTextWordResponseModelEnd, ge as SpeechToTextWordResponseModelSpeakerId, gf as SpeechToTextWordResponseModelStart, gg as SpeechToTextWordResponseModelType, gh as SrtExportOptions, gi as SrtExportOptionsFormat, gj as SrtExportOptionsMaxCharactersPerLine, gk as SrtExportOptionsMaxSegmentChars, gl as SrtExportOptionsMaxSegmentDurationS, gm as SrtExportOptionsSegmentOnSilenceLongerThanS, gn as TxtExportOptions, go as TxtExportOptionsFormat, gp as TxtExportOptionsMaxCharactersPerLine, gq as TxtExportOptionsMaxSegmentChars, gr as TxtExportOptionsMaxSegmentDurationS, gs as TxtExportOptionsSegmentOnSilenceLongerThanS } from './speechToTextChunkResponseModel-DvIT4xai.mjs';
+
export { gw as AssemblyAIExtendedData, gX as AssemblyAIUpdateConfiguration, gT as AudioAckEvent, gV as AudioChunk, gC as BatchOnlyProvider, h2 as BeginEvent, gS as ChapterizationEvent, gy as DeepgramExtendedData, gz as ElevenLabsExtendedData, g_ as ElevenLabsStreamingOptions, gQ as EntityEvent, h5 as ErrorEvent, gx as GladiaExtendedData, gU as LifecycleEvent, gK as ListTranscriptsResponse, gY as OpenAIStreamingOptions, gA as ProviderExtendedDataMap, gL as ProviderRawResponseMap, g$ as ProviderStreamingOptions, gW as RawWebSocketMessage, gP as SentimentEvent, gD as SessionStatus, gZ as SonioxStreamingOptions, gE as Speaker, gN as SpeechEvent, gt as SpeechmaticsOperatingPoint, gM as StreamEventType, h6 as StreamingEventMessage, h8 as StreamingForceEndpoint, h0 as StreamingOptionsForProvider, gB as StreamingProvider, h7 as StreamingWord, gR as SummarizationEvent, h4 as TerminationEvent, h1 as TranscribeStreamParams, gJ as TranscriptData, gI as TranscriptMetadata, gv as TranscriptionLanguage, gu as TranscriptionModel, gH as TranscriptionStatus, gO as TranslationEvent, h3 as TurnEvent, gG as Utterance, gF as Word } from './speechToTextChunkResponseModel-DvIT4xai.mjs';
+import { T as TranscriptionProvider, P as ProviderCapabilities, A as AudioInput } from './provider-metadata-MDUUEuqF.mjs';
+export { k as AllLanguageCodes, o as AllProviders, a as AssemblyAICapabilities, i as AssemblyAILanguageCodes, b as AzureCapabilities, B as BatchOnlyProviderType, q as BatchOnlyProviders, C as CapabilityKeys, f as CapabilityLabels, D as DeepgramCapabilities, j as DeepgramLanguageCodes, E as ElevenLabsCapabilities, G as GladiaCapabilities, h as GladiaLanguageCodes, g as LanguageCode, L as LanguageLabels, O as OpenAICapabilities, d as ProviderCapabilitiesMap, l as ProviderDisplayNames, n as ProviderDocs, m as ProviderWebsites, c as SonioxCapabilities, S as SpeechmaticsCapabilities, e as StreamingProviderType, p as StreamingProviders } from './provider-metadata-MDUUEuqF.mjs';
+import { AssemblyAIRegionType, DeepgramRegionType, SpeechmaticsRegionType, SonioxModelCode, SonioxRegionType, ElevenLabsModelCode, ElevenLabsRegionType } from './constants.mjs';
export { AssemblyAIEncoding, AssemblyAIEncodingType, AssemblyAILanguage, AssemblyAILanguageType, AssemblyAISampleRate, AssemblyAISampleRateType, AssemblyAISpeechModel, AssemblyAISpeechModelType, AssemblyAIStatus, AssemblyAITranscriptionModel, AssemblyAITranscriptionModelType, AzureLocale, AzureLocaleCode, AzureLocaleCodes, AzureLocaleLabels, AzureLocaleType, AzureLocales, DeepgramCallbackMethod, DeepgramEncoding, DeepgramIntentMode, DeepgramLanguage, DeepgramLanguageCode as DeepgramLanguageType, DeepgramModel, DeepgramModelCode as DeepgramModelType, DeepgramRedact, DeepgramRedactType, DeepgramRegion, DeepgramSampleRate, DeepgramStatus, DeepgramTTSContainer, DeepgramTTSEncoding, DeepgramTTSModel, DeepgramTTSSampleRate, DeepgramTopicMode, DeepgramTopicModeType, ElevenLabsLanguageCode, ElevenLabsLanguageCodes, ElevenLabsLanguageLabels, ElevenLabsLanguages, GladiaBitDepth, GladiaEncoding, GladiaLanguage, GladiaModel, GladiaRegion, GladiaSampleRate, GladiaStatus, GladiaTranslationLanguage, OpenAILanguageCodes, OpenAIModel, OpenAIRealtimeAudioFormat, OpenAIRealtimeModel, OpenAIRealtimeTranscriptionModel, OpenAIRealtimeTurnDetection, OpenAIResponseFormat, SonioxAsyncModel, SonioxAsyncModelCode, SonioxLanguage, SonioxLanguageCode, SonioxLanguageCodes, SonioxLanguageLabels, SonioxLanguageType, SonioxLanguages, SonioxModel, SonioxModelCodes, SonioxModelLabels, SonioxModels, SonioxRealtimeModel, SonioxRealtimeModelCode, SonioxRegion, SpeechmaticsLanguage, SpeechmaticsLanguageCode, SpeechmaticsLanguageCodes, SpeechmaticsLanguageLabels, SpeechmaticsLanguageType, SpeechmaticsLanguages, SpeechmaticsRegion } from './constants.mjs';
-
export { B as AssemblyAIListFilterFieldName, L as AssemblyAIListFilterSchema, E as AssemblyAIStreamingConfig, x as AssemblyAIStreamingFieldName, J as AssemblyAIStreamingSchema, H as AssemblyAIStreamingUpdateConfig, y as AssemblyAIStreamingUpdateFieldName, K as AssemblyAIStreamingUpdateSchema, C as AssemblyAITranscriptionConfig, A as AssemblyAITranscriptionFieldName, I as AssemblyAITranscriptionSchema, a as AssemblyAIZodSchemas, R as AzureListFilterFieldName, U as AzureListFilterSchema, S as AzureTranscriptionConfig, Q as AzureTranscriptionFieldName, T as AzureTranscriptionSchema, q as DeepgramListFilterFieldName, w as DeepgramListFilterSchema, p as DeepgramStreamingFieldName, t as DeepgramStreamingOnlyConfig, v as DeepgramStreamingOnlySchema, r as DeepgramTranscriptionConfig, D as DeepgramTranscriptionFieldName, u as DeepgramTranscriptionSchema, d as DeepgramZodSchemas, Z as FieldConfig, aK as FieldConfigProvider, c as FieldOverrides, F as FieldType, i as GladiaListFilterFieldName, n as GladiaListFilterSchema, k as GladiaStreamingConfig, h as GladiaStreamingFieldName, m as GladiaStreamingSchema, j as GladiaTranscriptionConfig, G as GladiaTranscriptionFieldName, l as GladiaTranscriptionSchema, g as GladiaZodSchemas, M as OpenAITranscriptionConfig, O as OpenAITranscriptionFieldName, N as OpenAITranscriptionSchema, o as OpenAIZodSchemas, P as ProviderFieldConfigs, b as SonioxApiZodSchemas, a8 as SonioxListFilterFieldName, af as SonioxListFilterSchema, aa as SonioxStreamingConfig, a6 as SonioxStreamingFieldName, ad as SonioxStreamingSchema, s as SonioxStreamingTypes, ab as SonioxStreamingUpdateConfig, a7 as SonioxStreamingUpdateFieldName, ae as SonioxStreamingUpdateSchema, s as SonioxStreamingZodSchemas, a9 as SonioxTranscriptionConfig, a5 as SonioxTranscriptionFieldName, ac as SonioxTranscriptionSchema, Y as SpeechmaticsListFilterFieldName, a4 as SpeechmaticsListFilterSchema, $ as SpeechmaticsStreamingConfig, W as SpeechmaticsStreamingFieldName, a2 as SpeechmaticsStreamingSchema, a0 as SpeechmaticsStreamingUpdateConfig, X as SpeechmaticsStreamingUpdateFieldName, a3 as SpeechmaticsStreamingUpdateSchema, _ as SpeechmaticsTranscriptionConfig, V as SpeechmaticsTranscriptionFieldName, a1 as SpeechmaticsTranscriptionSchema, ah as StreamingFieldName, ag as TranscriptionFieldName, Z as ZodFieldConfig, e as excludeFields, f as filterFields, aM as getAllFieldConfigs, au as getAssemblyAIFieldConfigs, ar as getAssemblyAIListFilterFields, as as getAssemblyAIStreamingFields, at as getAssemblyAIStreamingUpdateFields, aq as getAssemblyAITranscriptionFields, az as getAzureFieldConfigs, ay as getAzureListFilterFields, ax as getAzureTranscriptionFields, ap as getDeepgramFieldConfigs, an as getDeepgramListFilterFields, ao as getDeepgramStreamingFields, am as getDeepgramTranscriptionFields, al as getGladiaFieldConfigs, aj as getGladiaListFilterFields, ak as getGladiaStreamingFields, ai as getGladiaTranscriptionFields, aw as getOpenAIFieldConfigs, av as getOpenAITranscriptionFields, aL as getProviderFieldConfigs, aJ as getSonioxFieldConfigs, aG as getSonioxListFilterFields, aH as getSonioxStreamingFields, aI as getSonioxStreamingUpdateFields, aF as getSonioxTranscriptionFields, aE as getSpeechmaticsFieldConfigs, aB as getSpeechmaticsListFilterFields, aC as getSpeechmaticsStreamingFields, aD as getSpeechmaticsStreamingUpdateFields, aA as getSpeechmaticsTranscriptionFields, z as zodToFieldConfigs } from './field-configs-
+
export { B as AssemblyAIListFilterFieldName, L as AssemblyAIListFilterSchema, E as AssemblyAIStreamingConfig, x as AssemblyAIStreamingFieldName, J as AssemblyAIStreamingSchema, H as AssemblyAIStreamingUpdateConfig, y as AssemblyAIStreamingUpdateFieldName, K as AssemblyAIStreamingUpdateSchema, C as AssemblyAITranscriptionConfig, A as AssemblyAITranscriptionFieldName, I as AssemblyAITranscriptionSchema, a as AssemblyAIZodSchemas, R as AzureListFilterFieldName, U as AzureListFilterSchema, S as AzureTranscriptionConfig, Q as AzureTranscriptionFieldName, T as AzureTranscriptionSchema, q as DeepgramListFilterFieldName, w as DeepgramListFilterSchema, p as DeepgramStreamingFieldName, t as DeepgramStreamingOnlyConfig, v as DeepgramStreamingOnlySchema, r as DeepgramTranscriptionConfig, D as DeepgramTranscriptionFieldName, u as DeepgramTranscriptionSchema, d as DeepgramZodSchemas, Z as FieldConfig, aK as FieldConfigProvider, c as FieldOverrides, F as FieldType, i as GladiaListFilterFieldName, n as GladiaListFilterSchema, k as GladiaStreamingConfig, h as GladiaStreamingFieldName, m as GladiaStreamingSchema, j as GladiaTranscriptionConfig, G as GladiaTranscriptionFieldName, l as GladiaTranscriptionSchema, g as GladiaZodSchemas, M as OpenAITranscriptionConfig, O as OpenAITranscriptionFieldName, N as OpenAITranscriptionSchema, o as OpenAIZodSchemas, P as ProviderFieldConfigs, b as SonioxApiZodSchemas, a8 as SonioxListFilterFieldName, af as SonioxListFilterSchema, aa as SonioxStreamingConfig, a6 as SonioxStreamingFieldName, ad as SonioxStreamingSchema, s as SonioxStreamingTypes, ab as SonioxStreamingUpdateConfig, a7 as SonioxStreamingUpdateFieldName, ae as SonioxStreamingUpdateSchema, s as SonioxStreamingZodSchemas, a9 as SonioxTranscriptionConfig, a5 as SonioxTranscriptionFieldName, ac as SonioxTranscriptionSchema, Y as SpeechmaticsListFilterFieldName, a4 as SpeechmaticsListFilterSchema, $ as SpeechmaticsStreamingConfig, W as SpeechmaticsStreamingFieldName, a2 as SpeechmaticsStreamingSchema, a0 as SpeechmaticsStreamingUpdateConfig, X as SpeechmaticsStreamingUpdateFieldName, a3 as SpeechmaticsStreamingUpdateSchema, _ as SpeechmaticsTranscriptionConfig, V as SpeechmaticsTranscriptionFieldName, a1 as SpeechmaticsTranscriptionSchema, ah as StreamingFieldName, ag as TranscriptionFieldName, Z as ZodFieldConfig, e as excludeFields, f as filterFields, aM as getAllFieldConfigs, au as getAssemblyAIFieldConfigs, ar as getAssemblyAIListFilterFields, as as getAssemblyAIStreamingFields, at as getAssemblyAIStreamingUpdateFields, aq as getAssemblyAITranscriptionFields, az as getAzureFieldConfigs, ay as getAzureListFilterFields, ax as getAzureTranscriptionFields, ap as getDeepgramFieldConfigs, an as getDeepgramListFilterFields, ao as getDeepgramStreamingFields, am as getDeepgramTranscriptionFields, al as getGladiaFieldConfigs, aj as getGladiaListFilterFields, ak as getGladiaStreamingFields, ai as getGladiaTranscriptionFields, aw as getOpenAIFieldConfigs, av as getOpenAITranscriptionFields, aL as getProviderFieldConfigs, aJ as getSonioxFieldConfigs, aG as getSonioxListFilterFields, aH as getSonioxStreamingFields, aI as getSonioxStreamingUpdateFields, aF as getSonioxTranscriptionFields, aE as getSpeechmaticsFieldConfigs, aB as getSpeechmaticsListFilterFields, aC as getSpeechmaticsStreamingFields, aD as getSpeechmaticsStreamingUpdateFields, aA as getSpeechmaticsTranscriptionFields, z as zodToFieldConfigs } from './field-configs-t_lVCkE5.mjs';
import { z } from 'zod';
import { AxiosResponse, AxiosRequestConfig } from 'axios';
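The hunk above is mostly mechanical: the shared declaration chunks were re-hashed (provider-metadata-MDUUEuqF, speechToTextChunkResponseModel-DvIT4xai), AssemblyAIRegionType joins the constants import, and line 2 now re-exports a block of streaming-related types (per-provider streaming options, stream event types, provider extended-data maps). A type-only sketch of what this makes importable, assuming these names are surfaced from the package root as this declaration file suggests:

```ts
// Type-only sketch; assumes the names newly re-exported above are reachable
// from the package root. The union below is illustrative, not part of the package.
import type {
  ElevenLabsStreamingOptions,
  SonioxStreamingOptions,
  OpenAIStreamingOptions,
} from 'voice-router-dev';

// One options type per streaming provider, per the new export block.
type AnyStreamingOptions =
  | ElevenLabsStreamingOptions
  | SonioxStreamingOptions
  | OpenAIStreamingOptions;
```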
@@ -32,6 +32,12 @@ declare const ERROR_CODES: {
readonly CONNECTION_TIMEOUT: "CONNECTION_TIMEOUT";
/** Invalid input provided to API */
readonly INVALID_INPUT: "INVALID_INPUT";
+/** Authentication failed (invalid or missing API key) */
+readonly AUTHENTICATION_ERROR: "AUTHENTICATION_ERROR";
+/** Rate limit exceeded */
+readonly RATE_LIMIT: "RATE_LIMIT";
+/** Provider server error (5xx) */
+readonly SERVER_ERROR: "SERVER_ERROR";
/** Requested operation not supported by provider */
readonly NOT_SUPPORTED: "NOT_SUPPORTED";
/** No transcription results available */
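0.8.3 widens ERROR_CODES with three new members covering authentication, throttling, and provider 5xx failures. A minimal sketch of a retry policy keyed on them, assuming ERROR_CODES is exported from the package root and that normalized errors carry a code field matching these constants (neither detail is shown in this hunk):

```ts
import { ERROR_CODES } from 'voice-router-dev';

type ErrorCode = (typeof ERROR_CODES)[keyof typeof ERROR_CODES];

// Hypothetical policy: transient failures retry, the rest surface immediately.
function isRetryable(code: ErrorCode): boolean {
  switch (code) {
    case ERROR_CODES.RATE_LIMIT:   // new in 0.8.3: back off, then retry
    case ERROR_CODES.SERVER_ERROR: // new in 0.8.3: provider 5xx
    case ERROR_CODES.CONNECTION_TIMEOUT:
      return true;
    case ERROR_CODES.AUTHENTICATION_ERROR: // new in 0.8.3: fix credentials instead
    default:
      return false;
  }
}
```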
@@ -1005,7 +1011,7 @@ declare const SentimentAnalysisMessageType: {
* Gladia Control API
* OpenAPI spec version: 1.0
*/
-interface SentimentAnalysisResult
+interface SentimentAnalysisResult {
sentiment: string;
emotion: string;
text: string;
@@ -1027,7 +1033,7 @@ interface SentimentAnalysisData {
/** The transcribed utterance */
utterance: UtteranceDTO;
/** The sentiment analysis results */
-results: SentimentAnalysisResult
+results: SentimentAnalysisResult[];
}

/**
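This hunk and the previous one are a type-level repair: SentimentAnalysisResult regains its opening brace and SentimentAnalysisData.results becomes an array; the next hunk then registers the mended interface in the index$7 namespace. A sketch of the corrected shapes, abridged to the fields visible in the hunks:

```ts
// UtteranceDTO stands in for the real Gladia DTO declared elsewhere in this file.
type UtteranceDTO = { text: string };

interface SentimentAnalysisResult {
  sentiment: string;
  emotion: string;
  text: string;
}

interface SentimentAnalysisData {
  /** The transcribed utterance */
  utterance: UtteranceDTO;
  /** The sentiment analysis results */
  results: SentimentAnalysisResult[]; // previously a bare, brace-less reference
}

// With the array fix, iteration now type-checks:
declare const data: SentimentAnalysisData;
const sentiments: string[] = data.results.map((r) => r.sentiment);
```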
@@ -3550,6 +3556,7 @@ type index$7_SentimentAnalysisMessage = SentimentAnalysisMessage;
type index$7_SentimentAnalysisMessageData = SentimentAnalysisMessageData;
type index$7_SentimentAnalysisMessageError = SentimentAnalysisMessageError;
declare const index$7_SentimentAnalysisMessageType: typeof SentimentAnalysisMessageType;
+type index$7_SentimentAnalysisResult = SentimentAnalysisResult;
declare const index$7_SpeakerReidentificationDTO: typeof SpeakerReidentificationDTO;
declare const index$7_SpeakerReidentificationDTOError: typeof SpeakerReidentificationDTOError;
type index$7_SpeechEndMessage = SpeechEndMessage;
@@ -3648,7 +3655,7 @@ type index$7_WebhookTranscriptionSuccessPayload = WebhookTranscriptionSuccessPay
declare const index$7_WebhookTranscriptionSuccessPayloadEvent: typeof WebhookTranscriptionSuccessPayloadEvent;
declare const index$7_WordDTO: typeof WordDTO;
declare namespace index$7 {
-
export { index$7_AddonErrorDTO as AddonErrorDTO, type index$7_AudioChunkAckData as AudioChunkAckData, type index$7_AudioChunkAckMessage as AudioChunkAckMessage, type index$7_AudioChunkAckMessageData as AudioChunkAckMessageData, type index$7_AudioChunkAckMessageError as AudioChunkAckMessageError, index$7_AudioChunkAckMessageType as AudioChunkAckMessageType, type index$7_AudioChunkAction as AudioChunkAction, type index$7_AudioChunkActionData as AudioChunkActionData, index$7_AudioChunkActionType as AudioChunkActionType, index$7_AudioToLlmDTO as AudioToLlmDTO, index$7_AudioToLlmDTOError as AudioToLlmDTOError, index$7_AudioToLlmDTOResults as AudioToLlmDTOResults, index$7_AudioToLlmListConfigDTO as AudioToLlmListConfigDTO, index$7_AudioToLlmListDTO as AudioToLlmListDTO, index$7_AudioToLlmListDTOError as AudioToLlmListDTOError, index$7_AudioToLlmResultDTO as AudioToLlmResultDTO, type index$7_AudioToTextControllerAudioTranscriptionBody as AudioToTextControllerAudioTranscriptionBody, index$7_AudioToTextControllerAudioTranscriptionBodyLanguage as AudioToTextControllerAudioTranscriptionBodyLanguage, index$7_AudioToTextControllerAudioTranscriptionBodyLanguageBehaviour as AudioToTextControllerAudioTranscriptionBodyLanguageBehaviour, index$7_AudioToTextControllerAudioTranscriptionBodyOutputFormat as AudioToTextControllerAudioTranscriptionBodyOutputFormat, index$7_AudioToTextControllerAudioTranscriptionBodyTargetTranslationLanguage as AudioToTextControllerAudioTranscriptionBodyTargetTranslationLanguage, type index$7_AudioUploadMetadataDTO as AudioUploadMetadataDTO, type index$7_AudioUploadResponse as AudioUploadResponse, type index$7_BadRequestErrorResponse as BadRequestErrorResponse, index$7_CallbackConfig as CallbackConfig, index$7_CallbackConfigDto as CallbackConfigDto, type index$7_CallbackLiveAudioChunkAckMessage as CallbackLiveAudioChunkAckMessage, index$7_CallbackLiveAudioChunkAckMessageEvent as CallbackLiveAudioChunkAckMessageEvent, type index$7_CallbackLiveEndRecordingMessage as CallbackLiveEndRecordingMessage, index$7_CallbackLiveEndRecordingMessageEvent as CallbackLiveEndRecordingMessageEvent, type index$7_CallbackLiveEndSessionMessage as CallbackLiveEndSessionMessage, index$7_CallbackLiveEndSessionMessageEvent as CallbackLiveEndSessionMessageEvent, type index$7_CallbackLiveNamedEntityRecognitionMessage as CallbackLiveNamedEntityRecognitionMessage, index$7_CallbackLiveNamedEntityRecognitionMessageEvent as CallbackLiveNamedEntityRecognitionMessageEvent, type index$7_CallbackLivePostChapterizationMessage as CallbackLivePostChapterizationMessage, index$7_CallbackLivePostChapterizationMessageEvent as CallbackLivePostChapterizationMessageEvent, type index$7_CallbackLivePostFinalTranscriptMessage as CallbackLivePostFinalTranscriptMessage, index$7_CallbackLivePostFinalTranscriptMessageEvent as CallbackLivePostFinalTranscriptMessageEvent, type index$7_CallbackLivePostSummarizationMessage as CallbackLivePostSummarizationMessage, index$7_CallbackLivePostSummarizationMessageEvent as CallbackLivePostSummarizationMessageEvent, type index$7_CallbackLivePostTranscriptMessage as CallbackLivePostTranscriptMessage, index$7_CallbackLivePostTranscriptMessageEvent as CallbackLivePostTranscriptMessageEvent, type index$7_CallbackLiveSentimentAnalysisMessage as CallbackLiveSentimentAnalysisMessage, index$7_CallbackLiveSentimentAnalysisMessageEvent as CallbackLiveSentimentAnalysisMessageEvent, type index$7_CallbackLiveSpeechEndMessage as CallbackLiveSpeechEndMessage, index$7_CallbackLiveSpeechEndMessageEvent as 
CallbackLiveSpeechEndMessageEvent, type index$7_CallbackLiveSpeechStartMessage as CallbackLiveSpeechStartMessage, index$7_CallbackLiveSpeechStartMessageEvent as CallbackLiveSpeechStartMessageEvent, type index$7_CallbackLiveStartRecordingMessage as CallbackLiveStartRecordingMessage, index$7_CallbackLiveStartRecordingMessageEvent as CallbackLiveStartRecordingMessageEvent, type index$7_CallbackLiveStartSessionMessage as CallbackLiveStartSessionMessage, index$7_CallbackLiveStartSessionMessageEvent as CallbackLiveStartSessionMessageEvent, type index$7_CallbackLiveStopRecordingAckMessage as CallbackLiveStopRecordingAckMessage, index$7_CallbackLiveStopRecordingAckMessageEvent as CallbackLiveStopRecordingAckMessageEvent, type index$7_CallbackLiveTranscriptMessage as CallbackLiveTranscriptMessage, index$7_CallbackLiveTranscriptMessageEvent as CallbackLiveTranscriptMessageEvent, type index$7_CallbackLiveTranslationMessage as CallbackLiveTranslationMessage, index$7_CallbackLiveTranslationMessageEvent as CallbackLiveTranslationMessageEvent, index$7_CallbackMethodEnum as CallbackMethodEnum, index$7_CallbackTranscriptionErrorPayload as CallbackTranscriptionErrorPayload, index$7_CallbackTranscriptionErrorPayloadCustomMetadata as CallbackTranscriptionErrorPayloadCustomMetadata, index$7_CallbackTranscriptionErrorPayloadEvent as CallbackTranscriptionErrorPayloadEvent, index$7_CallbackTranscriptionSuccessPayload as CallbackTranscriptionSuccessPayload, index$7_CallbackTranscriptionSuccessPayloadCustomMetadata as CallbackTranscriptionSuccessPayloadCustomMetadata, index$7_CallbackTranscriptionSuccessPayloadEvent as CallbackTranscriptionSuccessPayloadEvent, index$7_ChapterizationDTO as ChapterizationDTO, index$7_ChapterizationDTOError as ChapterizationDTOError, index$7_ChapterizationDTOResults as ChapterizationDTOResults, type index$7_ChapterizationSentence as ChapterizationSentence, index$7_CodeSwitchingConfigDTO as CodeSwitchingConfigDTO, index$7_CustomSpellingConfigDTO as CustomSpellingConfigDTO, index$7_CustomSpellingConfigDTOSpellingDictionary as CustomSpellingConfigDTOSpellingDictionary, index$7_CustomVocabularyConfigDTO as CustomVocabularyConfigDTO, index$7_CustomVocabularyConfigDTOVocabularyItem as CustomVocabularyConfigDTOVocabularyItem, index$7_CustomVocabularyEntryDTO as CustomVocabularyEntryDTO, index$7_DiarizationConfigDTO as DiarizationConfigDTO, index$7_DiarizationDTO as DiarizationDTO, index$7_DiarizationDTOError as DiarizationDTOError, index$7_DisplayModeDTO as DisplayModeDTO, index$7_DisplayModeDTOError as DisplayModeDTOError, type index$7_EndRecordingMessage as EndRecordingMessage, type index$7_EndRecordingMessageData as EndRecordingMessageData, index$7_EndRecordingMessageType as EndRecordingMessageType, type index$7_EndSessionMessage as EndSessionMessage, index$7_EndSessionMessageType as EndSessionMessageType, type Error$3 as Error, index$7_ErrorDTO as ErrorDTO, type index$7_FileControllerUploadV2BodyOne as FileControllerUploadV2BodyOne, type index$7_FileControllerUploadV2BodyTwo as FileControllerUploadV2BodyTwo, index$7_FileResponse as FileResponse, type index$7_ForbiddenErrorResponse as ForbiddenErrorResponse, index$7_HistoryControllerGetListV1KindItem as HistoryControllerGetListV1KindItem, type index$7_HistoryControllerGetListV1Params as HistoryControllerGetListV1Params, index$7_HistoryControllerGetListV1StatusItem as HistoryControllerGetListV1StatusItem, type index$7_InitPreRecordedTranscriptionResponse as InitPreRecordedTranscriptionResponse, type index$7_InitStreamingResponse as 
InitStreamingResponse, index$7_InitTranscriptionRequest as InitTranscriptionRequest, index$7_InitTranscriptionRequestCustomMetadata as InitTranscriptionRequestCustomMetadata, index$7_LanguageConfig as LanguageConfig, type index$7_ListHistoryResponse as ListHistoryResponse, type index$7_ListHistoryResponseItemsItem as ListHistoryResponseItemsItem, type index$7_ListPreRecordedResponse as ListPreRecordedResponse, type index$7_ListStreamingResponse as ListStreamingResponse, type index$7_ListTranscriptionResponse as ListTranscriptionResponse, type index$7_ListTranscriptionResponseItemsItem as ListTranscriptionResponseItemsItem, type index$7_LiveEventPayload as LiveEventPayload, index$7_MessagesConfig as MessagesConfig, index$7_ModerationDTO as ModerationDTO, index$7_ModerationDTOError as ModerationDTOError, index$7_NamedEntityRecognitionDTO as NamedEntityRecognitionDTO, index$7_NamedEntityRecognitionDTOError as NamedEntityRecognitionDTOError, type index$7_NamedEntityRecognitionData as NamedEntityRecognitionData, type index$7_NamedEntityRecognitionMessage as NamedEntityRecognitionMessage, type index$7_NamedEntityRecognitionMessageData as NamedEntityRecognitionMessageData, type index$7_NamedEntityRecognitionMessageError as NamedEntityRecognitionMessageError, index$7_NamedEntityRecognitionMessageType as NamedEntityRecognitionMessageType, type index$7_NamedEntityRecognitionResult as NamedEntityRecognitionResult, index$7_NamesConsistencyDTO as NamesConsistencyDTO, index$7_NamesConsistencyDTOError as NamesConsistencyDTOError, type index$7_NotFoundErrorResponse as NotFoundErrorResponse, type index$7_PatchRequestParamsDTO as PatchRequestParamsDTO, type index$7_PayloadTooLargeErrorResponse as PayloadTooLargeErrorResponse, type index$7_PostChapterizationMessage as PostChapterizationMessage, type index$7_PostChapterizationMessageData as PostChapterizationMessageData, type index$7_PostChapterizationMessageDataProperty as PostChapterizationMessageDataProperty, type index$7_PostChapterizationMessageError as PostChapterizationMessageError, index$7_PostChapterizationMessageType as PostChapterizationMessageType, type index$7_PostChapterizationResult as PostChapterizationResult, type index$7_PostFinalTranscriptMessage as PostFinalTranscriptMessage, index$7_PostFinalTranscriptMessageType as PostFinalTranscriptMessageType, index$7_PostProcessingConfig as PostProcessingConfig, type index$7_PostSummarizationMessage as PostSummarizationMessage, type index$7_PostSummarizationMessageData as PostSummarizationMessageData, type index$7_PostSummarizationMessageDataProperty as PostSummarizationMessageDataProperty, type index$7_PostSummarizationMessageError as PostSummarizationMessageError, index$7_PostSummarizationMessageType as PostSummarizationMessageType, type index$7_PostTranscriptMessage as PostTranscriptMessage, index$7_PostTranscriptMessageType as PostTranscriptMessageType, index$7_PreProcessingConfig as PreProcessingConfig, type index$7_PreRecordedControllerGetPreRecordedJobsV2Params as PreRecordedControllerGetPreRecordedJobsV2Params, index$7_PreRecordedControllerGetPreRecordedJobsV2StatusItem as PreRecordedControllerGetPreRecordedJobsV2StatusItem, type index$7_PreRecordedEventPayload as PreRecordedEventPayload, index$7_PreRecordedRequestParamsResponse as PreRecordedRequestParamsResponse, index$7_PreRecordedResponse as PreRecordedResponse, index$7_PreRecordedResponseCustomMetadata as PreRecordedResponseCustomMetadata, index$7_PreRecordedResponseFile as PreRecordedResponseFile, index$7_PreRecordedResponseKind as 
PreRecordedResponseKind, index$7_PreRecordedResponsePostSessionMetadata as PreRecordedResponsePostSessionMetadata, index$7_PreRecordedResponseRequestParams as PreRecordedResponseRequestParams, index$7_PreRecordedResponseResult as PreRecordedResponseResult, index$7_PreRecordedResponseStatus as PreRecordedResponseStatus, index$7_RealtimeProcessingConfig as RealtimeProcessingConfig, index$7_SentencesDTO as SentencesDTO, index$7_SentencesDTOError as SentencesDTOError, index$7_SentimentAnalysisDTO as SentimentAnalysisDTO, index$7_SentimentAnalysisDTOError as SentimentAnalysisDTOError, type index$7_SentimentAnalysisData as SentimentAnalysisData, type index$7_SentimentAnalysisMessage as SentimentAnalysisMessage, type index$7_SentimentAnalysisMessageData as SentimentAnalysisMessageData, type index$7_SentimentAnalysisMessageError as SentimentAnalysisMessageError, index$7_SentimentAnalysisMessageType as SentimentAnalysisMessageType, type SentimentAnalysisResult$1 as SentimentAnalysisResult, index$7_SpeakerReidentificationDTO as SpeakerReidentificationDTO, index$7_SpeakerReidentificationDTOError as SpeakerReidentificationDTOError, type index$7_SpeechEndMessage as SpeechEndMessage, index$7_SpeechEndMessageType as SpeechEndMessageType, type index$7_SpeechMessageData as SpeechMessageData, type index$7_SpeechStartMessage as SpeechStartMessage, index$7_SpeechStartMessageType as SpeechStartMessageType, type index$7_StartRecordingMessage as StartRecordingMessage, index$7_StartRecordingMessageType as StartRecordingMessageType, type index$7_StartSessionMessage as StartSessionMessage, index$7_StartSessionMessageType as StartSessionMessageType, type index$7_StopRecordingAckData as StopRecordingAckData, type index$7_StopRecordingAckMessage as StopRecordingAckMessage, type index$7_StopRecordingAckMessageData as StopRecordingAckMessageData, type index$7_StopRecordingAckMessageError as StopRecordingAckMessageError, index$7_StopRecordingAckMessageType as StopRecordingAckMessageType, type index$7_StopRecordingAction as StopRecordingAction, index$7_StopRecordingActionType as StopRecordingActionType, type index$7_StreamingControllerGetStreamingJobsV2Params as StreamingControllerGetStreamingJobsV2Params, index$7_StreamingControllerGetStreamingJobsV2StatusItem as StreamingControllerGetStreamingJobsV2StatusItem, type index$7_StreamingControllerInitStreamingSessionV2Params as StreamingControllerInitStreamingSessionV2Params, index$7_StreamingRequest as StreamingRequest, index$7_StreamingRequestCustomMetadata as StreamingRequestCustomMetadata, type index$7_StreamingRequestParamsResponse as StreamingRequestParamsResponse, type index$7_StreamingResponse as StreamingResponse, type index$7_StreamingResponseCustomMetadata as StreamingResponseCustomMetadata, type index$7_StreamingResponseFile as StreamingResponseFile, index$7_StreamingResponseKind as StreamingResponseKind, type index$7_StreamingResponsePostSessionMetadata as StreamingResponsePostSessionMetadata, type index$7_StreamingResponseRequestParams as StreamingResponseRequestParams, type index$7_StreamingResponseResult as StreamingResponseResult, index$7_StreamingResponseStatus as StreamingResponseStatus, index$7_StreamingSupportedBitDepthEnum as StreamingSupportedBitDepthEnum, index$7_StreamingSupportedEncodingEnum as StreamingSupportedEncodingEnum, index$7_StreamingSupportedModels as StreamingSupportedModels, index$7_StreamingSupportedRegions as StreamingSupportedRegions, index$7_StreamingSupportedSampleRateEnum as StreamingSupportedSampleRateEnum, type 
index$7_StreamingTranscriptionResultDTO as StreamingTranscriptionResultDTO, type index$7_StreamingTranscriptionResultWithMessagesDTO as StreamingTranscriptionResultWithMessagesDTO, index$7_StructuredDataExtractionConfigDTO as StructuredDataExtractionConfigDTO, index$7_StructuredDataExtractionDTO as StructuredDataExtractionDTO, index$7_StructuredDataExtractionDTOError as StructuredDataExtractionDTOError, index$7_SubtitleDTO as SubtitleDTO, index$7_SubtitlesConfigDTO as SubtitlesConfigDTO, index$7_SubtitlesFormatEnum as SubtitlesFormatEnum, index$7_SubtitlesStyleEnum as SubtitlesStyleEnum, index$7_SummarizationConfigDTO as SummarizationConfigDTO, index$7_SummarizationDTO as SummarizationDTO, index$7_SummarizationDTOError as SummarizationDTOError, index$7_SummaryTypesEnum as SummaryTypesEnum, type index$7_TranscriptMessage as TranscriptMessage, type index$7_TranscriptMessageData as TranscriptMessageData, index$7_TranscriptMessageType as TranscriptMessageType, type index$7_TranscriptionControllerGetTranscriptV2200 as TranscriptionControllerGetTranscriptV2200, index$7_TranscriptionControllerListV2KindItem as TranscriptionControllerListV2KindItem, index$7_TranscriptionControllerListV2Params as TranscriptionControllerListV2Params, index$7_TranscriptionControllerListV2StatusItem as TranscriptionControllerListV2StatusItem, index$7_TranscriptionDTO as TranscriptionDTO, index$7_TranscriptionLanguageCodeEnum as TranscriptionLanguageCodeEnum, index$7_TranscriptionMetadataDTO as TranscriptionMetadataDTO, index$7_TranscriptionResultDTO as TranscriptionResultDTO, index$7_TranslationConfigDTO as TranslationConfigDTO, index$7_TranslationDTO as TranslationDTO, index$7_TranslationDTOError as TranslationDTOError, type index$7_TranslationData as TranslationData, index$7_TranslationLanguageCodeEnum as TranslationLanguageCodeEnum, type index$7_TranslationMessage as TranslationMessage, type index$7_TranslationMessageData as TranslationMessageData, type index$7_TranslationMessageError as TranslationMessageError, index$7_TranslationMessageType as TranslationMessageType, index$7_TranslationModelEnum as TranslationModelEnum, index$7_TranslationResultDTO as TranslationResultDTO, index$7_TranslationResultDTOError as TranslationResultDTOError, type index$7_UnauthorizedErrorResponse as UnauthorizedErrorResponse, type index$7_UnprocessableEntityErrorResponse as UnprocessableEntityErrorResponse, type index$7_UploadBody as UploadBody, index$7_UtteranceDTO as UtteranceDTO, type index$7_VideoToTextControllerVideoTranscriptionBody as VideoToTextControllerVideoTranscriptionBody, index$7_VideoToTextControllerVideoTranscriptionBodyLanguage as VideoToTextControllerVideoTranscriptionBodyLanguage, index$7_VideoToTextControllerVideoTranscriptionBodyLanguageBehaviour as VideoToTextControllerVideoTranscriptionBodyLanguageBehaviour, index$7_VideoToTextControllerVideoTranscriptionBodyOutputFormat as VideoToTextControllerVideoTranscriptionBodyOutputFormat, index$7_VideoToTextControllerVideoTranscriptionBodyTargetTranslationLanguage as VideoToTextControllerVideoTranscriptionBodyTargetTranslationLanguage, type index$7_WebhookLiveEndRecordingPayload as WebhookLiveEndRecordingPayload, index$7_WebhookLiveEndRecordingPayloadEvent as WebhookLiveEndRecordingPayloadEvent, type index$7_WebhookLiveEndSessionPayload as WebhookLiveEndSessionPayload, index$7_WebhookLiveEndSessionPayloadEvent as WebhookLiveEndSessionPayloadEvent, type index$7_WebhookLiveStartRecordingPayload as WebhookLiveStartRecordingPayload, 
index$7_WebhookLiveStartRecordingPayloadEvent as WebhookLiveStartRecordingPayloadEvent, type index$7_WebhookLiveStartSessionPayload as WebhookLiveStartSessionPayload, index$7_WebhookLiveStartSessionPayloadEvent as WebhookLiveStartSessionPayloadEvent, type index$7_WebhookTranscriptionCreatedPayload as WebhookTranscriptionCreatedPayload, index$7_WebhookTranscriptionCreatedPayloadEvent as WebhookTranscriptionCreatedPayloadEvent, type index$7_WebhookTranscriptionErrorPayload as WebhookTranscriptionErrorPayload, index$7_WebhookTranscriptionErrorPayloadEvent as WebhookTranscriptionErrorPayloadEvent, type index$7_WebhookTranscriptionSuccessPayload as WebhookTranscriptionSuccessPayload, index$7_WebhookTranscriptionSuccessPayloadEvent as WebhookTranscriptionSuccessPayloadEvent, index$7_WordDTO as WordDTO };
+
export { index$7_AddonErrorDTO as AddonErrorDTO, type index$7_AudioChunkAckData as AudioChunkAckData, type index$7_AudioChunkAckMessage as AudioChunkAckMessage, type index$7_AudioChunkAckMessageData as AudioChunkAckMessageData, type index$7_AudioChunkAckMessageError as AudioChunkAckMessageError, index$7_AudioChunkAckMessageType as AudioChunkAckMessageType, type index$7_AudioChunkAction as AudioChunkAction, type index$7_AudioChunkActionData as AudioChunkActionData, index$7_AudioChunkActionType as AudioChunkActionType, index$7_AudioToLlmDTO as AudioToLlmDTO, index$7_AudioToLlmDTOError as AudioToLlmDTOError, index$7_AudioToLlmDTOResults as AudioToLlmDTOResults, index$7_AudioToLlmListConfigDTO as AudioToLlmListConfigDTO, index$7_AudioToLlmListDTO as AudioToLlmListDTO, index$7_AudioToLlmListDTOError as AudioToLlmListDTOError, index$7_AudioToLlmResultDTO as AudioToLlmResultDTO, type index$7_AudioToTextControllerAudioTranscriptionBody as AudioToTextControllerAudioTranscriptionBody, index$7_AudioToTextControllerAudioTranscriptionBodyLanguage as AudioToTextControllerAudioTranscriptionBodyLanguage, index$7_AudioToTextControllerAudioTranscriptionBodyLanguageBehaviour as AudioToTextControllerAudioTranscriptionBodyLanguageBehaviour, index$7_AudioToTextControllerAudioTranscriptionBodyOutputFormat as AudioToTextControllerAudioTranscriptionBodyOutputFormat, index$7_AudioToTextControllerAudioTranscriptionBodyTargetTranslationLanguage as AudioToTextControllerAudioTranscriptionBodyTargetTranslationLanguage, type index$7_AudioUploadMetadataDTO as AudioUploadMetadataDTO, type index$7_AudioUploadResponse as AudioUploadResponse, type index$7_BadRequestErrorResponse as BadRequestErrorResponse, index$7_CallbackConfig as CallbackConfig, index$7_CallbackConfigDto as CallbackConfigDto, type index$7_CallbackLiveAudioChunkAckMessage as CallbackLiveAudioChunkAckMessage, index$7_CallbackLiveAudioChunkAckMessageEvent as CallbackLiveAudioChunkAckMessageEvent, type index$7_CallbackLiveEndRecordingMessage as CallbackLiveEndRecordingMessage, index$7_CallbackLiveEndRecordingMessageEvent as CallbackLiveEndRecordingMessageEvent, type index$7_CallbackLiveEndSessionMessage as CallbackLiveEndSessionMessage, index$7_CallbackLiveEndSessionMessageEvent as CallbackLiveEndSessionMessageEvent, type index$7_CallbackLiveNamedEntityRecognitionMessage as CallbackLiveNamedEntityRecognitionMessage, index$7_CallbackLiveNamedEntityRecognitionMessageEvent as CallbackLiveNamedEntityRecognitionMessageEvent, type index$7_CallbackLivePostChapterizationMessage as CallbackLivePostChapterizationMessage, index$7_CallbackLivePostChapterizationMessageEvent as CallbackLivePostChapterizationMessageEvent, type index$7_CallbackLivePostFinalTranscriptMessage as CallbackLivePostFinalTranscriptMessage, index$7_CallbackLivePostFinalTranscriptMessageEvent as CallbackLivePostFinalTranscriptMessageEvent, type index$7_CallbackLivePostSummarizationMessage as CallbackLivePostSummarizationMessage, index$7_CallbackLivePostSummarizationMessageEvent as CallbackLivePostSummarizationMessageEvent, type index$7_CallbackLivePostTranscriptMessage as CallbackLivePostTranscriptMessage, index$7_CallbackLivePostTranscriptMessageEvent as CallbackLivePostTranscriptMessageEvent, type index$7_CallbackLiveSentimentAnalysisMessage as CallbackLiveSentimentAnalysisMessage, index$7_CallbackLiveSentimentAnalysisMessageEvent as CallbackLiveSentimentAnalysisMessageEvent, type index$7_CallbackLiveSpeechEndMessage as CallbackLiveSpeechEndMessage, index$7_CallbackLiveSpeechEndMessageEvent as 
CallbackLiveSpeechEndMessageEvent, type index$7_CallbackLiveSpeechStartMessage as CallbackLiveSpeechStartMessage, index$7_CallbackLiveSpeechStartMessageEvent as CallbackLiveSpeechStartMessageEvent, type index$7_CallbackLiveStartRecordingMessage as CallbackLiveStartRecordingMessage, index$7_CallbackLiveStartRecordingMessageEvent as CallbackLiveStartRecordingMessageEvent, type index$7_CallbackLiveStartSessionMessage as CallbackLiveStartSessionMessage, index$7_CallbackLiveStartSessionMessageEvent as CallbackLiveStartSessionMessageEvent, type index$7_CallbackLiveStopRecordingAckMessage as CallbackLiveStopRecordingAckMessage, index$7_CallbackLiveStopRecordingAckMessageEvent as CallbackLiveStopRecordingAckMessageEvent, type index$7_CallbackLiveTranscriptMessage as CallbackLiveTranscriptMessage, index$7_CallbackLiveTranscriptMessageEvent as CallbackLiveTranscriptMessageEvent, type index$7_CallbackLiveTranslationMessage as CallbackLiveTranslationMessage, index$7_CallbackLiveTranslationMessageEvent as CallbackLiveTranslationMessageEvent, index$7_CallbackMethodEnum as CallbackMethodEnum, index$7_CallbackTranscriptionErrorPayload as CallbackTranscriptionErrorPayload, index$7_CallbackTranscriptionErrorPayloadCustomMetadata as CallbackTranscriptionErrorPayloadCustomMetadata, index$7_CallbackTranscriptionErrorPayloadEvent as CallbackTranscriptionErrorPayloadEvent, index$7_CallbackTranscriptionSuccessPayload as CallbackTranscriptionSuccessPayload, index$7_CallbackTranscriptionSuccessPayloadCustomMetadata as CallbackTranscriptionSuccessPayloadCustomMetadata, index$7_CallbackTranscriptionSuccessPayloadEvent as CallbackTranscriptionSuccessPayloadEvent, index$7_ChapterizationDTO as ChapterizationDTO, index$7_ChapterizationDTOError as ChapterizationDTOError, index$7_ChapterizationDTOResults as ChapterizationDTOResults, type index$7_ChapterizationSentence as ChapterizationSentence, index$7_CodeSwitchingConfigDTO as CodeSwitchingConfigDTO, index$7_CustomSpellingConfigDTO as CustomSpellingConfigDTO, index$7_CustomSpellingConfigDTOSpellingDictionary as CustomSpellingConfigDTOSpellingDictionary, index$7_CustomVocabularyConfigDTO as CustomVocabularyConfigDTO, index$7_CustomVocabularyConfigDTOVocabularyItem as CustomVocabularyConfigDTOVocabularyItem, index$7_CustomVocabularyEntryDTO as CustomVocabularyEntryDTO, index$7_DiarizationConfigDTO as DiarizationConfigDTO, index$7_DiarizationDTO as DiarizationDTO, index$7_DiarizationDTOError as DiarizationDTOError, index$7_DisplayModeDTO as DisplayModeDTO, index$7_DisplayModeDTOError as DisplayModeDTOError, type index$7_EndRecordingMessage as EndRecordingMessage, type index$7_EndRecordingMessageData as EndRecordingMessageData, index$7_EndRecordingMessageType as EndRecordingMessageType, type index$7_EndSessionMessage as EndSessionMessage, index$7_EndSessionMessageType as EndSessionMessageType, type Error$3 as Error, index$7_ErrorDTO as ErrorDTO, type index$7_FileControllerUploadV2BodyOne as FileControllerUploadV2BodyOne, type index$7_FileControllerUploadV2BodyTwo as FileControllerUploadV2BodyTwo, index$7_FileResponse as FileResponse, type index$7_ForbiddenErrorResponse as ForbiddenErrorResponse, index$7_HistoryControllerGetListV1KindItem as HistoryControllerGetListV1KindItem, type index$7_HistoryControllerGetListV1Params as HistoryControllerGetListV1Params, index$7_HistoryControllerGetListV1StatusItem as HistoryControllerGetListV1StatusItem, type index$7_InitPreRecordedTranscriptionResponse as InitPreRecordedTranscriptionResponse, type index$7_InitStreamingResponse as 
InitStreamingResponse, index$7_InitTranscriptionRequest as InitTranscriptionRequest, index$7_InitTranscriptionRequestCustomMetadata as InitTranscriptionRequestCustomMetadata, index$7_LanguageConfig as LanguageConfig, type index$7_ListHistoryResponse as ListHistoryResponse, type index$7_ListHistoryResponseItemsItem as ListHistoryResponseItemsItem, type index$7_ListPreRecordedResponse as ListPreRecordedResponse, type index$7_ListStreamingResponse as ListStreamingResponse, type index$7_ListTranscriptionResponse as ListTranscriptionResponse, type index$7_ListTranscriptionResponseItemsItem as ListTranscriptionResponseItemsItem, type index$7_LiveEventPayload as LiveEventPayload, index$7_MessagesConfig as MessagesConfig, index$7_ModerationDTO as ModerationDTO, index$7_ModerationDTOError as ModerationDTOError, index$7_NamedEntityRecognitionDTO as NamedEntityRecognitionDTO, index$7_NamedEntityRecognitionDTOError as NamedEntityRecognitionDTOError, type index$7_NamedEntityRecognitionData as NamedEntityRecognitionData, type index$7_NamedEntityRecognitionMessage as NamedEntityRecognitionMessage, type index$7_NamedEntityRecognitionMessageData as NamedEntityRecognitionMessageData, type index$7_NamedEntityRecognitionMessageError as NamedEntityRecognitionMessageError, index$7_NamedEntityRecognitionMessageType as NamedEntityRecognitionMessageType, type index$7_NamedEntityRecognitionResult as NamedEntityRecognitionResult, index$7_NamesConsistencyDTO as NamesConsistencyDTO, index$7_NamesConsistencyDTOError as NamesConsistencyDTOError, type index$7_NotFoundErrorResponse as NotFoundErrorResponse, type index$7_PatchRequestParamsDTO as PatchRequestParamsDTO, type index$7_PayloadTooLargeErrorResponse as PayloadTooLargeErrorResponse, type index$7_PostChapterizationMessage as PostChapterizationMessage, type index$7_PostChapterizationMessageData as PostChapterizationMessageData, type index$7_PostChapterizationMessageDataProperty as PostChapterizationMessageDataProperty, type index$7_PostChapterizationMessageError as PostChapterizationMessageError, index$7_PostChapterizationMessageType as PostChapterizationMessageType, type index$7_PostChapterizationResult as PostChapterizationResult, type index$7_PostFinalTranscriptMessage as PostFinalTranscriptMessage, index$7_PostFinalTranscriptMessageType as PostFinalTranscriptMessageType, index$7_PostProcessingConfig as PostProcessingConfig, type index$7_PostSummarizationMessage as PostSummarizationMessage, type index$7_PostSummarizationMessageData as PostSummarizationMessageData, type index$7_PostSummarizationMessageDataProperty as PostSummarizationMessageDataProperty, type index$7_PostSummarizationMessageError as PostSummarizationMessageError, index$7_PostSummarizationMessageType as PostSummarizationMessageType, type index$7_PostTranscriptMessage as PostTranscriptMessage, index$7_PostTranscriptMessageType as PostTranscriptMessageType, index$7_PreProcessingConfig as PreProcessingConfig, type index$7_PreRecordedControllerGetPreRecordedJobsV2Params as PreRecordedControllerGetPreRecordedJobsV2Params, index$7_PreRecordedControllerGetPreRecordedJobsV2StatusItem as PreRecordedControllerGetPreRecordedJobsV2StatusItem, type index$7_PreRecordedEventPayload as PreRecordedEventPayload, index$7_PreRecordedRequestParamsResponse as PreRecordedRequestParamsResponse, index$7_PreRecordedResponse as PreRecordedResponse, index$7_PreRecordedResponseCustomMetadata as PreRecordedResponseCustomMetadata, index$7_PreRecordedResponseFile as PreRecordedResponseFile, index$7_PreRecordedResponseKind as 
PreRecordedResponseKind, index$7_PreRecordedResponsePostSessionMetadata as PreRecordedResponsePostSessionMetadata, index$7_PreRecordedResponseRequestParams as PreRecordedResponseRequestParams, index$7_PreRecordedResponseResult as PreRecordedResponseResult, index$7_PreRecordedResponseStatus as PreRecordedResponseStatus, index$7_RealtimeProcessingConfig as RealtimeProcessingConfig, index$7_SentencesDTO as SentencesDTO, index$7_SentencesDTOError as SentencesDTOError, index$7_SentimentAnalysisDTO as SentimentAnalysisDTO, index$7_SentimentAnalysisDTOError as SentimentAnalysisDTOError, type index$7_SentimentAnalysisData as SentimentAnalysisData, type index$7_SentimentAnalysisMessage as SentimentAnalysisMessage, type index$7_SentimentAnalysisMessageData as SentimentAnalysisMessageData, type index$7_SentimentAnalysisMessageError as SentimentAnalysisMessageError, index$7_SentimentAnalysisMessageType as SentimentAnalysisMessageType, type index$7_SentimentAnalysisResult as SentimentAnalysisResult, index$7_SpeakerReidentificationDTO as SpeakerReidentificationDTO, index$7_SpeakerReidentificationDTOError as SpeakerReidentificationDTOError, type index$7_SpeechEndMessage as SpeechEndMessage, index$7_SpeechEndMessageType as SpeechEndMessageType, type index$7_SpeechMessageData as SpeechMessageData, type index$7_SpeechStartMessage as SpeechStartMessage, index$7_SpeechStartMessageType as SpeechStartMessageType, type index$7_StartRecordingMessage as StartRecordingMessage, index$7_StartRecordingMessageType as StartRecordingMessageType, type index$7_StartSessionMessage as StartSessionMessage, index$7_StartSessionMessageType as StartSessionMessageType, type index$7_StopRecordingAckData as StopRecordingAckData, type index$7_StopRecordingAckMessage as StopRecordingAckMessage, type index$7_StopRecordingAckMessageData as StopRecordingAckMessageData, type index$7_StopRecordingAckMessageError as StopRecordingAckMessageError, index$7_StopRecordingAckMessageType as StopRecordingAckMessageType, type index$7_StopRecordingAction as StopRecordingAction, index$7_StopRecordingActionType as StopRecordingActionType, type index$7_StreamingControllerGetStreamingJobsV2Params as StreamingControllerGetStreamingJobsV2Params, index$7_StreamingControllerGetStreamingJobsV2StatusItem as StreamingControllerGetStreamingJobsV2StatusItem, type index$7_StreamingControllerInitStreamingSessionV2Params as StreamingControllerInitStreamingSessionV2Params, index$7_StreamingRequest as StreamingRequest, index$7_StreamingRequestCustomMetadata as StreamingRequestCustomMetadata, type index$7_StreamingRequestParamsResponse as StreamingRequestParamsResponse, type index$7_StreamingResponse as StreamingResponse, type index$7_StreamingResponseCustomMetadata as StreamingResponseCustomMetadata, type index$7_StreamingResponseFile as StreamingResponseFile, index$7_StreamingResponseKind as StreamingResponseKind, type index$7_StreamingResponsePostSessionMetadata as StreamingResponsePostSessionMetadata, type index$7_StreamingResponseRequestParams as StreamingResponseRequestParams, type index$7_StreamingResponseResult as StreamingResponseResult, index$7_StreamingResponseStatus as StreamingResponseStatus, index$7_StreamingSupportedBitDepthEnum as StreamingSupportedBitDepthEnum, index$7_StreamingSupportedEncodingEnum as StreamingSupportedEncodingEnum, index$7_StreamingSupportedModels as StreamingSupportedModels, index$7_StreamingSupportedRegions as StreamingSupportedRegions, index$7_StreamingSupportedSampleRateEnum as StreamingSupportedSampleRateEnum, type 
index$7_StreamingTranscriptionResultDTO as StreamingTranscriptionResultDTO, type index$7_StreamingTranscriptionResultWithMessagesDTO as StreamingTranscriptionResultWithMessagesDTO, index$7_StructuredDataExtractionConfigDTO as StructuredDataExtractionConfigDTO, index$7_StructuredDataExtractionDTO as StructuredDataExtractionDTO, index$7_StructuredDataExtractionDTOError as StructuredDataExtractionDTOError, index$7_SubtitleDTO as SubtitleDTO, index$7_SubtitlesConfigDTO as SubtitlesConfigDTO, index$7_SubtitlesFormatEnum as SubtitlesFormatEnum, index$7_SubtitlesStyleEnum as SubtitlesStyleEnum, index$7_SummarizationConfigDTO as SummarizationConfigDTO, index$7_SummarizationDTO as SummarizationDTO, index$7_SummarizationDTOError as SummarizationDTOError, index$7_SummaryTypesEnum as SummaryTypesEnum, type index$7_TranscriptMessage as TranscriptMessage, type index$7_TranscriptMessageData as TranscriptMessageData, index$7_TranscriptMessageType as TranscriptMessageType, type index$7_TranscriptionControllerGetTranscriptV2200 as TranscriptionControllerGetTranscriptV2200, index$7_TranscriptionControllerListV2KindItem as TranscriptionControllerListV2KindItem, index$7_TranscriptionControllerListV2Params as TranscriptionControllerListV2Params, index$7_TranscriptionControllerListV2StatusItem as TranscriptionControllerListV2StatusItem, index$7_TranscriptionDTO as TranscriptionDTO, index$7_TranscriptionLanguageCodeEnum as TranscriptionLanguageCodeEnum, index$7_TranscriptionMetadataDTO as TranscriptionMetadataDTO, index$7_TranscriptionResultDTO as TranscriptionResultDTO, index$7_TranslationConfigDTO as TranslationConfigDTO, index$7_TranslationDTO as TranslationDTO, index$7_TranslationDTOError as TranslationDTOError, type index$7_TranslationData as TranslationData, index$7_TranslationLanguageCodeEnum as TranslationLanguageCodeEnum, type index$7_TranslationMessage as TranslationMessage, type index$7_TranslationMessageData as TranslationMessageData, type index$7_TranslationMessageError as TranslationMessageError, index$7_TranslationMessageType as TranslationMessageType, index$7_TranslationModelEnum as TranslationModelEnum, index$7_TranslationResultDTO as TranslationResultDTO, index$7_TranslationResultDTOError as TranslationResultDTOError, type index$7_UnauthorizedErrorResponse as UnauthorizedErrorResponse, type index$7_UnprocessableEntityErrorResponse as UnprocessableEntityErrorResponse, type index$7_UploadBody as UploadBody, index$7_UtteranceDTO as UtteranceDTO, type index$7_VideoToTextControllerVideoTranscriptionBody as VideoToTextControllerVideoTranscriptionBody, index$7_VideoToTextControllerVideoTranscriptionBodyLanguage as VideoToTextControllerVideoTranscriptionBodyLanguage, index$7_VideoToTextControllerVideoTranscriptionBodyLanguageBehaviour as VideoToTextControllerVideoTranscriptionBodyLanguageBehaviour, index$7_VideoToTextControllerVideoTranscriptionBodyOutputFormat as VideoToTextControllerVideoTranscriptionBodyOutputFormat, index$7_VideoToTextControllerVideoTranscriptionBodyTargetTranslationLanguage as VideoToTextControllerVideoTranscriptionBodyTargetTranslationLanguage, type index$7_WebhookLiveEndRecordingPayload as WebhookLiveEndRecordingPayload, index$7_WebhookLiveEndRecordingPayloadEvent as WebhookLiveEndRecordingPayloadEvent, type index$7_WebhookLiveEndSessionPayload as WebhookLiveEndSessionPayload, index$7_WebhookLiveEndSessionPayloadEvent as WebhookLiveEndSessionPayloadEvent, type index$7_WebhookLiveStartRecordingPayload as WebhookLiveStartRecordingPayload, 
index$7_WebhookLiveStartRecordingPayloadEvent as WebhookLiveStartRecordingPayloadEvent, type index$7_WebhookLiveStartSessionPayload as WebhookLiveStartSessionPayload, index$7_WebhookLiveStartSessionPayloadEvent as WebhookLiveStartSessionPayloadEvent, type index$7_WebhookTranscriptionCreatedPayload as WebhookTranscriptionCreatedPayload, index$7_WebhookTranscriptionCreatedPayloadEvent as WebhookTranscriptionCreatedPayloadEvent, type index$7_WebhookTranscriptionErrorPayload as WebhookTranscriptionErrorPayload, index$7_WebhookTranscriptionErrorPayloadEvent as WebhookTranscriptionErrorPayloadEvent, type index$7_WebhookTranscriptionSuccessPayload as WebhookTranscriptionSuccessPayload, index$7_WebhookTranscriptionSuccessPayloadEvent as WebhookTranscriptionSuccessPayloadEvent, index$7_WordDTO as WordDTO };
 }
 
 /**
@@ -4999,6 +5006,7 @@ declare const index$6_AutoHighlightResult: typeof AutoHighlightResult;
 declare const index$6_AutoHighlightsResult: typeof AutoHighlightsResult;
 type index$6_BadRequestResponse = BadRequestResponse;
 type index$6_CannotAccessUploadedFileResponse = CannotAccessUploadedFileResponse;
+declare const index$6_Chapter: typeof Chapter;
 declare const index$6_ContentSafetyLabel: typeof ContentSafetyLabel;
 declare const index$6_ContentSafetyLabelResult: typeof ContentSafetyLabelResult;
 declare const index$6_ContentSafetyLabelsResult: typeof ContentSafetyLabelsResult;
@@ -5066,6 +5074,7 @@ declare const index$6_Timestamp: typeof Timestamp;
 type index$6_TooManyRequestsResponse = TooManyRequestsResponse;
 declare const index$6_TopicDetectionModelResult: typeof TopicDetectionModelResult;
 declare const index$6_TopicDetectionModelResultSummary: typeof TopicDetectionModelResultSummary;
+declare const index$6_TopicDetectionResult: typeof TopicDetectionResult;
 declare const index$6_TopicDetectionResultLabelsItem: typeof TopicDetectionResultLabelsItem;
 declare const index$6_Transcript: typeof Transcript;
 declare const index$6_TranscriptAudioDuration: typeof TranscriptAudioDuration;
@@ -5151,7 +5160,7 @@ type index$6_WordSearchParams = WordSearchParams;
 type index$6_WordSearchResponse = WordSearchResponse;
 type index$6_WordSearchTimestamp = WordSearchTimestamp;
 declare namespace index$6 {
-
export { index$6_AudioIntelligenceModelStatus as AudioIntelligenceModelStatus, index$6_AutoHighlightResult as AutoHighlightResult, index$6_AutoHighlightsResult as AutoHighlightsResult, type index$6_BadRequestResponse as BadRequestResponse, type index$6_CannotAccessUploadedFileResponse as CannotAccessUploadedFileResponse,
+
export { index$6_AudioIntelligenceModelStatus as AudioIntelligenceModelStatus, index$6_AutoHighlightResult as AutoHighlightResult, index$6_AutoHighlightsResult as AutoHighlightsResult, type index$6_BadRequestResponse as BadRequestResponse, type index$6_CannotAccessUploadedFileResponse as CannotAccessUploadedFileResponse, index$6_Chapter as Chapter, index$6_ContentSafetyLabel as ContentSafetyLabel, index$6_ContentSafetyLabelResult as ContentSafetyLabelResult, index$6_ContentSafetyLabelsResult as ContentSafetyLabelsResult, index$6_ContentSafetyLabelsResultSeverityScoreSummary as ContentSafetyLabelsResultSeverityScoreSummary, index$6_ContentSafetyLabelsResultSummary as ContentSafetyLabelsResultSummary, type index$6_CreateRealtimeTemporaryTokenParams as CreateRealtimeTemporaryTokenParams, index$6_Entity as Entity, index$6_EntityType as EntityType, type Error$2 as Error, type index$6_GatewayTimeoutResponse as GatewayTimeoutResponse, type index$6_GetSubtitlesParams as GetSubtitlesParams, type index$6_InternalServerErrorResponse as InternalServerErrorResponse, type index$6_LemurActionItemsParams as LemurActionItemsParams, type index$6_LemurActionItemsParamsAllOf as LemurActionItemsParamsAllOf, type index$6_LemurActionItemsResponse as LemurActionItemsResponse, type index$6_LemurBaseParams as LemurBaseParams, type index$6_LemurBaseParamsContext as LemurBaseParamsContext, type index$6_LemurBaseParamsContextOneOf as LemurBaseParamsContextOneOf, type index$6_LemurBaseParamsFinalModel as LemurBaseParamsFinalModel, type index$6_LemurBaseResponse as LemurBaseResponse, index$6_LemurModel as LemurModel, type index$6_LemurQuestion as LemurQuestion, type index$6_LemurQuestionAnswer as LemurQuestionAnswer, type index$6_LemurQuestionAnswerParams as LemurQuestionAnswerParams, type index$6_LemurQuestionAnswerParamsAllOf as LemurQuestionAnswerParamsAllOf, type index$6_LemurQuestionAnswerResponse as LemurQuestionAnswerResponse, type index$6_LemurQuestionAnswerResponseAllOf as LemurQuestionAnswerResponseAllOf, type index$6_LemurQuestionContext as LemurQuestionContext, type index$6_LemurQuestionContextOneOf as LemurQuestionContextOneOf, type index$6_LemurResponse as LemurResponse, type index$6_LemurStringResponse as LemurStringResponse, type index$6_LemurStringResponseAllOf as LemurStringResponseAllOf, type index$6_LemurSummaryParams as LemurSummaryParams, type index$6_LemurSummaryParamsAllOf as LemurSummaryParamsAllOf, type index$6_LemurSummaryResponse as LemurSummaryResponse, type index$6_LemurTaskParams as LemurTaskParams, type index$6_LemurTaskParamsAllOf as LemurTaskParamsAllOf, type index$6_LemurTaskResponse as LemurTaskResponse, type index$6_LemurUsage as LemurUsage, type index$6_ListTranscriptParams as ListTranscriptParams, index$6_ListTranscriptsParams as ListTranscriptsParams, type index$6_NotFoundResponse as NotFoundResponse, type index$6_PageDetails as PageDetails, type index$6_PageDetailsNextUrl as PageDetailsNextUrl, type index$6_PageDetailsPrevUrl as PageDetailsPrevUrl, type index$6_ParagraphsResponse as ParagraphsResponse, index$6_PiiPolicy as PiiPolicy, type index$6_PurgeLemurRequestDataResponse as PurgeLemurRequestDataResponse, type index$6_RealtimeTemporaryTokenResponse as RealtimeTemporaryTokenResponse, index$6_RedactPiiAudioQuality as RedactPiiAudioQuality, index$6_RedactedAudioNotification as RedactedAudioNotification, index$6_RedactedAudioResponse as RedactedAudioResponse, index$6_RedactedAudioStatus as RedactedAudioStatus, type index$6_SentencesResponse as SentencesResponse, index$6_Sentiment 
as Sentiment, SentimentAnalysisResult$1 as SentimentAnalysisResult, index$6_SentimentAnalysisResultChannel as SentimentAnalysisResultChannel, index$6_SentimentAnalysisResultSpeaker as SentimentAnalysisResultSpeaker, type index$6_ServiceUnavailableResponse as ServiceUnavailableResponse, index$6_SeverityScoreSummary as SeverityScoreSummary, index$6_SpeechModel as SpeechModel, index$6_SubstitutionPolicy as SubstitutionPolicy, index$6_SubtitleFormat as SubtitleFormat, index$6_SummaryModel as SummaryModel, index$6_SummaryType as SummaryType, index$6_Timestamp as Timestamp, type index$6_TooManyRequestsResponse as TooManyRequestsResponse, index$6_TopicDetectionModelResult as TopicDetectionModelResult, index$6_TopicDetectionModelResultSummary as TopicDetectionModelResultSummary, index$6_TopicDetectionResult as TopicDetectionResult, index$6_TopicDetectionResultLabelsItem as TopicDetectionResultLabelsItem, index$6_Transcript as Transcript, index$6_TranscriptAudioDuration as TranscriptAudioDuration, index$6_TranscriptAudioEndAt as TranscriptAudioEndAt, index$6_TranscriptAudioStartFrom as TranscriptAudioStartFrom, index$6_TranscriptAutoChapters as TranscriptAutoChapters, index$6_TranscriptAutoHighlightsResult as TranscriptAutoHighlightsResult, index$6_TranscriptBoostParam as TranscriptBoostParam, index$6_TranscriptBoostParamProperty as TranscriptBoostParamProperty, index$6_TranscriptChapters as TranscriptChapters, index$6_TranscriptConfidence as TranscriptConfidence, index$6_TranscriptContentSafety as TranscriptContentSafety, index$6_TranscriptContentSafetyLabels as TranscriptContentSafetyLabels, index$6_TranscriptCustomSpelling as TranscriptCustomSpelling, index$6_TranscriptCustomSpellingProperty as TranscriptCustomSpellingProperty, index$6_TranscriptCustomTopics as TranscriptCustomTopics, index$6_TranscriptDisfluencies as TranscriptDisfluencies, index$6_TranscriptEntities as TranscriptEntities, index$6_TranscriptEntityDetection as TranscriptEntityDetection, index$6_TranscriptFilterProfanity as TranscriptFilterProfanity, index$6_TranscriptFormatText as TranscriptFormatText, index$6_TranscriptIabCategories as TranscriptIabCategories, index$6_TranscriptIabCategoriesResult as TranscriptIabCategoriesResult, index$6_TranscriptLanguageCode as TranscriptLanguageCode, index$6_TranscriptLanguageCodeProperty as TranscriptLanguageCodeProperty, index$6_TranscriptLanguageConfidence as TranscriptLanguageConfidence, index$6_TranscriptLanguageConfidenceThreshold as TranscriptLanguageConfidenceThreshold, index$6_TranscriptLanguageDetection as TranscriptLanguageDetection, type index$6_TranscriptList as TranscriptList, type index$6_TranscriptListItem as TranscriptListItem, type index$6_TranscriptListItemCompleted as TranscriptListItemCompleted, type index$6_TranscriptListItemError as TranscriptListItemError, index$6_TranscriptMultichannel as TranscriptMultichannel, index$6_TranscriptOptionalParams as TranscriptOptionalParams, index$6_TranscriptOptionalParamsLanguageCode as TranscriptOptionalParamsLanguageCode, index$6_TranscriptOptionalParamsLanguageCodeOneOf as TranscriptOptionalParamsLanguageCodeOneOf, index$6_TranscriptOptionalParamsRedactPiiSub as TranscriptOptionalParamsRedactPiiSub, index$6_TranscriptOptionalParamsSpeakersExpected as TranscriptOptionalParamsSpeakersExpected, index$6_TranscriptOptionalParamsSpeechModel as TranscriptOptionalParamsSpeechModel, index$6_TranscriptOptionalParamsSpeechThreshold as TranscriptOptionalParamsSpeechThreshold, index$6_TranscriptOptionalParamsWebhookAuthHeaderName as 
TranscriptOptionalParamsWebhookAuthHeaderName, index$6_TranscriptOptionalParamsWebhookAuthHeaderValue as TranscriptOptionalParamsWebhookAuthHeaderValue, type index$6_TranscriptParagraph as TranscriptParagraph, type index$6_TranscriptParams as TranscriptParams, type index$6_TranscriptParamsAllOf as TranscriptParamsAllOf, index$6_TranscriptPunctuate as TranscriptPunctuate, index$6_TranscriptReadyNotification as TranscriptReadyNotification, index$6_TranscriptReadyStatus as TranscriptReadyStatus, index$6_TranscriptRedactPiiAudio as TranscriptRedactPiiAudio, index$6_TranscriptRedactPiiAudioQuality as TranscriptRedactPiiAudioQuality, index$6_TranscriptRedactPiiPolicies as TranscriptRedactPiiPolicies, type index$6_TranscriptSentence as TranscriptSentence, type index$6_TranscriptSentenceChannel as TranscriptSentenceChannel, type index$6_TranscriptSentenceSpeaker as TranscriptSentenceSpeaker, index$6_TranscriptSentimentAnalysis as TranscriptSentimentAnalysis, index$6_TranscriptSentimentAnalysisResults as TranscriptSentimentAnalysisResults, index$6_TranscriptSpeakerLabels as TranscriptSpeakerLabels, index$6_TranscriptSpeakersExpected as TranscriptSpeakersExpected, index$6_TranscriptSpeechModel as TranscriptSpeechModel, index$6_TranscriptSpeechThreshold as TranscriptSpeechThreshold, index$6_TranscriptSpeedBoost as TranscriptSpeedBoost, index$6_TranscriptStatus as TranscriptStatus, index$6_TranscriptSummary as TranscriptSummary, index$6_TranscriptSummaryModel as TranscriptSummaryModel, index$6_TranscriptSummaryType as TranscriptSummaryType, index$6_TranscriptText as TranscriptText, index$6_TranscriptThrottled as TranscriptThrottled, index$6_TranscriptUtterance as TranscriptUtterance, index$6_TranscriptUtteranceChannel as TranscriptUtteranceChannel, index$6_TranscriptUtterances as TranscriptUtterances, index$6_TranscriptWebhookAuthHeaderName as TranscriptWebhookAuthHeaderName, index$6_TranscriptWebhookNotification as TranscriptWebhookNotification, index$6_TranscriptWebhookStatusCode as TranscriptWebhookStatusCode, index$6_TranscriptWebhookUrl as TranscriptWebhookUrl, index$6_TranscriptWord as TranscriptWord, index$6_TranscriptWordChannel as TranscriptWordChannel, index$6_TranscriptWordSpeaker as TranscriptWordSpeaker, index$6_TranscriptWords as TranscriptWords, type index$6_UnauthorizedResponse as UnauthorizedResponse, type index$6_UploadedFile as UploadedFile, type index$6_WordSearchMatch as WordSearchMatch, type index$6_WordSearchParams as WordSearchParams, type index$6_WordSearchResponse as WordSearchResponse, type index$6_WordSearchTimestamp as WordSearchTimestamp };
 }
 
 /**
@@ -5200,6 +5209,36 @@ declare const createTemporaryToken: <TData = AxiosResponse<RealtimeTemporaryToke
 * Documentation: https://www.assemblyai.com/docs
 */
+/**
+ * AssemblyAI adapter configuration
+ *
+ * Extends the base config with AssemblyAI-specific options like regional endpoints.
+ *
+ * @example
+ * ```typescript
+ * import { createAssemblyAIAdapter, AssemblyAIRegion } from 'voice-router-dev'
+ *
+ * const adapter = createAssemblyAIAdapter({
+ *   apiKey: process.env.ASSEMBLYAI_API_KEY,
+ *   region: AssemblyAIRegion.eu // EU data residency
+ * })
+ * ```
+ */
+interface AssemblyAIConfig extends ProviderConfig {
+    /**
+     * Regional endpoint for data residency
+     *
+     * Available regions:
+     * - `us` - United States (default): api.assemblyai.com
+     * - `eu` - European Union: api.eu.assemblyai.com
+     *
+     * The EU endpoint guarantees audio and transcription data never leaves the EU.
+     *
+     * @see https://www.assemblyai.com/docs/getting-started/cloud-endpoints
+     */
+    region?: AssemblyAIRegionType;
+}
+
 
 /**
 * AssemblyAI transcription provider adapter
 *
@@ -5256,7 +5295,40 @@ declare class AssemblyAIAdapter extends BaseAdapter {
 readonly capabilities: ProviderCapabilities;
 protected baseUrl: string;
 private wsBaseUrl;
-
+/**
+ * Get regional hosts for AssemblyAI
+ *
+ * @param region - Regional endpoint identifier
+ * @returns Object with api and streaming hosts
+ */
+private getRegionalHosts;
+initialize(config: AssemblyAIConfig): void;
+/**
+ * Change the regional endpoint dynamically
+ *
+ * Useful for switching between US and EU endpoints without reinitializing.
+ * Affects both REST API and WebSocket streaming endpoints.
+ *
+ * @param region - New regional endpoint to use (`us` or `eu`)
+ *
+ * @example Switch to EU region
+ * ```typescript
+ * import { AssemblyAIRegion } from 'voice-router-dev/constants'
+ *
+ * adapter.setRegion(AssemblyAIRegion.eu)
+ * await adapter.transcribe(audio) // Uses EU endpoint
+ * ```
+ */
+setRegion(region: AssemblyAIRegionType): void;
+/**
+ * Get the current regional endpoints being used
+ *
+ * @returns Object with current API and WebSocket URLs
+ */
+getRegion(): {
+    api: string;
+    websocket: string;
+};
 /**
 * Get axios config for generated API client functions
 * Configures headers and base URL using authorization header
@@ -5534,8 +5606,18 @@ declare class AssemblyAIAdapter extends BaseAdapter {
 }
 /**
 * Factory function to create an AssemblyAI adapter
+ *
+ * @example
+ * ```typescript
+ * import { createAssemblyAIAdapter, AssemblyAIRegion } from 'voice-router-dev'
+ *
+ * const adapter = createAssemblyAIAdapter({
+ *   apiKey: process.env.ASSEMBLYAI_API_KEY,
+ *   region: AssemblyAIRegion.eu // EU data residency
+ * })
+ * ```
 */
-declare function createAssemblyAIAdapter(config:
+declare function createAssemblyAIAdapter(config: AssemblyAIConfig): AssemblyAIAdapter;
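
Taken together, the new `AssemblyAIConfig`, the updated factory, and the `setRegion`/`getRegion` methods form a small region lifecycle: create the adapter pinned to one endpoint, inspect what was resolved, and switch later without reinitializing. A minimal sketch of that flow, using only the declarations shown in this diff and assuming `AssemblyAIRegion.us` exists alongside the documented `AssemblyAIRegion.eu`:

```typescript
import { createAssemblyAIAdapter, AssemblyAIRegion } from 'voice-router-dev'

// Pin the adapter to the EU endpoint for data residency.
const adapter = createAssemblyAIAdapter({
  apiKey: process.env.ASSEMBLYAI_API_KEY!,
  region: AssemblyAIRegion.eu,
})

// Inspect the endpoints the adapter resolved for the region.
const { api, websocket } = adapter.getRegion()
console.log(api, websocket)

// Switch regions dynamically; per the setRegion docs above, both the
// REST API and WebSocket streaming endpoints are affected.
adapter.setRegion(AssemblyAIRegion.us)
```
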
 
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -6111,6 +6193,123 @@ type TranscriptionsListParams = {
 filter?: string;
 };
 
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Speech Services API version 3.2
+ * Speech Services API version 3.2.
+ * OpenAPI spec version: 3.2
+ */
+interface WebHookLinks {
+    /** The URL that can be used to trigger the sending of a ping event to the registered URL of a web hook registration. See operation "WebHooks_Ping" for more details. */
+    readonly ping?: string;
+    /** The URL that can be used sending test events to the registered URL of a web hook registration. See operation "WebHooks_Test" for more details. */
+    readonly test?: string;
+}
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Speech Services API version 3.2
+ * Speech Services API version 3.2.
+ * OpenAPI spec version: 3.2
+ */
+
+interface WebHookProperties {
+    error?: EntityError;
+    /** The API version the web hook was created in. This defines the shape of the payload in the callbacks.
+    If the payload type is not supported anymore, because the shape changed and the API version using it is removed (after deprecation),
+    the web hook will be disabled. */
+    readonly apiVersion?: string;
+    /** A secret that will be used to create a SHA256 hash of the payload with the secret as HMAC key.
+    This hash will be set as X-MicrosoftSpeechServices-Signature header when calling back into the registered URL. */
+    secret?: string;
+}
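
The `secret` property above drives callback authentication: the service sends an HMAC-SHA256 of the payload in the `X-MicrosoftSpeechServices-Signature` header. A minimal verification sketch using Node's built-in crypto module; the digest encoding (base64 here) is an assumption to check against the Speech Services docs for your API version:

```typescript
import { createHmac, timingSafeEqual } from 'node:crypto'

// Recompute the HMAC-SHA256 of the raw callback body with the webhook
// secret and compare it to the X-MicrosoftSpeechServices-Signature header.
function verifySignature(rawBody: Buffer, signature: string, secret: string): boolean {
  const expected = createHmac('sha256', secret).update(rawBody).digest('base64') // encoding assumed
  const a = Buffer.from(signature)
  const b = Buffer.from(expected)
  // timingSafeEqual throws on length mismatch, so guard first.
  return a.length === b.length && timingSafeEqual(a, b)
}
```
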
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Speech Services API version 3.2
+ * Speech Services API version 3.2.
+ * OpenAPI spec version: 3.2
+ */
+interface WebHookEvents {
+    datasetCreation?: boolean;
+    datasetProcessing?: boolean;
+    datasetCompletion?: boolean;
+    datasetDeletion?: boolean;
+    modelCreation?: boolean;
+    modelProcessing?: boolean;
+    modelCompletion?: boolean;
+    modelDeletion?: boolean;
+    evaluationCreation?: boolean;
+    evaluationProcessing?: boolean;
+    evaluationCompletion?: boolean;
+    evaluationDeletion?: boolean;
+    transcriptionCreation?: boolean;
+    transcriptionProcessing?: boolean;
+    transcriptionCompletion?: boolean;
+    transcriptionDeletion?: boolean;
+    endpointCreation?: boolean;
+    endpointProcessing?: boolean;
+    endpointCompletion?: boolean;
+    endpointDeletion?: boolean;
+    ping?: boolean;
+    challenge?: boolean;
+}
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Speech Services API version 3.2
+ * Speech Services API version 3.2.
+ * OpenAPI spec version: 3.2
+ */
+/**
+ * The custom properties of this entity. The maximum allowed key length is 64 characters, the maximum
+ allowed value length is 256 characters and the count of allowed entries is 10.
+ */
+type WebHookCustomProperties = {
+    [key: string]: string;
+};
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Speech Services API version 3.2
+ * Speech Services API version 3.2.
+ * OpenAPI spec version: 3.2
+ */
+
+interface WebHook {
+    /** The registered URL that will be used to send the POST requests for the registered events to. */
+    webUrl: string;
+    links?: WebHookLinks;
+    properties?: WebHookProperties;
+    /** The location of this entity. */
+    readonly self?: string;
+    /**
+     * The display name of the object.
+     * @minLength 1
+     */
+    displayName: string;
+    /** The description of the object. */
+    description?: string;
+    events: WebHookEvents;
+    /** The time-stamp when the object was created.
+    The time stamp is encoded as ISO 8601 date and time format
+    ("YYYY-MM-DDThh:mm:ssZ", see https://en.wikipedia.org/wiki/ISO_8601#Combined_date_and_time_representations). */
+    readonly createdDateTime?: string;
+    /** The time-stamp when the current status was entered.
+    The time stamp is encoded as ISO 8601 date and time format
+    ("YYYY-MM-DDThh:mm:ssZ", see https://en.wikipedia.org/wiki/ISO_8601#Combined_date_and_time_representations). */
+    readonly lastActionDateTime?: string;
+    status?: Status;
+    /** The custom properties of this entity. The maximum allowed key length is 64 characters, the maximum
+    allowed value length is 256 characters and the count of allowed entries is 10. */
+    customProperties?: WebHookCustomProperties;
+}
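
`WebHook` is also the request body for registration (see `webHooksCreate` later in this diff), so a payload is just this interface minus its readonly fields. A sketch of a registration payload that subscribes only to transcription lifecycle events; whether these types are re-exported from the package root is an assumption:

```typescript
import type { WebHook, WebHookEvents } from 'voice-router-dev' // assumed re-export

const events: WebHookEvents = {
  transcriptionCreation: true,
  transcriptionProcessing: true,
  transcriptionCompletion: true,
  transcriptionDeletion: true,
}

const registration: WebHook = {
  webUrl: 'https://example.com/speech-events', // your callback endpoint
  displayName: 'transcription-events',
  events,
  // A non-empty secret enables the HMAC signature described above.
  properties: { secret: process.env.SPEECH_WEBHOOK_SECRET },
}
```
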
+
 
 /**
 * Generated by orval v7.9.0 🍺
 * Do not edit manually.
@@ -8056,62 +8255,15 @@ interface PaginatedProjects {
 * Speech Services API version 3.2.
 * OpenAPI spec version: 3.2
 */
-interface WebHookLinks {
-    /** The URL that can be used to trigger the sending of a ping event to the registered URL of a web hook registration. See operation "WebHooks_Ping" for more details. */
-    readonly ping?: string;
-    /** The URL that can be used sending test events to the registered URL of a web hook registration. See operation "WebHooks_Test" for more details. */
-    readonly test?: string;
-}
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speech Services API version 3.2
- * Speech Services API version 3.2.
- * OpenAPI spec version: 3.2
- */
-
-interface WebHookProperties {
-    error?: EntityError;
-    /** The API version the web hook was created in. This defines the shape of the payload in the callbacks.
-    If the payload type is not supported anymore, because the shape changed and the API version using it is removed (after deprecation),
-    the web hook will be disabled. */
-    readonly apiVersion?: string;
-    /** A secret that will be used to create a SHA256 hash of the payload with the secret as HMAC key.
-    This hash will be set as X-MicrosoftSpeechServices-Signature header when calling back into the registered URL. */
-    secret?: string;
-}
 
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speech Services API version 3.2
- * Speech Services API version 3.2.
- * OpenAPI spec version: 3.2
- */
-
-interface WebHookEvents {
-    datasetCreation?: boolean;
-    datasetProcessing?: boolean;
-    datasetCompletion?: boolean;
-    datasetDeletion?: boolean;
-    modelCreation?: boolean;
-    modelProcessing?: boolean;
-    modelCompletion?: boolean;
-    modelDeletion?: boolean;
-    evaluationCreation?: boolean;
-    evaluationProcessing?: boolean;
-    evaluationCompletion?: boolean;
-    evaluationDeletion?: boolean;
-    transcriptionCreation?: boolean;
-    transcriptionProcessing?: boolean;
-    transcriptionCompletion?: boolean;
-    transcriptionDeletion?: boolean;
-    endpointCreation?: boolean;
-    endpointProcessing?: boolean;
-    endpointCompletion?: boolean;
-    endpointDeletion?: boolean;
-    ping?: boolean;
-    challenge?: boolean;
+interface PaginatedWebHooks {
+    /** A list of entities limited by either the passed query parameters 'skip' and 'top' or their default values.
+
+    When iterating through a list using pagination and deleting entities in parallel, some entities will be skipped in the results.
+    It's recommended to build a list on the client and delete after the fetching of the complete list. */
+    readonly values?: readonly WebHook[];
+    /** A link to the next set of paginated results if there are more entities available; otherwise null. */
+    readonly "@nextLink"?: string;
 }
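
As the doc comment warns, deleting entities while paging can skip results, so collect the whole list by following `@nextLink` before acting on it. A sketch under stated assumptions — the request URL and auth header are illustrative, not taken from this package:

```typescript
import axios from 'axios'

// Drain every page of a paginated Speech Services list response
// ({ values, "@nextLink" }) before deleting anything from it.
async function drainPages<T>(firstUrl: string, subscriptionKey: string): Promise<T[]> {
  const all: T[] = []
  let url: string | undefined = firstUrl // hypothetical list endpoint
  while (url) {
    const { data } = await axios.get(url, {
      headers: { 'Ocp-Apim-Subscription-Key': subscriptionKey },
    })
    all.push(...(data.values ?? []))
    url = data['@nextLink'] ?? undefined
  }
  return all
}
```
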
 
 /**
@@ -8125,7 +8277,7 @@ interface WebHookEvents {
 * The custom properties of this entity. The maximum allowed key length is 64 characters, the maximum
 allowed value length is 256 characters and the count of allowed entries is 10.
 */
-type WebHookCustomProperties = {
+type ProjectUpdateCustomProperties = {
 [key: string]: string;
 };
 
@@ -8137,84 +8289,14 @@ type WebHookCustomProperties = {
 * OpenAPI spec version: 3.2
 */
 
-interface WebHook {
-    /** The registered URL that will be used to send the POST requests for the registered events to. */
-    webUrl: string;
-    links?: WebHookLinks;
-    properties?: WebHookProperties;
-    /** The location of this entity. */
-    readonly self?: string;
-    /**
-     * The display name of the object.
-     * @minLength 1
-     */
-    displayName: string;
+interface ProjectUpdate {
+    /** The name of the object. */
+    displayName?: string;
 /** The description of the object. */
 description?: string;
-    events: WebHookEvents;
-    /** The time-stamp when the object was created.
-    The time stamp is encoded as ISO 8601 date and time format
-    ("YYYY-MM-DDThh:mm:ssZ", see https://en.wikipedia.org/wiki/ISO_8601#Combined_date_and_time_representations). */
-    readonly createdDateTime?: string;
-    /** The time-stamp when the current status was entered.
-    The time stamp is encoded as ISO 8601 date and time format
-    ("YYYY-MM-DDThh:mm:ssZ", see https://en.wikipedia.org/wiki/ISO_8601#Combined_date_and_time_representations). */
-    readonly lastActionDateTime?: string;
-    status?: Status;
 /** The custom properties of this entity. The maximum allowed key length is 64 characters, the maximum
 allowed value length is 256 characters and the count of allowed entries is 10. */
-    customProperties?: WebHookCustomProperties;
-}
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speech Services API version 3.2
- * Speech Services API version 3.2.
- * OpenAPI spec version: 3.2
- */
-
-interface PaginatedWebHooks {
-    /** A list of entities limited by either the passed query parameters 'skip' and 'top' or their default values.
-
-    When iterating through a list using pagination and deleting entities in parallel, some entities will be skipped in the results.
-    It's recommended to build a list on the client and delete after the fetching of the complete list. */
-    readonly values?: readonly WebHook[];
-    /** A link to the next set of paginated results if there are more entities available; otherwise null. */
-    readonly "@nextLink"?: string;
-}
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speech Services API version 3.2
- * Speech Services API version 3.2.
- * OpenAPI spec version: 3.2
- */
-/**
- * The custom properties of this entity. The maximum allowed key length is 64 characters, the maximum
- allowed value length is 256 characters and the count of allowed entries is 10.
- */
-type ProjectUpdateCustomProperties = {
-    [key: string]: string;
-};
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speech Services API version 3.2
- * Speech Services API version 3.2.
- * OpenAPI spec version: 3.2
- */
-
-interface ProjectUpdate {
-    /** The name of the object. */
-    displayName?: string;
-    /** The description of the object. */
-    description?: string;
-    /** The custom properties of this entity. The maximum allowed key length is 64 characters, the maximum
-    allowed value length is 256 characters and the count of allowed entries is 10. */
-    customProperties?: ProjectUpdateCustomProperties;
+    customProperties?: ProjectUpdateCustomProperties;
 }
 
 /**
@@ -8771,6 +8853,27 @@ declare const transcriptionsDelete: <TData = AxiosResponse<void>>(id: string, op
 * @summary Gets the files of the transcription identified by the given ID.
 */
 declare const transcriptionsListFiles: <TData = AxiosResponse<PaginatedFiles>>(id: string, params?: TranscriptionsListFilesParams, options?: AxiosRequestConfig) => Promise<TData>;
+/**
+ * @summary Gets the list of web hooks for the authenticated subscription.
+ */
+declare const webHooksList: <TData = AxiosResponse<PaginatedWebHooks>>(params?: WebHooksListParams, options?: AxiosRequestConfig) => Promise<TData>;
+/**
+ * If the property secret in the configuration is present and contains a non-empty string, it will be used to create a SHA256 hash of the payload with
+ the secret as HMAC key. This hash will be set as X-MicrosoftSpeechServices-Signature header when calling back into the registered URL.
+
+ When calling back into the registered URL, the request will contain a X-MicrosoftSpeechServices-Event header containing one of the registered event
+ types. There will be one request per registered event type.
+
+ After successfully registering the web hook, it will not be usable until a challenge/response is completed. To do this, a request with the event type
+ challenge will be made with a query parameter called validationToken. Respond to the challenge with a 200 OK containing the value of the validationToken
+ query parameter as the response body. When the challenge/response is successfully completed, the web hook will begin receiving events.
+ * @summary Creates a new web hook.
+ */
+declare const webHooksCreate: <TData = AxiosResponse<WebHook>>(webHook: NonReadonly<WebHook>, options?: AxiosRequestConfig) => Promise<TData>;
|
|
8873
|
+
/**
|
|
8874
|
+
* @summary Deletes the web hook identified by the given ID.
|
|
8875
|
+
*/
|
|
8876
|
+
declare const webHooksDelete: <TData = AxiosResponse<void>>(id: string, options?: AxiosRequestConfig) => Promise<TData>;
|
|
8774
8877
|
|
|
8775
8878
|
/**
|
|
8776
8879
|
* Azure Speech-to-Text transcription provider adapter
|
|
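A sketch of a receiver for the contract documented above: answer the challenge event by echoing validationToken, and check the X-MicrosoftSpeechServices-Signature HMAC on real events. Express and the base64 digest encoding are assumptions here; only the header names, the HMAC-SHA256 scheme, and the challenge flow come from the doc comment.

```ts
import { createHmac, timingSafeEqual } from "node:crypto";
import express from "express";

const SECRET = process.env.WEBHOOK_SECRET ?? ""; // the webhook's configured secret
const app = express();

app.post("/speech-events", express.raw({ type: "*/*" }), (req, res) => {
  // Challenge/response: reply 200 OK with the validationToken query
  // parameter as the body, or the hook never becomes active.
  if (req.headers["x-microsoftspeechservices-event"] === "challenge") {
    res.status(200).send(String(req.query.validationToken ?? ""));
    return;
  }
  // Verify the HMAC-SHA256 of the raw payload, keyed with the secret.
  const expected = createHmac("sha256", SECRET).update(req.body).digest();
  const got = Buffer.from(
    String(req.headers["x-microsoftspeechservices-signature"] ?? ""),
    "base64" // assumed encoding; confirm against a live payload
  );
  if (got.length !== expected.length || !timingSafeEqual(got, expected)) {
    res.status(401).end();
    return;
  }
  res.status(200).end(); // one request arrives per registered event type
});
```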
@@ -8942,6 +9045,33 @@ declare class AzureSTTAdapter extends BaseAdapter {
  total?: number;
  hasMore?: boolean;
  }>;
+ /**
+ * Register a subscription-wide webhook for transcription events
+ *
+ * Azure webhooks are subscription-wide (not per-transcription).
+ * Call this once during setup to receive callbacks for all transcription events.
+ * The webhook URL will receive POST requests for transcription lifecycle events.
+ *
+ * @param url - The webhook URL to receive events
+ * @param options - Optional: event filters and display name
+ * @returns Created webhook object
+ */
+ registerWebhook(url: string, options?: {
+ displayName?: string;
+ events?: Partial<WebHookEvents>;
+ }): Promise<WebHook>;
+ /**
+ * Unregister a subscription-wide webhook by ID
+ *
+ * @param webhookId - The webhook ID to delete
+ */
+ unregisterWebhook(webhookId: string): Promise<void>;
+ /**
+ * List all registered webhooks for the subscription
+ *
+ * @returns Array of registered webhooks
+ */
+ listWebhooks(): Promise<WebHook[]>;
  /**
  * Map unified status to Azure status format using generated enum
  */
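A usage sketch for the three adapter methods declared above. How the adapter is constructed is elided (credentials vary per deployment), and the id field read off the returned WebHook is an assumption; only the method signatures come from this diff.

```ts
import { AzureSTTAdapter } from "voice-router-dev"; // assumed root export

declare const adapter: AzureSTTAdapter; // constructed elsewhere with Azure credentials

// Register once during setup; the hook then receives all transcription
// lifecycle events for the subscription, not just one transcription.
const hook = await adapter.registerWebhook("https://example.com/speech-events", {
  displayName: "voice-router callbacks",
});

const hooks = await adapter.listWebhooks(); // includes the hook just created
await adapter.unregisterWebhook((hook as { id?: string }).id ?? "");
```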
@@ -15216,6 +15346,9 @@ type ManageV1FilterVersionParameter = boolean;
  /**
  * ManageV1GroupingParameter type definition
  */
+ /**
+ * ManageV1GroupingParameter type definition
+ */
  type ManageV1GroupingParameter = typeof ManageV1GroupingParameter[keyof typeof ManageV1GroupingParameter];
  declare const ManageV1GroupingParameter: {
  readonly accessor: "accessor";
@@ -15285,6 +15418,9 @@ type ManageV1ModelsListParams = {
  /**
  * ManageV1ProjectKeysStatusParameter type definition
  */
+ /**
+ * ManageV1ProjectKeysStatusParameter type definition
+ */
  type ManageV1ProjectKeysStatusParameter = typeof ManageV1ProjectKeysStatusParameter[keyof typeof ManageV1ProjectKeysStatusParameter];
  declare const ManageV1ProjectKeysStatusParameter: {
  readonly active: "active";
@@ -16167,6 +16303,9 @@ type SelfHostedV1ParamsDistributionScopesParameter = SelfHostedV1ParamsDistribut
  /**
  * SelfHostedV1ParamsDistributionProviderParameter type definition
  */
+ /**
+ * SelfHostedV1ParamsDistributionProviderParameter type definition
+ */
  type SelfHostedV1ParamsDistributionProviderParameter = typeof SelfHostedV1ParamsDistributionProviderParameter[keyof typeof SelfHostedV1ParamsDistributionProviderParameter];
  declare const SelfHostedV1ParamsDistributionProviderParameter: {
  readonly quay: "quay";
@@ -17271,8 +17410,8 @@ declare namespace streamingTypes {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
- interface
-
+ interface AudioEventsConfig {
+ types?: string[];
  }

  /**
@@ -17282,17 +17421,8 @@ interface AlignmentConfig {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
- interface
-
- type?: string;
- /** Time (in seconds) at which the audio event starts */
- start_time?: number;
- /** Time (in seconds) at which the audio event ends */
- end_time?: number;
- /** Prediction confidence associated with this event */
- confidence?: number;
- /** Input channel this event occurred on */
- channel?: string;
+ interface AutoChaptersConfig {
+ [key: string]: unknown;
  }

  /**
@@ -17302,14 +17432,9 @@ interface AudioEventItem {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
-
-
-
- interface AudioEventSummaryItem {
- /** Total duration (in seconds) of all audio events of this type */
- total_duration?: number;
- /** Number of events of this type */
- count?: number;
+ interface CreateJobResponse {
+ /** The unique ID assigned to the job. Keep a record of this for later retrieval of your completed job. */
+ id: string;
  }

  /**
@@ -17319,9 +17444,10 @@ interface AudioEventSummaryItem {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
-
-
-
+ interface DataFetchConfig {
+ url: string;
+ /** A list of additional headers to be added to the input fetch request when using http or https. This is intended to support authentication or authorization, for example by supplying an OAuth2 bearer token. */
+ auth_headers?: string[];
  }

  /**
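An example value for the DataFetchConfig added above; the URL and bearer token are placeholders, and the header format follows the doc comment's OAuth2 example.

```ts
const fetchData: DataFetchConfig = {
  url: "https://example.com/audio/meeting.wav",
  // Extra headers added to the input fetch request, e.g. for authorization.
  auth_headers: ["Authorization: Bearer <token>"],
};
```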
@@ -17331,20 +17457,17 @@ interface AudioEventSummary {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
- interface AudioEventsConfig {
- types?: string[];
- }
-
  /**
- *
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
+ * The status of the job. * `running` - The job is actively running. * `done` - The job completed successfully. * `rejected` - The job was accepted at first, but later could not be processed by the transcriber. * `deleted` - The user deleted the job. * `expired` - The system deleted the job. Usually because the job was in the `done` state for a very long time.
  */
-
-
-
+ type JobDetailsStatus = (typeof JobDetailsStatus)[keyof typeof JobDetailsStatus];
+ declare const JobDetailsStatus: {
+ readonly running: "running";
+ readonly done: "done";
+ readonly rejected: "rejected";
+ readonly deleted: "deleted";
+ readonly expired: "expired";
+ };

  /**
  * Generated by orval v7.9.0 🍺
@@ -17353,12 +17476,19 @@ interface AutoChaptersConfig {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
-
-
-
-
-
-
+ type NotificationConfigContentsItem = (typeof NotificationConfigContentsItem)[keyof typeof NotificationConfigContentsItem];
+ declare const NotificationConfigContentsItem: {
+ readonly jobinfo: "jobinfo";
+ readonly transcript: "transcript";
+ readonly "transcriptjson-v2": "transcript.json-v2";
+ readonly transcripttxt: "transcript.txt";
+ readonly transcriptsrt: "transcript.srt";
+ readonly alignment: "alignment";
+ readonly alignmentword_start_and_end: "alignment.word_start_and_end";
+ readonly alignmentone_per_line: "alignment.one_per_line";
+ readonly data: "data";
+ readonly text: "text";
+ };

  /**
  * Generated by orval v7.9.0 🍺
@@ -17367,11 +17497,14 @@ interface Chapter {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
-
  /**
- *
+ * The method to be used with http and https urls. The default is post.
  */
- type
+ type NotificationConfigMethod = (typeof NotificationConfigMethod)[keyof typeof NotificationConfigMethod];
+ declare const NotificationConfigMethod: {
+ readonly post: "post";
+ readonly put: "put";
+ };

  /**
  * Generated by orval v7.9.0 🍺
@@ -17380,11 +17513,44 @@ type AutoChaptersResult = Chapter[];
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
-
-
-
-
-
+
+ interface NotificationConfig {
+ /** The url to which a notification message will be sent upon
+ completion of the job. The job `id` and `status` are added
+ as query parameters, and any combination of the job inputs
+ and outputs can be included by listing them in `contents`.
+
+ If `contents` is empty, the body of the request will be
+ empty.
+
+ If only one item is listed, it will be sent as the body of
+ the request with `Content-Type` set to an appropriate value
+ such as `application/octet-stream` or `application/json`.
+
+ If multiple items are listed they will be sent as named file
+ attachments using the multipart content type.
+
+ If `contents` is not specified, the `transcript` item will
+ be sent as a file attachment named `data_file`, for
+ backwards compatibility.
+
+ If the job was rejected or failed during processing, that
+ will be indicated by the status, and any output items that
+ are not available as a result will be omitted. The body
+ formatting rules will still be followed as if all items were
+ available.
+
+ The user-agent header is set to `Speechmatics-API/2.0`, or
+ `Speechmatics API V2` in older API versions.
+ */
+ url: string;
+ /** Specifies a list of items to be attached to the notification message. When multiple items are requested, they are included as named file attachments. */
+ contents?: NotificationConfigContentsItem[];
+ /** The method to be used with http and https urls. The default is post. */
+ method?: NotificationConfigMethod;
+ /** A list of additional headers to be added to the notification request when using http or https. This is intended to support authentication or authorization, for example by supplying an OAuth2 bearer token. */
+ auth_headers?: string[];
+ }

  /**
  * Generated by orval v7.9.0 🍺
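An example NotificationConfig combining the enums added above. Two items are requested, so per the doc comment they arrive as named multipart file attachments; the URL and token are placeholders.

```ts
const notify: NotificationConfig = {
  url: "https://example.com/jobs/callback", // job id and status arrive as query params
  contents: ["jobinfo", "transcript.json-v2"], // two items => multipart attachments
  method: "post", // the documented default
  auth_headers: ["Authorization: Bearer <token>"],
};
```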
@@ -17393,11 +17559,9 @@ declare const AutoChaptersResultErrorType: {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
-
-
-
- /** Human readable error message */
- message?: string;
+ interface TranslationConfig$1 {
+ /** @maxItems 5 */
+ target_languages: string[];
  }

  /**
@@ -17407,10 +17571,15 @@ interface AutoChaptersResultError {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
-
-
-
-
+ /**
+ * Action to take if all of the predicted languages are below the confidence threshold
+ */
+ type LanguageIdentificationConfigLowConfidenceAction = (typeof LanguageIdentificationConfigLowConfidenceAction)[keyof typeof LanguageIdentificationConfigLowConfidenceAction];
+ declare const LanguageIdentificationConfigLowConfidenceAction: {
+ readonly allow: "allow";
+ readonly reject: "reject";
+ readonly use_default_language: "use_default_language";
+ };

  /**
  * Generated by orval v7.9.0 🍺
@@ -17419,10 +17588,12 @@ interface CreateJobResponse {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
-
-
-
-
+
+ interface LanguageIdentificationConfig {
+ expected_languages?: string[];
+ /** Action to take if all of the predicted languages are below the confidence threshold */
+ low_confidence_action?: LanguageIdentificationConfigLowConfidenceAction;
+ default_language?: string;
  }

  /**
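An example LanguageIdentificationConfig using the enum above: if no predicted language clears the confidence threshold, fall back to the default language instead of rejecting the job. The language codes are illustrative.

```ts
const langId: LanguageIdentificationConfig = {
  expected_languages: ["en", "de"],
  low_confidence_action: "use_default_language",
  default_language: "en",
};
```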
@@ -17432,16 +17603,11 @@ interface DataFetchConfig {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
-
-
-
-
-
- readonly running: "running";
- readonly done: "done";
- readonly rejected: "rejected";
- readonly deleted: "deleted";
- readonly expired: "expired";
+ type SummarizationConfigContentType = (typeof SummarizationConfigContentType)[keyof typeof SummarizationConfigContentType];
+ declare const SummarizationConfigContentType: {
+ readonly auto: "auto";
+ readonly informative: "informative";
+ readonly conversational: "conversational";
  };

  /**
@@ -17451,10 +17617,10 @@ declare const JobDetailsStatus: {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
- type
- declare const
- readonly
- readonly
+ type SummarizationConfigSummaryLength = (typeof SummarizationConfigSummaryLength)[keyof typeof SummarizationConfigSummaryLength];
+ declare const SummarizationConfigSummaryLength: {
+ readonly brief: "brief";
+ readonly detailed: "detailed";
  };

  /**
@@ -17464,10 +17630,10 @@ declare const JobType: {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
- type
- declare const
- readonly
- readonly
+ type SummarizationConfigSummaryType = (typeof SummarizationConfigSummaryType)[keyof typeof SummarizationConfigSummaryType];
+ declare const SummarizationConfigSummaryType: {
+ readonly paragraphs: "paragraphs";
+ readonly bullets: "bullets";
  };

  /**
@@ -17477,10 +17643,12 @@ declare const OperatingPoint: {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
-
-
-
-
+
+ interface SummarizationConfig {
+ content_type?: SummarizationConfigContentType;
+ summary_length?: SummarizationConfigSummaryLength;
+ summary_type?: SummarizationConfigSummaryType;
+ }

  /**
  * Generated by orval v7.9.0 🍺
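An example SummarizationConfig combining the three enums added in the preceding hunks; every field is optional, so any subset is valid.

```ts
const summarize: SummarizationConfig = {
  content_type: "conversational",
  summary_length: "brief",
  summary_type: "bullets",
};
```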
@@ -17489,19 +17657,9 @@ type TranscriptionConfigAdditionalVocabItem = {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
-
-
-
- type TranscriptionConfigPunctuationOverrides = {
- /**
- * Ranges between zero and one. Higher values will produce more punctuation. The default is 0.5.
- * @minimum 0
- * @maximum 1
- */
- sensitivity?: number;
- /** The punctuation marks which the client is prepared to accept in transcription output, or the special value 'all' (the default). Unsupported marks are ignored. This value is used to guide the transcription process. */
- permitted_marks?: string[];
- };
+ interface SentimentAnalysisConfig {
+ [key: string]: unknown;
+ }

  /**
  * Generated by orval v7.9.0 🍺
@@ -17510,21 +17668,9 @@ type TranscriptionConfigPunctuationOverrides = {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
-
-
-
- - **none**: no speaker or channel labels are added.
- - **speaker**: speaker attribution is performed based on acoustic matching;
- all input channels are mixed into a single stream for processing.
- - **channel**: multiple input channels are processed individually and collated
- into a single transcript.
- */
- type TranscriptionConfigDiarization = (typeof TranscriptionConfigDiarization)[keyof typeof TranscriptionConfigDiarization];
- declare const TranscriptionConfigDiarization: {
- readonly none: "none";
- readonly speaker: "speaker";
- readonly channel: "channel";
- };
+ interface TopicDetectionConfig {
+ topics?: string[];
+ }

  /**
  * Generated by orval v7.9.0 🍺
@@ -17533,1078 +17679,40 @@ declare const TranscriptionConfigDiarization: {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
- /**
- * Whether or not to enable flexible endpointing and allow the entity to continue to be spoken.
- */
- type TranscriptionConfigMaxDelayMode = (typeof TranscriptionConfigMaxDelayMode)[keyof typeof TranscriptionConfigMaxDelayMode];
- declare const TranscriptionConfigMaxDelayMode: {
- readonly fixed: "fixed";
- readonly flexible: "flexible";
- };

  /**
- *
-
-
-
-
- */
- type TranscriptionConfigTranscriptFilteringConfigReplacementsItem = {
- from: string;
- to: string;
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- /**
- * Configuration for applying filtering to the transcription
- */
- type TranscriptionConfigTranscriptFilteringConfig = {
- /** If true, words that are identified as disfluencies will be removed from the transcript. If false (default), they are tagged in the transcript as 'disfluency'. */
- remove_disfluencies?: boolean;
- /** A list of replacements to apply to the transcript. Each replacement is a pair of strings, where the first string is the pattern to be replaced and the second string is the replacement text. */
- replacements?: TranscriptionConfigTranscriptFilteringConfigReplacementsItem[];
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- /**
- * Configuration for speaker diarization
- */
- type TranscriptionConfigSpeakerDiarizationConfig = {
- /** If true, the algorithm will prefer to stay with the current active speaker if it is a close enough match, even if other speakers may be closer. This is useful for cases where we can flip incorrectly between similar speakers during a single speaker section." */
- prefer_current_speaker?: boolean;
- /**
- * Controls how sensitive the algorithm is in terms of keeping similar speakers separate, as opposed to combining them into a single speaker. Higher values will typically lead to more speakers, as the degree of difference between speakers in order to allow them to remain distinct will be lower. A lower value for this parameter will conversely guide the algorithm towards being less sensitive in terms of retaining similar speakers, and as such may lead to fewer speakers overall. The default is 0.5.
- * @minimum 0
- * @maximum 1
- */
- speaker_sensitivity?: number;
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- interface TranscriptionConfig {
- /** Language model to process the audio input, normally specified as an ISO language code */
- language: string;
- /** Request a specialized model based on 'language' but optimized for a particular field, e.g. "finance" or "medical". */
- domain?: string;
- /** Language locale to be used when generating the transcription output, normally specified as an ISO language code */
- output_locale?: string;
- operating_point?: OperatingPoint;
- /** List of custom words or phrases that should be recognized. Alternative pronunciations can be specified to aid recognition. */
- additional_vocab?: TranscriptionConfigAdditionalVocabItem[];
- /** Control punctuation settings. */
- punctuation_overrides?: TranscriptionConfigPunctuationOverrides;
- /** Specify whether speaker or channel labels are added to the transcript.
- The default is `none`.
- - **none**: no speaker or channel labels are added.
- - **speaker**: speaker attribution is performed based on acoustic matching;
- all input channels are mixed into a single stream for processing.
- - **channel**: multiple input channels are processed individually and collated
- into a single transcript. */
- diarization?: TranscriptionConfigDiarization;
- /** Transcript labels to use when using collating separate input channels. */
- channel_diarization_labels?: string[];
- /** Include additional 'entity' objects in the transcription results (e.g. dates, numbers) and their original spoken form. These entities are interleaved with other types of results. The concatenation of these words is represented as a single entity with the concatenated written form present in the 'content' field. The entities contain a 'spoken_form' field, which can be used in place of the corresponding 'word' type results, in case a spoken form is preferred to a written form. They also contain a 'written_form', which can be used instead of the entity, if you want a breakdown of the words without spaces. They can still contain non-breaking spaces and other special whitespace characters, as they are considered part of the word for the formatting output. In case of a written_form, the individual word times are estimated and might not be accurate if the order of the words in the written form does not correspond to the order they were actually spoken (such as 'one hundred million dollars' and '$100 million'). */
- enable_entities?: boolean;
- /** Whether or not to enable flexible endpointing and allow the entity to continue to be spoken. */
- max_delay_mode?: TranscriptionConfigMaxDelayMode;
- /** Configuration for applying filtering to the transcription */
- transcript_filtering_config?: TranscriptionConfigTranscriptFilteringConfig;
- /** Configuration for speaker diarization */
- speaker_diarization_config?: TranscriptionConfigSpeakerDiarizationConfig;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- type NotificationConfigContentsItem = (typeof NotificationConfigContentsItem)[keyof typeof NotificationConfigContentsItem];
- declare const NotificationConfigContentsItem: {
- readonly jobinfo: "jobinfo";
- readonly transcript: "transcript";
- readonly "transcriptjson-v2": "transcript.json-v2";
- readonly transcripttxt: "transcript.txt";
- readonly transcriptsrt: "transcript.srt";
- readonly alignment: "alignment";
- readonly alignmentword_start_and_end: "alignment.word_start_and_end";
- readonly alignmentone_per_line: "alignment.one_per_line";
- readonly data: "data";
- readonly text: "text";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- /**
- * The method to be used with http and https urls. The default is post.
- */
- type NotificationConfigMethod = (typeof NotificationConfigMethod)[keyof typeof NotificationConfigMethod];
- declare const NotificationConfigMethod: {
- readonly post: "post";
- readonly put: "put";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- interface NotificationConfig {
- /** The url to which a notification message will be sent upon
- completion of the job. The job `id` and `status` are added
- as query parameters, and any combination of the job inputs
- and outputs can be included by listing them in `contents`.
-
- If `contents` is empty, the body of the request will be
- empty.
-
- If only one item is listed, it will be sent as the body of
- the request with `Content-Type` set to an appropriate value
- such as `application/octet-stream` or `application/json`.
-
- If multiple items are listed they will be sent as named file
- attachments using the multipart content type.
-
- If `contents` is not specified, the `transcript` item will
- be sent as a file attachment named `data_file`, for
- backwards compatibility.
-
- If the job was rejected or failed during processing, that
- will be indicated by the status, and any output items that
- are not available as a result will be omitted. The body
- formatting rules will still be followed as if all items were
- available.
-
- The user-agent header is set to `Speechmatics-API/2.0`, or
- `Speechmatics API V2` in older API versions.
- */
- url: string;
- /** Specifies a list of items to be attached to the notification message. When multiple items are requested, they are included as named file attachments. */
- contents?: NotificationConfigContentsItem[];
- /** The method to be used with http and https urls. The default is post. */
- method?: NotificationConfigMethod;
- /** A list of additional headers to be added to the notification request when using http or https. This is intended to support authentication or authorization, for example by supplying an OAuth2 bearer token. */
- auth_headers?: string[];
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- /**
- * Customer-defined JSON structure.
- */
- type TrackingDataDetails = {
- [key: string]: unknown;
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- interface TrackingData {
- /** The title of the job. */
- title?: string;
- /** External system reference. */
- reference?: string;
- tags?: string[];
- /** Customer-defined JSON structure. */
- details?: TrackingDataDetails;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- /**
- * Parameters that override default values of srt conversion. max_line_length: sets maximum count of characters per subtitle line including white space. max_lines: sets maximum count of lines in a subtitle section.
- */
- type OutputConfigSrtOverrides = {
- max_line_length?: number;
- max_lines?: number;
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- interface OutputConfig {
- /** Parameters that override default values of srt conversion. max_line_length: sets maximum count of characters per subtitle line including white space. max_lines: sets maximum count of lines in a subtitle section. */
- srt_overrides?: OutputConfigSrtOverrides;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- interface TranslationConfig$1 {
- /** @maxItems 5 */
- target_languages: string[];
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- /**
- * Action to take if all of the predicted languages are below the confidence threshold
- */
- type LanguageIdentificationConfigLowConfidenceAction = (typeof LanguageIdentificationConfigLowConfidenceAction)[keyof typeof LanguageIdentificationConfigLowConfidenceAction];
- declare const LanguageIdentificationConfigLowConfidenceAction: {
- readonly allow: "allow";
- readonly reject: "reject";
- readonly use_default_language: "use_default_language";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- interface LanguageIdentificationConfig {
- expected_languages?: string[];
- /** Action to take if all of the predicted languages are below the confidence threshold */
- low_confidence_action?: LanguageIdentificationConfigLowConfidenceAction;
- default_language?: string;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- type SummarizationConfigContentType = (typeof SummarizationConfigContentType)[keyof typeof SummarizationConfigContentType];
- declare const SummarizationConfigContentType: {
- readonly auto: "auto";
- readonly informative: "informative";
- readonly conversational: "conversational";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- type SummarizationConfigSummaryLength = (typeof SummarizationConfigSummaryLength)[keyof typeof SummarizationConfigSummaryLength];
- declare const SummarizationConfigSummaryLength: {
- readonly brief: "brief";
- readonly detailed: "detailed";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- type SummarizationConfigSummaryType = (typeof SummarizationConfigSummaryType)[keyof typeof SummarizationConfigSummaryType];
- declare const SummarizationConfigSummaryType: {
- readonly paragraphs: "paragraphs";
- readonly bullets: "bullets";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- interface SummarizationConfig {
- content_type?: SummarizationConfigContentType;
- summary_length?: SummarizationConfigSummaryLength;
- summary_type?: SummarizationConfigSummaryType;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- interface SentimentAnalysisConfig {
- [key: string]: unknown;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- interface TopicDetectionConfig {
- topics?: string[];
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- /**
- * JSON object that contains various groups of job configuration
- parameters. Based on the value of `type`, a type-specific object
- such as `transcription_config` is required to be present to
- specify all configuration settings or parameters needed to
- process the job inputs as expected.
-
- If the results of the job are to be forwarded on completion,
- `notification_config` can be provided with a list of callbacks
- to be made; no assumptions should be made about the order in
- which they will occur.
-
- Customer specific job details or metadata can be supplied in
- `tracking`, and this information will be available where
- possible in the job results and in callbacks.
-
- */
- interface JobConfig {
- type: JobType;
- fetch_data?: DataFetchConfig;
- fetch_text?: DataFetchConfig;
- alignment_config?: AlignmentConfig;
- transcription_config?: TranscriptionConfig;
- notification_config?: NotificationConfig[];
- tracking?: TrackingData;
- output_config?: OutputConfig;
- translation_config?: TranslationConfig$1;
- language_identification_config?: LanguageIdentificationConfig;
- summarization_config?: SummarizationConfig;
- sentiment_analysis_config?: SentimentAnalysisConfig;
- topic_detection_config?: TopicDetectionConfig;
- auto_chapters_config?: AutoChaptersConfig;
- audio_events_config?: AudioEventsConfig;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- interface JobDetailError {
- timestamp: string;
- message: string;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- /**
- * Document describing a job. JobConfig will be present in JobDetails returned for GET jobs/<id> request in SaaS and in Batch Appliance, but it will not be present in JobDetails returned as item in RetrieveJobsResponse in case of Batch Appliance.
- */
- interface JobDetails {
- /** The UTC date time the job was created. */
- created_at: string;
- /** Name of the data file submitted for job. */
- data_name: string;
- /** Name of the text file submitted to be aligned to audio. */
- text_name?: string;
- /**
- * The file duration (in seconds). May be missing for fetch URL jobs.
- * @minimum 0
- */
- duration?: number;
- /** The unique id assigned to the job. */
- id: string;
- /** The status of the job. * `running` - The job is actively running. * `done` - The job completed successfully. * `rejected` - The job was accepted at first, but later could not be processed by the transcriber. * `deleted` - The user deleted the job. * `expired` - The system deleted the job. Usually because the job was in the `done` state for a very long time. */
- status: JobDetailsStatus;
- config?: JobConfig;
- /** Optional parameter used for backwards compatibility with v1 api */
- lang?: string;
- /** Optional list of errors that have occurred in user interaction, for example: audio could not be fetched or notification could not be sent. */
- errors?: JobDetailError[];
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- interface DeleteJobResponse {
- job: JobDetails;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- type DeleteJobsJobidParams = {
- /**
- * When set, a running job will be force terminated. When unset (default), a running job will not be terminated and request will return HTTP 423 Locked.
- */
- force?: boolean;
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- /**
- * Early Access Release Tag
- */
- type EARTagParameter = string;
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- /**
- * The error message.
- */
- type ErrorResponseError = (typeof ErrorResponseError)[keyof typeof ErrorResponseError];
- declare const ErrorResponseError: {
- readonly Bad_Request: "Bad Request";
- readonly File_Expired: "File Expired";
- readonly Forbidden: "Forbidden";
- readonly Resource_Locked: "Resource Locked";
- readonly Format_Not_Supported: "Format Not Supported";
- readonly Internal_Server_Error: "Internal Server Error";
- readonly Job_error: "Job error";
- readonly Job_Expired: "Job Expired";
- readonly Job_In_Progress: "Job In Progress";
- readonly Job_is_not_of_type_alignment: "Job is not of type alignment";
- readonly Job_is_not_of_type_transcription: "Job is not of type transcription";
- readonly Job_not_found: "Job not found";
- readonly Job_rejected: "Job rejected";
- readonly Job_rejected_due_to_invalid_audio: "Job rejected due to invalid audio";
- readonly Job_rejected_due_to_invalid_text: "Job rejected due to invalid text";
- readonly Malformed_request: "Malformed request";
- readonly Missing_callback: "Missing callback";
- readonly Missing_data_file: "Missing data_file";
- readonly Missing_text_file: "Missing text_file";
- readonly No_language_selected: "No language selected";
- readonly Not_Implemented: "Not Implemented";
- readonly Permission_Denied: "Permission Denied";
- readonly Requested_product_not_available: "Requested product not available";
- readonly Transcription_not_ready: "Transcription not ready";
- readonly Log_file_not_available: "Log file not available";
- readonly Requested_Early_Access_Release_not_available: "Requested Early Access Release not available";
- readonly Unprocessable_Entity: "Unprocessable Entity";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- interface ErrorResponse {
- /**
- * The HTTP status code.
- * @minimum 100
- */
- code: number;
- /** The error message. */
- error: ErrorResponseError;
- /** The details of the error. */
- detail?: string;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- type GetJobsJobidAlignmentTags = (typeof GetJobsJobidAlignmentTags)[keyof typeof GetJobsJobidAlignmentTags];
- declare const GetJobsJobidAlignmentTags: {
- readonly word_start_and_end: "word_start_and_end";
- readonly one_per_line: "one_per_line";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- type GetJobsJobidAlignmentParams = {
- /**
- * Control how timing information is added to the text file provided as input to the alignment job. If set to `word_start_and_end`, SGML tags are inserted at the start and end of each word, for example <time=0.41>. If set to `one_per_line` square bracket tags are inserted at the start of each line, for example `[00:00:00.4] `. The default is `word_start_and_end`.
- */
- tags?: GetJobsJobidAlignmentTags;
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- type GetJobsJobidTranscriptFormat = (typeof GetJobsJobidTranscriptFormat)[keyof typeof GetJobsJobidTranscriptFormat];
- declare const GetJobsJobidTranscriptFormat: {
- readonly "json-v2": "json-v2";
- readonly txt: "txt";
- readonly srt: "srt";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- type GetJobsJobidTranscriptParams = {
- /**
- * The transcription format (by default the `json-v2` format is returned).
- */
- format?: GetJobsJobidTranscriptFormat;
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- type GetJobsParams = {
- /**
- * UTC Timestamp cursor for paginating request response. Filters jobs based on creation time to the nearest millisecond. Accepts up to nanosecond precision, truncating to millisecond precision. By default, the response will start with the most recent job.
- */
- created_before?: string;
- /**
- * Limit for paginating the request response. Defaults to 100.
- */
- limit?: number;
- /**
- * Specifies whether deleted jobs should be included in the response. Defaults to false.
- */
- include_deleted?: boolean;
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- type GetUsageParams = {
- /**
- * Include usage after the given date (inclusive). This is a [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) calendar date format: `YYYY-MM-DD`.
- */
- since?: string;
- /**
- * Include usage before the given date (inclusive). This is a [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) calendar date format: `YYYY-MM-DD`.
- */
- until?: string;
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- /**
- * Summary information about an ASR job, to support identification and tracking.
- */
- interface JobInfo {
- /** The UTC date time the job was created. */
- created_at: string;
- /** Name of data file submitted for job. */
- data_name: string;
- /**
- * The data file audio duration (in seconds).
- * @minimum 0
- */
- duration: number;
- /** The unique id assigned to the job. */
- id: string;
- /** Name of the text file submitted to be aligned to audio. */
- text_name?: string;
- tracking?: TrackingData;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- type JobMode = (typeof JobMode)[keyof typeof JobMode];
- declare const JobMode: {
- readonly batch: "batch";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
- interface LanguageIdentificationResultAlternative {
- language?: string;
- confidence?: number;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
- interface LanguageIdentificationResultItem {
- alternatives?: LanguageIdentificationResultAlternative[];
- start_time?: number;
- end_time?: number;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
|
|
18253
|
-
* Speechmatics ASR REST API
|
|
18254
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18255
|
-
* OpenAPI spec version: 2.0.0
|
|
18256
|
-
*/
|
|
18257
|
-
type LanguageIdentificationResultError = (typeof LanguageIdentificationResultError)[keyof typeof LanguageIdentificationResultError];
|
|
18258
|
-
declare const LanguageIdentificationResultError: {
|
|
18259
|
-
readonly LOW_CONFIDENCE: "LOW_CONFIDENCE";
|
|
18260
|
-
readonly UNEXPECTED_LANGUAGE: "UNEXPECTED_LANGUAGE";
|
|
18261
|
-
readonly NO_SPEECH: "NO_SPEECH";
|
|
18262
|
-
readonly FILE_UNREADABLE: "FILE_UNREADABLE";
|
|
18263
|
-
readonly OTHER: "OTHER";
|
|
18264
|
-
};
|
|
18265
|
-
|
|
18266
|
-
/**
|
|
18267
|
-
* Generated by orval v7.9.0 🍺
|
|
18268
|
-
* Do not edit manually.
|
|
18269
|
-
* Speechmatics ASR REST API
|
|
18270
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18271
|
-
* OpenAPI spec version: 2.0.0
|
|
18272
|
-
*/
|
|
18273
|
-
|
|
18274
|
-
interface LanguageIdentificationResult {
|
|
18275
|
-
results?: LanguageIdentificationResultItem[];
|
|
18276
|
-
error?: LanguageIdentificationResultError;
|
|
18277
|
-
message?: string;
|
|
18278
|
-
}
|
|
18279
|
-
|
|
18280
|
-
/**
|
|
18281
|
-
* Generated by orval v7.9.0 🍺
|
|
18282
|
-
* Do not edit manually.
|
|
18283
|
-
* Speechmatics ASR REST API
|
|
18284
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18285
|
-
* OpenAPI spec version: 2.0.0
|
|
18286
|
-
*/
|
|
18287
|
-
/**
|
|
18288
|
-
* The direction that words in the language should be written and read in.
|
|
18289
|
-
*/
|
|
18290
|
-
type LanguagePackInfoWritingDirection = (typeof LanguagePackInfoWritingDirection)[keyof typeof LanguagePackInfoWritingDirection];
|
|
18291
|
-
declare const LanguagePackInfoWritingDirection: {
|
|
18292
|
-
readonly "left-to-right": "left-to-right";
|
|
18293
|
-
readonly "right-to-left": "right-to-left";
|
|
18294
|
-
};
|
|
18295
|
-
|
|
18296
|
-
/**
|
|
18297
|
-
* Generated by orval v7.9.0 🍺
|
|
18298
|
-
* Do not edit manually.
|
|
18299
|
-
* Speechmatics ASR REST API
|
|
18300
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18301
|
-
* OpenAPI spec version: 2.0.0
|
|
18302
|
-
*/
|
|
18303
|
-
|
|
18304
|
-
/**
|
|
18305
|
-
* Properties of the language pack.
|
|
18306
|
-
*/
|
|
18307
|
-
interface LanguagePackInfo {
|
|
18308
|
-
/** Full descriptive name of the language, e.g. 'Japanese'. */
|
|
18309
|
-
language_description?: string;
|
|
18310
|
-
/** The character to use to separate words. */
|
|
18311
|
-
word_delimiter: string;
|
|
18312
|
-
/** The direction that words in the language should be written and read in. */
|
|
18313
|
-
writing_direction?: LanguagePackInfoWritingDirection;
|
|
18314
|
-
/** Whether or not ITN (inverse text normalization) is available for the language pack. */
|
|
18315
|
-
itn?: boolean;
|
|
18316
|
-
/** Whether or not language model adaptation has been applied to the language pack. */
|
|
18317
|
-
adapted?: boolean;
|
|
18318
|
-
}
|
|
18319
|
-
|
|
18320
|
-
/**
|
|
18321
|
-
* Generated by orval v7.9.0 🍺
|
|
18322
|
-
* Do not edit manually.
|
|
18323
|
-
* Speechmatics ASR REST API
|
|
18324
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18325
|
-
* OpenAPI spec version: 2.0.0
|
|
18326
|
-
*/
|
|
18327
|
-
type PostJobsBody = {
|
|
18328
|
-
/** JSON containing a `JobConfig` model indicating the type and parameters for the recognition job. */
|
|
18329
|
-
config: string;
|
|
18330
|
-
/** The data file to be processed. Alternatively the data file can be fetched from a url specified in `JobConfig`. */
|
|
18331
|
-
data_file?: Blob;
|
|
18332
|
-
/** For alignment jobs, the text file that the data file should be aligned to. */
|
|
18333
|
-
text_file?: Blob;
|
|
18334
|
-
};
|
|
18335
|
-
|
|
18336
|
-
/**
|
|
18337
|
-
* Generated by orval v7.9.0 🍺
|
|
18338
|
-
* Do not edit manually.
|
|
18339
|
-
* Speechmatics ASR REST API
|
|
18340
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18341
|
-
* OpenAPI spec version: 2.0.0
|
|
18342
|
-
*/
|
|
18343
|
-
type RecognitionDisplayDirection = (typeof RecognitionDisplayDirection)[keyof typeof RecognitionDisplayDirection];
|
|
18344
|
-
declare const RecognitionDisplayDirection: {
|
|
18345
|
-
readonly ltr: "ltr";
|
|
18346
|
-
readonly rtl: "rtl";
|
|
18347
|
-
};
|
|
18348
|
-
|
|
18349
|
-
/**
|
|
18350
|
-
* Generated by orval v7.9.0 🍺
|
|
18351
|
-
* Do not edit manually.
|
|
18352
|
-
* Speechmatics ASR REST API
|
|
18353
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18354
|
-
* OpenAPI spec version: 2.0.0
|
|
18355
|
-
*/
|
|
18356
|
-
|
|
18357
|
-
interface RecognitionDisplay {
|
|
18358
|
-
direction: RecognitionDisplayDirection;
|
|
18359
|
-
}
|
|
18360
|
-
|
|
18361
|
-
/**
|
|
18362
|
-
* Generated by orval v7.9.0 🍺
|
|
18363
|
-
* Do not edit manually.
|
|
18364
|
-
* Speechmatics ASR REST API
|
|
18365
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18366
|
-
* OpenAPI spec version: 2.0.0
|
|
18367
|
-
*/
|
|
18368
|
-
|
|
18369
|
-
/**
|
|
18370
|
-
* List of possible job output item values, ordered by likelihood.
|
|
18371
|
-
*/
|
|
18372
|
-
interface RecognitionAlternative {
|
|
18373
|
-
content: string;
|
|
18374
|
-
confidence: number;
|
|
18375
|
-
language: string;
|
|
18376
|
-
display?: RecognitionDisplay;
|
|
18377
|
-
speaker?: string;
|
|
18378
|
-
tags?: string[];
|
|
18379
|
-
}
|
|
18380
|
-
|
|
18381
|
-
/**
|
|
18382
|
-
* Generated by orval v7.9.0 🍺
|
|
18383
|
-
* Do not edit manually.
|
|
18384
|
-
* Speechmatics ASR REST API
|
|
18385
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18386
|
-
* OpenAPI spec version: 2.0.0
|
|
18387
|
-
*/
|
|
18388
|
-
type TranslationErrorType = (typeof TranslationErrorType)[keyof typeof TranslationErrorType];
|
|
18389
|
-
declare const TranslationErrorType: {
|
|
18390
|
-
readonly translation_failed: "translation_failed";
|
|
18391
|
-
readonly unsupported_translation_pair: "unsupported_translation_pair";
|
|
18392
|
-
};
|
|
18393
|
-
|
|
18394
|
-
/**
|
|
18395
|
-
* Generated by orval v7.9.0 🍺
|
|
18396
|
-
* Do not edit manually.
|
|
18397
|
-
* Speechmatics ASR REST API
|
|
18398
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18399
|
-
* OpenAPI spec version: 2.0.0
|
|
18400
|
-
*/
|
|
18401
|
-
|
|
18402
|
-
interface TranslationError {
|
|
18403
|
-
type?: TranslationErrorType;
|
|
18404
|
-
/** Human readable error message */
|
|
18405
|
-
message?: string;
|
|
18406
|
-
}
|
|
18407
|
-
|
|
18408
|
-
/**
|
|
18409
|
-
* Generated by orval v7.9.0 🍺
|
|
18410
|
-
* Do not edit manually.
|
|
18411
|
-
* Speechmatics ASR REST API
|
|
18412
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18413
|
-
* OpenAPI spec version: 2.0.0
|
|
18414
|
-
*/
|
|
18415
|
-
type SummarizationErrorType = (typeof SummarizationErrorType)[keyof typeof SummarizationErrorType];
|
|
18416
|
-
declare const SummarizationErrorType: {
|
|
18417
|
-
readonly summarization_failed: "summarization_failed";
|
|
18418
|
-
readonly unsupported_language: "unsupported_language";
|
|
18419
|
-
};
|
|
18420
|
-
|
|
18421
|
-
/**
|
|
18422
|
-
* Generated by orval v7.9.0 🍺
|
|
18423
|
-
* Do not edit manually.
|
|
18424
|
-
* Speechmatics ASR REST API
|
|
18425
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18426
|
-
* OpenAPI spec version: 2.0.0
|
|
18427
|
-
*/
|
|
18428
|
-
|
|
18429
|
-
interface SummarizationError {
|
|
18430
|
-
type?: SummarizationErrorType;
|
|
18431
|
-
/** Human readable error message */
|
|
18432
|
-
message?: string;
|
|
18433
|
-
}
|
|
18434
|
-
|
|
18435
|
-
/**
|
|
18436
|
-
* Generated by orval v7.9.0 🍺
|
|
18437
|
-
* Do not edit manually.
|
|
18438
|
-
* Speechmatics ASR REST API
|
|
18439
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18440
|
-
* OpenAPI spec version: 2.0.0
|
|
18441
|
-
*/
|
|
18442
|
-
type SentimentAnalysisErrorType = (typeof SentimentAnalysisErrorType)[keyof typeof SentimentAnalysisErrorType];
|
|
18443
|
-
declare const SentimentAnalysisErrorType: {
|
|
18444
|
-
readonly sentiment_analysis_failed: "sentiment_analysis_failed";
|
|
18445
|
-
readonly unsupported_language: "unsupported_language";
|
|
18446
|
-
};
|
|
18447
|
-
|
|
18448
|
-
/**
|
|
18449
|
-
* Generated by orval v7.9.0 🍺
|
|
18450
|
-
* Do not edit manually.
|
|
18451
|
-
* Speechmatics ASR REST API
|
|
18452
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18453
|
-
* OpenAPI spec version: 2.0.0
|
|
18454
|
-
*/
|
|
18455
|
-
|
|
18456
|
-
interface SentimentAnalysisError {
|
|
18457
|
-
type?: SentimentAnalysisErrorType;
|
|
18458
|
-
/** Human readable error message */
|
|
18459
|
-
message?: string;
|
|
18460
|
-
}
|
|
18461
|
-
|
|
18462
|
-
/**
|
|
18463
|
-
* Generated by orval v7.9.0 🍺
|
|
18464
|
-
* Do not edit manually.
|
|
18465
|
-
* Speechmatics ASR REST API
|
|
18466
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18467
|
-
* OpenAPI spec version: 2.0.0
|
|
18468
|
-
*/
|
|
18469
|
-
type TopicDetectionErrorType = (typeof TopicDetectionErrorType)[keyof typeof TopicDetectionErrorType];
|
|
18470
|
-
declare const TopicDetectionErrorType: {
|
|
18471
|
-
readonly topic_detection_failed: "topic_detection_failed";
|
|
18472
|
-
readonly unsupported_list_of_topics: "unsupported_list_of_topics";
|
|
18473
|
-
readonly unsupported_language: "unsupported_language";
|
|
18474
|
-
};
|
|
18475
|
-
|
|
18476
|
-
/**
|
|
18477
|
-
* Generated by orval v7.9.0 🍺
|
|
18478
|
-
* Do not edit manually.
|
|
18479
|
-
* Speechmatics ASR REST API
|
|
18480
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18481
|
-
* OpenAPI spec version: 2.0.0
|
|
18482
|
-
*/
|
|
18483
|
-
|
|
18484
|
-
interface TopicDetectionError {
|
|
18485
|
-
type?: TopicDetectionErrorType;
|
|
18486
|
-
/** Human readable error message */
|
|
18487
|
-
message?: string;
|
|
18488
|
-
}
|
|
18489
|
-
|
|
18490
|
-
/**
|
|
18491
|
-
* Generated by orval v7.9.0 🍺
|
|
18492
|
-
* Do not edit manually.
|
|
18493
|
-
* Speechmatics ASR REST API
|
|
18494
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18495
|
-
* OpenAPI spec version: 2.0.0
|
|
18496
|
-
*/
|
|
18497
|
-
|
|
18498
|
-
/**
|
|
18499
|
-
* Summary information about the output from an ASR job, comprising the job type and configuration parameters used when generating the output.
|
|
18500
|
-
*/
|
|
18501
|
-
interface RecognitionMetadata {
|
|
18502
|
-
/** The UTC date time the transcription output was created. */
|
|
18503
|
-
created_at: string;
|
|
18504
|
-
type: JobType;
|
|
18505
|
-
transcription_config?: TranscriptionConfig;
|
|
18506
|
-
/** List of errors that occurred in the translation stage. */
|
|
18507
|
-
translation_errors?: TranslationError[];
|
|
18508
|
-
/** List of errors that occurred in the summarization stage. */
|
|
18509
|
-
summarization_errors?: SummarizationError[];
|
|
18510
|
-
/** List of errors that occurred in the sentiment analysis stage. */
|
|
18511
|
-
sentiment_analysis_errors?: SentimentAnalysisError[];
|
|
18512
|
-
/** List of errors that occurred in the topic detection stage. */
|
|
18513
|
-
topic_detection_errors?: TopicDetectionError[];
|
|
18514
|
-
/** List of errors that occurred in the auto chapters stage. */
|
|
18515
|
-
auto_chapters_errors?: AutoChaptersResultError[];
|
|
18516
|
-
alignment_config?: AlignmentConfig;
|
|
18517
|
-
output_config?: OutputConfig;
|
|
18518
|
-
language_pack_info?: LanguagePackInfo;
|
|
18519
|
-
language_identification?: LanguageIdentificationResult;
|
|
18520
|
-
/** Orchestrator version in PEP 440 Format or set to 'version_not_found' as default. */
|
|
18521
|
-
orchestrator_version?: string;
|
|
18522
|
-
}
|
|
18523
|
-
|
|
18524
|
-
/**
|
|
18525
|
-
* Generated by orval v7.9.0 🍺
|
|
18526
|
-
* Do not edit manually.
|
|
18527
|
-
* Speechmatics ASR REST API
|
|
18528
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18529
|
-
* OpenAPI spec version: 2.0.0
|
|
18530
|
-
*/
|
|
18531
|
-
/**
|
|
18532
|
-
* New types of items may appear without being requested; unrecognized item types can be ignored.
|
|
18533
|
-
*/
|
|
18534
|
-
type RecognitionResultType = (typeof RecognitionResultType)[keyof typeof RecognitionResultType];
|
|
18535
|
-
declare const RecognitionResultType: {
|
|
18536
|
-
readonly word: "word";
|
|
18537
|
-
readonly punctuation: "punctuation";
|
|
18538
|
-
readonly entity: "entity";
|
|
18539
|
-
};
|
|
18540
|
-
|
|
18541
|
-
/**
|
|
18542
|
-
* Generated by orval v7.9.0 🍺
|
|
18543
|
-
* Do not edit manually.
|
|
18544
|
-
* Speechmatics ASR REST API
|
|
18545
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18546
|
-
* OpenAPI spec version: 2.0.0
|
|
18547
|
-
*/
|
|
18548
|
-
/**
|
|
18549
|
-
* What kind of object this is. See #/Definitions/RecognitionResult for definitions of the enums.
|
|
18550
|
-
*/
|
|
18551
|
-
type WrittenFormRecognitionResultType = (typeof WrittenFormRecognitionResultType)[keyof typeof WrittenFormRecognitionResultType];
|
|
18552
|
-
declare const WrittenFormRecognitionResultType: {
|
|
18553
|
-
readonly word: "word";
|
|
18554
|
-
};
|
|
18555
|
-
|
|
18556
|
-
/**
|
|
18557
|
-
* Generated by orval v7.9.0 🍺
|
|
18558
|
-
* Do not edit manually.
|
|
18559
|
-
* Speechmatics ASR REST API
|
|
18560
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18561
|
-
* OpenAPI spec version: 2.0.0
|
|
18562
|
-
*/
|
|
18563
|
-
|
|
18564
|
-
/**
|
|
18565
|
-
* A WrittenFormRecognitionResult describes a simple object which consists solely of 'word' type entries with a start and end time. It can occur only inside the written_form property of a full RecognitionResult"
|
|
18566
|
-
*/
|
|
18567
|
-
interface WrittenFormRecognitionResult {
|
|
18568
|
-
alternatives: RecognitionAlternative[];
|
|
18569
|
-
end_time: number;
|
|
18570
|
-
start_time: number;
|
|
18571
|
-
/** What kind of object this is. See #/Definitions/RecognitionResult for definitions of the enums. */
|
|
18572
|
-
type: WrittenFormRecognitionResultType;
|
|
18573
|
-
}
|
|
18574
|
-
|
|
18575
|
-
/**
|
|
18576
|
-
* Generated by orval v7.9.0 🍺
|
|
18577
|
-
* Do not edit manually.
|
|
18578
|
-
* Speechmatics ASR REST API
|
|
18579
|
-
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18580
|
-
* OpenAPI spec version: 2.0.0
|
|
18581
|
-
*/
|
|
18582
|
-
/**
|
|
18583
|
-
* What kind of object this is. See #/Definitions/RecognitionResult for definitions of the enums.
|
|
18584
|
-
*/
|
|
18585
|
-
type SpokenFormRecognitionResultType = (typeof SpokenFormRecognitionResultType)[keyof typeof SpokenFormRecognitionResultType];
|
|
18586
|
-
declare const SpokenFormRecognitionResultType: {
|
|
18587
|
-
readonly word: "word";
|
|
18588
|
-
readonly punctuation: "punctuation";
|
|
18589
|
-
};
|
|
17684
|
+
* JSON object that contains various groups of job configuration
|
|
17685
|
+
parameters. Based on the value of `type`, a type-specific object
|
|
17686
|
+
such as `transcription_config` is required to be present to
|
|
17687
|
+
specify all configuration settings or parameters needed to
|
|
17688
|
+
process the job inputs as expected.
|
|
18590
17689
|
|
|
18591
|
-
|
|
18592
|
-
|
|
18593
|
-
|
|
18594
|
-
|
|
18595
|
-
|
|
18596
|
-
|
|
18597
|
-
|
|
17690
|
+
If the results of the job are to be forwarded on completion,
|
|
17691
|
+
`notification_config` can be provided with a list of callbacks
|
|
17692
|
+
to be made; no assumptions should be made about the order in
|
|
17693
|
+
which they will occur.
|
|
17694
|
+
|
|
17695
|
+
Customer specific job details or metadata can be supplied in
|
|
17696
|
+
`tracking`, and this information will be available where
|
|
17697
|
+
possible in the job results and in callbacks.
|
|
18598
17698
|
|
|
18599
|
-
/**
|
|
18600
|
-
* A SpokenFormRecognitionResult describes a simple object which consists solely of 'word' or 'punctuation' type entries with a start and end time. It can occur only inside the spoken_form property of a full "RecognitionResult"
|
|
18601
17699
|
*/
|
|
18602
|
-
interface
|
|
18603
|
-
|
|
18604
|
-
|
|
18605
|
-
|
|
18606
|
-
|
|
18607
|
-
|
|
17700
|
+
interface JobConfig {
|
|
17701
|
+
type: JobType;
|
|
17702
|
+
fetch_data?: DataFetchConfig;
|
|
17703
|
+
fetch_text?: DataFetchConfig;
|
|
17704
|
+
alignment_config?: AlignmentConfig;
|
|
17705
|
+
transcription_config?: TranscriptionConfig;
|
|
17706
|
+
notification_config?: NotificationConfig[];
|
|
17707
|
+
tracking?: TrackingData;
|
|
17708
|
+
output_config?: OutputConfig;
|
|
17709
|
+
translation_config?: TranslationConfig$1;
|
|
17710
|
+
language_identification_config?: LanguageIdentificationConfig;
|
|
17711
|
+
summarization_config?: SummarizationConfig;
|
|
17712
|
+
sentiment_analysis_config?: SentimentAnalysisConfig;
|
|
17713
|
+
topic_detection_config?: TopicDetectionConfig;
|
|
17714
|
+
auto_chapters_config?: AutoChaptersConfig;
|
|
17715
|
+
audio_events_config?: AudioEventsConfig;
|
|
18608
17716
|
}
|
|
18609
17717
|
|
|
18610
17718
|
/**
|
|
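The new `JobConfig` doc comment above describes how the type-specific config object, callbacks, and tracking metadata fit together. A minimal orientation sketch follows; the `"transcription"` job type value and the notification/tracking field shapes are assumptions not shown in this diff, and only the `JobConfig` member names come from the declarations above:

```ts
// Sketch only: a JobConfig-shaped object for a batch transcription job.
// The "transcription" JobType value and the url/title fields are assumed.
const config = {
    type: "transcription",
    // Type-specific object required for this job type, per the doc comment.
    transcription_config: { language: "en" },
    // Callbacks may fire in any order, per the doc comment above.
    notification_config: [{ url: "https://example.com/callback" }],
    // Customer metadata, echoed back in results and callbacks where possible.
    tracking: { title: "demo-job" },
};
```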
@@ -18614,16 +17722,10 @@ interface SpokenFormRecognitionResult {
  * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
  * OpenAPI spec version: 2.0.0
  */
-
-
-
-
-declare const RecognitionResultAttachesTo: {
-    readonly previous: "previous";
-    readonly next: "next";
-    readonly both: "both";
-    readonly none: "none";
-};
+interface JobDetailError {
+    timestamp: string;
+    message: string;
+}
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -18634,27 +17736,29 @@ declare const RecognitionResultAttachesTo: {
 */
 
 /**
- *
+ * Document describing a job. JobConfig will be present in JobDetails returned for GET jobs/<id> request in SaaS and in Batch Appliance, but it will not be present in JobDetails returned as item in RetrieveJobsResponse in case of Batch Appliance.
 */
-interface
-
-
-
+interface JobDetails {
+    /** The UTC date time the job was created. */
+    created_at: string;
+    /** Name of the data file submitted for job. */
+    data_name: string;
+    /** Name of the text file submitted to be aligned to audio. */
+    text_name?: string;
     /**
-     *
+     * The file duration (in seconds). May be missing for fetch URL jobs.
     * @minimum 0
-     * @maximum 100
     */
-
-    /**
-
-    /**
-
-
-
-    /**
-
+    duration?: number;
+    /** The unique id assigned to the job. */
+    id: string;
+    /** The status of the job. * `running` - The job is actively running. * `done` - The job completed successfully. * `rejected` - The job was accepted at first, but later could not be processed by the transcriber. * `deleted` - The user deleted the job. * `expired` - The system deleted the job. Usually because the job was in the `done` state for a very long time. */
+    status: JobDetailsStatus;
+    config?: JobConfig;
+    /** Optional parameter used for backwards compatibility with v1 api */
+    lang?: string;
+    /** Optional list of errors that have occurred in user interaction, for example: audio could not be fetched or notification could not be sent. */
+    errors?: JobDetailError[];
 }
 
 /**
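Given the `status` semantics documented on `JobDetails` above, a small hedged sketch of a terminal-state check (the string literals come from the doc comment; the exact `JobDetailsStatus` union is assumed to match it):

```ts
// Sketch: per the JobDetails docs, `running` is the only in-flight status;
// done/rejected/deleted/expired all mean the job will not progress further.
function isTerminal(job: { status: string }): boolean {
    return job.status !== "running";
}
```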
@@ -18665,7 +17769,7 @@ interface RecognitionResult {
 * OpenAPI spec version: 2.0.0
 */
 
-interface
+interface DeleteJobResponse {
     job: JobDetails;
 }
 
@@ -18676,39 +17780,11 @@ interface RetrieveJobResponse {
 * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
 * OpenAPI spec version: 2.0.0
 */
-
-
-
-
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-interface TranslationSentence {
-    start_time?: number;
-    end_time?: number;
-    content?: string;
-    speaker?: string;
-    channel?: string;
-}
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-
-/**
- * Translations of the transcript into other languages. It is a map of ISO language codes to arrays of translated sentences. Configured using `translation_config`.
- */
-type RetrieveTranscriptResponseTranslations = {
-    [key: string]: TranslationSentence[];
+type DeleteJobsJobidParams = {
+    /**
+     * When set, a running job will be force terminated. When unset (default), a running job will not be terminated and request will return HTTP 423 Locked.
+     */
+    force?: boolean;
 };
 
 /**
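The `force` flag above changes delete semantics for running jobs. A hedged sketch of how the query parameter might be sent (the `/v2/jobs/{id}` path, base URL, and auth header are assumptions; only `force` and the HTTP 423 behaviour come from the type's doc comment):

```ts
// Sketch: force-terminate a running job. Without force=true, the doc comment
// above says a running job is not terminated and the API answers 423 Locked.
async function forceDeleteJob(baseUrl: string, jobId: string, apiKey: string) {
    const res = await fetch(`${baseUrl}/v2/jobs/${jobId}?force=true`, {
        method: "DELETE",
        headers: { Authorization: `Bearer ${apiKey}` }, // auth scheme assumed
    });
    if (res.status === 423) throw new Error("Job still running and not forced");
    return res.json();
}
```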
@@ -18719,64 +17795,9 @@ type RetrieveTranscriptResponseTranslations = {
 * OpenAPI spec version: 2.0.0
 */
 /**
- *
- */
-interface SummarizationResult {
-    content?: string;
-}
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-/**
- * Represents a segment of text and its associated sentiment.
- */
-interface SentimentSegment {
-    text?: string;
-    start_time?: number;
-    end_time?: number;
-    sentiment?: string;
-    speaker?: string;
-    channel?: string;
-    confidence?: number;
-}
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-/**
- * Holds the count of sentiment information grouped by positive, neutral and negative.
- */
-interface SentimentSummaryDetail {
-    positive_count?: number;
-    negative_count?: number;
-    neutral_count?: number;
-}
-
-/**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Speechmatics ASR REST API
- * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
- * OpenAPI spec version: 2.0.0
- */
-/**
- * Holds sentiment information for a specific speaker.
+ * Early Access Release Tag
 */
-
-    speaker?: string;
-    positive_count?: number;
-    negative_count?: number;
-    neutral_count?: number;
-}
+type EARTagParameter = string;
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -18786,14 +17807,38 @@ interface SentimentSpeakerSummary {
 * OpenAPI spec version: 2.0.0
 */
 /**
- *
+ * The error message.
 */
-
-
-
-
-
-
+type ErrorResponseError = (typeof ErrorResponseError)[keyof typeof ErrorResponseError];
+declare const ErrorResponseError: {
+    readonly Bad_Request: "Bad Request";
+    readonly File_Expired: "File Expired";
+    readonly Forbidden: "Forbidden";
+    readonly Resource_Locked: "Resource Locked";
+    readonly Format_Not_Supported: "Format Not Supported";
+    readonly Internal_Server_Error: "Internal Server Error";
+    readonly Job_error: "Job error";
+    readonly Job_Expired: "Job Expired";
+    readonly Job_In_Progress: "Job In Progress";
+    readonly Job_is_not_of_type_alignment: "Job is not of type alignment";
+    readonly Job_is_not_of_type_transcription: "Job is not of type transcription";
+    readonly Job_not_found: "Job not found";
+    readonly Job_rejected: "Job rejected";
+    readonly Job_rejected_due_to_invalid_audio: "Job rejected due to invalid audio";
+    readonly Job_rejected_due_to_invalid_text: "Job rejected due to invalid text";
+    readonly Malformed_request: "Malformed request";
+    readonly Missing_callback: "Missing callback";
+    readonly Missing_data_file: "Missing data_file";
+    readonly Missing_text_file: "Missing text_file";
+    readonly No_language_selected: "No language selected";
+    readonly Not_Implemented: "Not Implemented";
+    readonly Permission_Denied: "Permission Denied";
+    readonly Requested_product_not_available: "Requested product not available";
+    readonly Transcription_not_ready: "Transcription not ready";
+    readonly Log_file_not_available: "Log file not available";
+    readonly Requested_Early_Access_Release_not_available: "Requested Early Access Release not available";
+    readonly Unprocessable_Entity: "Unprocessable Entity";
+};
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -18803,15 +17848,16 @@ interface SentimentChannelSummary {
 * OpenAPI spec version: 2.0.0
 */
 
-
-
-
-
-
-
-
-
-
+interface ErrorResponse {
+    /**
+     * The HTTP status code.
+     * @minimum 100
+     */
+    code: number;
+    /** The error message. */
+    error: ErrorResponseError;
+    /** The details of the error. */
+    detail?: string;
 }
 
 /**
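With `ErrorResponse` now carrying a closed set of `error` strings, consumers can branch on the value directly. A sketch; which errors are worth retrying is an illustrative choice, not something this diff specifies:

```ts
// Sketch: crude retry classification over the ErrorResponseError values above.
type ErrResp = { code: number; error: string; detail?: string };

function isRetryable(err: ErrResp): boolean {
    // Both values below are literals from the ErrorResponseError const map.
    return err.error === "Job In Progress" || err.error === "Transcription not ready";
}
```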
@@ -18821,14 +17867,10 @@ interface SentimentSummary {
 * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
 * OpenAPI spec version: 2.0.0
 */
-
-
-
-
-type SentimentAnalysisResultSentimentAnalysis = {
-    /** An array of objects that represent a segment of text and its associated sentiment. */
-    segments?: SentimentSegment[];
-    summary?: SentimentSummary;
+type GetJobsJobidAlignmentTags = (typeof GetJobsJobidAlignmentTags)[keyof typeof GetJobsJobidAlignmentTags];
+declare const GetJobsJobidAlignmentTags: {
+    readonly word_start_and_end: "word_start_and_end";
+    readonly one_per_line: "one_per_line";
 };
 
 /**
@@ -18839,13 +17881,12 @@ type SentimentAnalysisResultSentimentAnalysis = {
 * OpenAPI spec version: 2.0.0
 */
 
-
-
-
-
-
-
-}
+type GetJobsJobidAlignmentParams = {
+    /**
+     * Control how timing information is added to the text file provided as input to the alignment job. If set to `word_start_and_end`, SGML tags are inserted at the start and end of each word, for example <time=0.41>. If set to `one_per_line` square bracket tags are inserted at the start of each line, for example `[00:00:00.4] `. The default is `word_start_and_end`.
+     */
+    tags?: GetJobsJobidAlignmentTags;
+};
 
 /**
 * Generated by orval v7.9.0 🍺
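A quick sketch of the two tagging modes described in the `GetJobsJobidAlignmentParams` doc comment (the request line itself and query serialization are assumptions):

```ts
// Sketch: word_start_and_end inserts SGML tags like <time=0.41> around each
// word; one_per_line emits one bracket tag like [00:00:00.4] per line.
const alignmentQuery = new URLSearchParams({ tags: "one_per_line" });
// e.g. GET .../jobs/{id}/alignment?tags=one_per_line  (path assumed)
```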
@@ -18854,12 +17895,12 @@ interface SentimentAnalysisResult {
 * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
 * OpenAPI spec version: 2.0.0
 */
-
-
-
-
-
-}
+type GetJobsJobidTranscriptFormat = (typeof GetJobsJobidTranscriptFormat)[keyof typeof GetJobsJobidTranscriptFormat];
+declare const GetJobsJobidTranscriptFormat: {
+    readonly "json-v2": "json-v2";
+    readonly txt: "txt";
+    readonly srt: "srt";
+};
 
 /**
 * Generated by orval v7.9.0 🍺
@@ -18869,15 +17910,12 @@ interface TopicDetectionSegmentTopic {
 * OpenAPI spec version: 2.0.0
 */
 
-
-
-
-
-
-
-    end_time?: number;
-    topics?: TopicDetectionSegmentTopic[];
-}
+type GetJobsJobidTranscriptParams = {
+    /**
+     * The transcription format (by default the `json-v2` format is returned).
+     */
+    format?: GetJobsJobidTranscriptFormat;
+};
 
 /**
 * Generated by orval v7.9.0 🍺
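And the transcript endpoint's `format` parameter, per `GetJobsJobidTranscriptFormat` above (again, the request line itself is an assumption):

```ts
// Sketch: request an SRT transcript instead of the default json-v2 format.
const transcriptQuery = new URLSearchParams({ format: "srt" });
// e.g. GET .../jobs/{id}/transcript?format=srt  (path assumed)
```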
@@ -18886,12 +17924,20 @@ interface TopicDetectionSegment {
 * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
 * OpenAPI spec version: 2.0.0
 */
-
-
-
-
-
-
+type GetJobsParams = {
+    /**
+     * UTC Timestamp cursor for paginating request response. Filters jobs based on creation time to the nearest millisecond. Accepts up to nanosecond precision, truncating to millisecond precision. By default, the response will start with the most recent job.
+     */
+    created_before?: string;
+    /**
+     * Limit for paginating the request response. Defaults to 100.
+     */
+    limit?: number;
+    /**
+     * Specifies whether deleted jobs should be included in the response. Defaults to false.
+     */
+    include_deleted?: boolean;
+};
 
 /**
 * Generated by orval v7.9.0 🍺
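The `GetJobsParams` cursor above paginates by creation time, newest first. A hedged paging sketch; the endpoint itself is abstracted behind a caller-supplied `fetchPage`, and whether `created_before` excludes the cursor job is assumed:

```ts
// Sketch: walk the job list newest-first, using each page's oldest
// created_at as the next created_before cursor (millisecond precision).
async function* listAllJobs(
    fetchPage: (p: { limit?: number; created_before?: string }) =>
        Promise<{ jobs: { created_at: string }[] }>,
) {
    let created_before: string | undefined;
    for (;;) {
        const { jobs } = await fetchPage({ limit: 100, created_before });
        if (jobs.length === 0) return;
        yield* jobs;
        // Assumes created_before is exclusive of the cursor job itself.
        created_before = jobs[jobs.length - 1].created_at;
    }
}
```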
@@ -18900,13 +17946,16 @@ interface TopicDetectionSummaryOverall {
 * The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
 * OpenAPI spec version: 2.0.0
 */
-
-/**
-
-
-
-
-
+type GetUsageParams = {
+    /**
+     * Include usage after the given date (inclusive). This is a [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) calendar date format: `YYYY-MM-DD`.
+     */
+    since?: string;
+    /**
+     * Include usage before the given date (inclusive). This is a [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) calendar date format: `YYYY-MM-DD`.
+     */
+    until?: string;
+};
 
 /**
 * Generated by orval v7.9.0 🍺
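`GetUsageParams` takes inclusive ISO-8601 calendar dates, so a month query looks like this:

```ts
// Sketch: usage for March 2024; both bounds are inclusive YYYY-MM-DD dates.
const usageQuery = { since: "2024-03-01", until: "2024-03-31" };
```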
@@ -18915,15 +17964,10 @@ interface TopicDetectionSummary {
|
|
|
18915
17964
|
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18916
17965
|
* OpenAPI spec version: 2.0.0
|
|
18917
17966
|
*/
|
|
18918
|
-
|
|
18919
|
-
|
|
18920
|
-
|
|
18921
|
-
|
|
18922
|
-
interface TopicDetectionResult {
|
|
18923
|
-
/** An array of objects that represent a segment of text and its associated topic information. */
|
|
18924
|
-
segments?: TopicDetectionSegment[];
|
|
18925
|
-
summary?: TopicDetectionSummary;
|
|
18926
|
-
}
|
|
17967
|
+
type JobMode = (typeof JobMode)[keyof typeof JobMode];
|
|
17968
|
+
declare const JobMode: {
|
|
17969
|
+
readonly batch: "batch";
|
|
17970
|
+
};
|
|
18927
17971
|
|
|
18928
17972
|
/**
|
|
18929
17973
|
* Generated by orval v7.9.0 🍺
|
|
@@ -18932,12 +17976,13 @@ interface TopicDetectionResult {
|
|
|
18932
17976
|
* The Speechmatics Automatic Speech Recognition REST API is used to submit ASR jobs and receive the results. The supported job type is transcription of audio files.
|
|
18933
17977
|
* OpenAPI spec version: 2.0.0
|
|
18934
17978
|
*/
|
|
18935
|
-
|
|
18936
|
-
/**
|
|
18937
|
-
|
|
18938
|
-
*/
|
|
18939
|
-
|
|
18940
|
-
|
|
17979
|
+
type PostJobsBody = {
|
|
17980
|
+
/** JSON containing a `JobConfig` model indicating the type and parameters for the recognition job. */
|
|
17981
|
+
config: string;
|
|
17982
|
+
/** The data file to be processed. Alternatively the data file can be fetched from a url specified in `JobConfig`. */
|
|
17983
|
+
data_file?: Blob;
|
|
17984
|
+
/** For alignment jobs, the text file that the data file should be aligned to. */
|
|
17985
|
+
text_file?: Blob;
|
|
18941
17986
|
};
|
|
18942
17987
|
|
|
18943
17988
|
/**
|
|
@@ -18948,14 +17993,9 @@ type RetrieveTranscriptResponseAudioEventSummaryChannels = {
|
|
|
18948
17993
|
* OpenAPI spec version: 2.0.0
|
|
18949
17994
|
*/
|
|
18950
17995
|
|
|
18951
|
-
|
|
18952
|
-
|
|
18953
|
-
|
|
18954
|
-
type RetrieveTranscriptResponseAudioEventSummary = {
|
|
18955
|
-
overall?: AudioEventSummary;
|
|
18956
|
-
/** Summary keyed by channel, only set if channel diarization is enabled */
|
|
18957
|
-
channels?: RetrieveTranscriptResponseAudioEventSummaryChannels;
|
|
18958
|
-
};
|
|
17996
|
+
interface RetrieveJobResponse {
|
|
17997
|
+
job: JobDetails;
|
|
17998
|
+
}
|
|
18959
17999
|
|
|
18960
18000
|
/**
|
|
18961
18001
|
* Generated by orval v7.9.0 🍺
|
|
@@ -18965,22 +18005,8 @@ type RetrieveTranscriptResponseAudioEventSummary = {
|
|
|
18965
18005
|
* OpenAPI spec version: 2.0.0
|
|
18966
18006
|
*/
|
|
18967
18007
|
|
|
18968
|
-
interface
|
|
18969
|
-
|
|
18970
|
-
format: string;
|
|
18971
|
-
job: JobInfo;
|
|
18972
|
-
metadata: RecognitionMetadata;
|
|
18973
|
-
results: RecognitionResult[];
|
|
18974
|
-
/** Translations of the transcript into other languages. It is a map of ISO language codes to arrays of translated sentences. Configured using `translation_config`. */
|
|
18975
|
-
translations?: RetrieveTranscriptResponseTranslations;
|
|
18976
|
-
summary?: SummarizationResult;
|
|
18977
|
-
sentiment_analysis?: SentimentAnalysisResult;
|
|
18978
|
-
topics?: TopicDetectionResult;
|
|
18979
|
-
chapters?: AutoChaptersResult;
|
|
18980
|
-
/** Timestamped audio events, only set if `audio_events_config` is in the config */
|
|
18981
|
-
audio_events?: AudioEventItem[];
|
|
18982
|
-
/** Summary statistics per event type, keyed by `type`, e.g. music */
|
|
18983
|
-
audio_event_summary?: RetrieveTranscriptResponseAudioEventSummary;
|
|
18008
|
+
interface RetrieveJobsResponse {
|
|
18009
|
+
jobs: JobDetails[];
|
|
18984
18010
|
}
|
|
18985
18011
|
|
|
18986
18012
|
/**
|
|
@@ -19025,16 +18051,15 @@ interface UsageResponse {
|
|
|
19025
18051
|
* OpenAPI spec version: 2.0.0
|
|
19026
18052
|
*/
|
|
19027
18053
|
|
|
19028
|
-
|
|
19029
|
-
|
|
19030
|
-
|
|
19031
|
-
|
|
18054
|
+
declare const index$2_AlignmentConfig: typeof AlignmentConfig;
|
|
18055
|
+
declare const index$2_AudioEventItem: typeof AudioEventItem;
|
|
18056
|
+
declare const index$2_AudioEventSummary: typeof AudioEventSummary;
|
|
18057
|
+
declare const index$2_AudioEventSummaryItem: typeof AudioEventSummaryItem;
|
|
19032
18058
|
type index$2_AudioEventsConfig = AudioEventsConfig;
|
|
19033
18059
|
type index$2_AutoChaptersConfig = AutoChaptersConfig;
|
|
19034
|
-
|
|
19035
|
-
|
|
18060
|
+
declare const index$2_AutoChaptersResult: typeof AutoChaptersResult;
|
|
18061
|
+
declare const index$2_AutoChaptersResultError: typeof AutoChaptersResultError;
|
|
19036
18062
|
declare const index$2_AutoChaptersResultErrorType: typeof AutoChaptersResultErrorType;
|
|
19037
|
-
type index$2_Chapter = Chapter;
|
|
19038
18063
|
type index$2_CreateJobResponse = CreateJobResponse;
|
|
19039
18064
|
type index$2_DataFetchConfig = DataFetchConfig;
|
|
19040
18065
|
type index$2_DeleteJobResponse = DeleteJobResponse;
|
|
@@ -19052,83 +18077,81 @@ type index$2_JobConfig = JobConfig;
|
|
|
19052
18077
|
type index$2_JobDetailError = JobDetailError;
|
|
19053
18078
|
type index$2_JobDetails = JobDetails;
|
|
19054
18079
|
declare const index$2_JobDetailsStatus: typeof JobDetailsStatus;
|
|
19055
|
-
|
|
18080
|
+
declare const index$2_JobInfo: typeof JobInfo;
|
|
19056
18081
|
declare const index$2_JobMode: typeof JobMode;
|
|
19057
18082
|
declare const index$2_JobType: typeof JobType;
|
|
19058
18083
|
type index$2_LanguageIdentificationConfig = LanguageIdentificationConfig;
|
|
19059
18084
|
declare const index$2_LanguageIdentificationConfigLowConfidenceAction: typeof LanguageIdentificationConfigLowConfidenceAction;
|
|
19060
|
-
|
|
19061
|
-
|
|
18085
|
+
declare const index$2_LanguageIdentificationResult: typeof LanguageIdentificationResult;
|
|
18086
|
+
declare const index$2_LanguageIdentificationResultAlternative: typeof LanguageIdentificationResultAlternative;
|
|
19062
18087
|
declare const index$2_LanguageIdentificationResultError: typeof LanguageIdentificationResultError;
|
|
19063
|
-
|
|
19064
|
-
|
|
18088
|
+
declare const index$2_LanguageIdentificationResultItem: typeof LanguageIdentificationResultItem;
|
|
18089
|
+
declare const index$2_LanguagePackInfo: typeof LanguagePackInfo;
|
|
19065
18090
|
declare const index$2_LanguagePackInfoWritingDirection: typeof LanguagePackInfoWritingDirection;
|
|
19066
18091
|
type index$2_NotificationConfig = NotificationConfig;
|
|
19067
18092
|
declare const index$2_NotificationConfigContentsItem: typeof NotificationConfigContentsItem;
|
|
19068
18093
|
declare const index$2_NotificationConfigMethod: typeof NotificationConfigMethod;
|
|
19069
18094
|
declare const index$2_OperatingPoint: typeof OperatingPoint;
|
|
19070
|
-
|
|
19071
|
-
|
|
18095
|
+
declare const index$2_OutputConfig: typeof OutputConfig;
|
|
18096
|
+
declare const index$2_OutputConfigSrtOverrides: typeof OutputConfigSrtOverrides;
|
|
19072
18097
|
type index$2_PostJobsBody = PostJobsBody;
|
|
19073
|
-
|
|
19074
|
-
|
|
18098
|
+
declare const index$2_RecognitionAlternative: typeof RecognitionAlternative;
|
|
18099
|
+
declare const index$2_RecognitionDisplay: typeof RecognitionDisplay;
|
|
19075
18100
|
declare const index$2_RecognitionDisplayDirection: typeof RecognitionDisplayDirection;
|
|
19076
|
-
|
|
19077
|
-
|
|
18101
|
+
declare const index$2_RecognitionMetadata: typeof RecognitionMetadata;
|
|
18102
|
+
declare const index$2_RecognitionResult: typeof RecognitionResult;
|
|
19078
18103
|
declare const index$2_RecognitionResultAttachesTo: typeof RecognitionResultAttachesTo;
|
|
19079
18104
|
declare const index$2_RecognitionResultType: typeof RecognitionResultType;
|
|
19080
18105
|
type index$2_RetrieveJobResponse = RetrieveJobResponse;
|
|
19081
18106
|
type index$2_RetrieveJobsResponse = RetrieveJobsResponse;
|
|
19082
|
-
|
|
19083
|
-
|
|
19084
|
-
|
|
19085
|
-
|
|
18107
|
+
declare const index$2_RetrieveTranscriptResponse: typeof RetrieveTranscriptResponse;
|
|
18108
|
+
declare const index$2_RetrieveTranscriptResponseAudioEventSummary: typeof RetrieveTranscriptResponseAudioEventSummary;
|
|
18109
|
+
declare const index$2_RetrieveTranscriptResponseAudioEventSummaryChannels: typeof RetrieveTranscriptResponseAudioEventSummaryChannels;
|
|
18110
|
+
declare const index$2_RetrieveTranscriptResponseTranslations: typeof RetrieveTranscriptResponseTranslations;
|
|
19086
18111
|
type index$2_SentimentAnalysisConfig = SentimentAnalysisConfig;
|
|
19087
|
-
|
|
18112
|
+
declare const index$2_SentimentAnalysisError: typeof SentimentAnalysisError;
|
|
19088
18113
|
declare const index$2_SentimentAnalysisErrorType: typeof SentimentAnalysisErrorType;
|
|
19089
|
-
|
|
19090
|
-
|
|
19091
|
-
|
|
19092
|
-
|
|
19093
|
-
|
|
19094
|
-
|
|
19095
|
-
|
|
19096
|
-
type index$2_SpokenFormRecognitionResult = SpokenFormRecognitionResult;
|
|
18114
|
+
declare const index$2_SentimentAnalysisResultSentimentAnalysis: typeof SentimentAnalysisResultSentimentAnalysis;
|
|
18115
|
+
declare const index$2_SentimentChannelSummary: typeof SentimentChannelSummary;
|
|
18116
|
+
declare const index$2_SentimentSegment: typeof SentimentSegment;
|
|
18117
|
+
declare const index$2_SentimentSpeakerSummary: typeof SentimentSpeakerSummary;
|
|
18118
|
+
declare const index$2_SentimentSummary: typeof SentimentSummary;
|
|
18119
|
+
declare const index$2_SentimentSummaryDetail: typeof SentimentSummaryDetail;
|
|
18120
|
+
declare const index$2_SpokenFormRecognitionResult: typeof SpokenFormRecognitionResult;
|
|
19097
18121
|
declare const index$2_SpokenFormRecognitionResultType: typeof SpokenFormRecognitionResultType;
|
|
19098
18122
|
type index$2_SummarizationConfig = SummarizationConfig;
|
|
19099
18123
|
declare const index$2_SummarizationConfigContentType: typeof SummarizationConfigContentType;
|
|
19100
18124
|
declare const index$2_SummarizationConfigSummaryLength: typeof SummarizationConfigSummaryLength;
|
|
19101
18125
|
declare const index$2_SummarizationConfigSummaryType: typeof SummarizationConfigSummaryType;
|
|
19102
|
-
|
|
18126
|
+
declare const index$2_SummarizationError: typeof SummarizationError;
|
|
19103
18127
|
declare const index$2_SummarizationErrorType: typeof SummarizationErrorType;
|
|
19104
|
-
|
|
18128
|
+
declare const index$2_SummarizationResult: typeof SummarizationResult;
|
|
19105
18129
|
type index$2_TopicDetectionConfig = TopicDetectionConfig;
|
|
19106
|
-
|
|
18130
|
+
declare const index$2_TopicDetectionError: typeof TopicDetectionError;
|
|
19107
18131
|
declare const index$2_TopicDetectionErrorType: typeof TopicDetectionErrorType;
|
|
19108
|
-
|
|
19109
|
-
|
|
19110
|
-
|
|
19111
|
-
|
|
19112
|
-
|
|
19113
|
-
|
|
19114
|
-
|
|
19115
|
-
|
|
19116
|
-
type index$2_TranscriptionConfigAdditionalVocabItem = TranscriptionConfigAdditionalVocabItem;
|
|
18132
|
+
declare const index$2_TopicDetectionSegment: typeof TopicDetectionSegment;
|
|
18133
|
+
declare const index$2_TopicDetectionSegmentTopic: typeof TopicDetectionSegmentTopic;
|
|
18134
|
+
declare const index$2_TopicDetectionSummary: typeof TopicDetectionSummary;
|
|
18135
|
+
declare const index$2_TopicDetectionSummaryOverall: typeof TopicDetectionSummaryOverall;
|
|
18136
|
+
declare const index$2_TrackingData: typeof TrackingData;
|
|
18137
|
+
declare const index$2_TrackingDataDetails: typeof TrackingDataDetails;
|
|
18138
|
+
declare const index$2_TranscriptionConfig: typeof TranscriptionConfig;
|
|
18139
|
+
declare const index$2_TranscriptionConfigAdditionalVocabItem: typeof TranscriptionConfigAdditionalVocabItem;
|
|
19117
18140
|
declare const index$2_TranscriptionConfigDiarization: typeof TranscriptionConfigDiarization;
|
|
19118
18141
|
declare const index$2_TranscriptionConfigMaxDelayMode: typeof TranscriptionConfigMaxDelayMode;
|
|
19119
|
-
|
|
19120
|
-
|
|
19121
|
-
|
|
19122
|
-
|
|
19123
|
-
|
|
18142
|
+
declare const index$2_TranscriptionConfigPunctuationOverrides: typeof TranscriptionConfigPunctuationOverrides;
|
|
18143
|
+
declare const index$2_TranscriptionConfigSpeakerDiarizationConfig: typeof TranscriptionConfigSpeakerDiarizationConfig;
|
|
18144
|
+
declare const index$2_TranscriptionConfigTranscriptFilteringConfig: typeof TranscriptionConfigTranscriptFilteringConfig;
|
|
18145
|
+
declare const index$2_TranscriptionConfigTranscriptFilteringConfigReplacementsItem: typeof TranscriptionConfigTranscriptFilteringConfigReplacementsItem;
|
|
18146
|
+
declare const index$2_TranslationError: typeof TranslationError;
|
|
19124
18147
|
declare const index$2_TranslationErrorType: typeof TranslationErrorType;
|
|
19125
|
-
|
|
18148
|
+
declare const index$2_TranslationSentence: typeof TranslationSentence;
|
|
19126
18149
|
type index$2_UsageDetails = UsageDetails;
|
|
19127
18150
|
type index$2_UsageResponse = UsageResponse;
|
|
19128
|
-
|
|
18151
|
+
declare const index$2_WrittenFormRecognitionResult: typeof WrittenFormRecognitionResult;
|
|
19129
18152
|
declare const index$2_WrittenFormRecognitionResultType: typeof WrittenFormRecognitionResultType;
|
|
19130
18153
|
declare namespace index$2 {
|
|
19131
|
-
export {
|
|
18154
|
+
 export { index$2_AlignmentConfig as AlignmentConfig, index$2_AudioEventItem as AudioEventItem, index$2_AudioEventSummary as AudioEventSummary, index$2_AudioEventSummaryItem as AudioEventSummaryItem, type index$2_AudioEventsConfig as AudioEventsConfig, type index$2_AutoChaptersConfig as AutoChaptersConfig, index$2_AutoChaptersResult as AutoChaptersResult, index$2_AutoChaptersResultError as AutoChaptersResultError, index$2_AutoChaptersResultErrorType as AutoChaptersResultErrorType, Chapter$1 as Chapter, type index$2_CreateJobResponse as CreateJobResponse, type index$2_DataFetchConfig as DataFetchConfig, type index$2_DeleteJobResponse as DeleteJobResponse, type index$2_DeleteJobsJobidParams as DeleteJobsJobidParams, type index$2_EARTagParameter as EARTagParameter, type index$2_ErrorResponse as ErrorResponse, index$2_ErrorResponseError as ErrorResponseError, type index$2_GetJobsJobidAlignmentParams as GetJobsJobidAlignmentParams, index$2_GetJobsJobidAlignmentTags as GetJobsJobidAlignmentTags, index$2_GetJobsJobidTranscriptFormat as GetJobsJobidTranscriptFormat, type index$2_GetJobsJobidTranscriptParams as GetJobsJobidTranscriptParams, type index$2_GetJobsParams as GetJobsParams, type index$2_GetUsageParams as GetUsageParams, type index$2_JobConfig as JobConfig, type index$2_JobDetailError as JobDetailError, type index$2_JobDetails as JobDetails, index$2_JobDetailsStatus as JobDetailsStatus, index$2_JobInfo as JobInfo, index$2_JobMode as JobMode, index$2_JobType as JobType, type index$2_LanguageIdentificationConfig as LanguageIdentificationConfig, index$2_LanguageIdentificationConfigLowConfidenceAction as LanguageIdentificationConfigLowConfidenceAction, index$2_LanguageIdentificationResult as LanguageIdentificationResult, index$2_LanguageIdentificationResultAlternative as LanguageIdentificationResultAlternative, index$2_LanguageIdentificationResultError as LanguageIdentificationResultError, index$2_LanguageIdentificationResultItem as LanguageIdentificationResultItem, index$2_LanguagePackInfo as LanguagePackInfo, index$2_LanguagePackInfoWritingDirection as LanguagePackInfoWritingDirection, type index$2_NotificationConfig as NotificationConfig, index$2_NotificationConfigContentsItem as NotificationConfigContentsItem, index$2_NotificationConfigMethod as NotificationConfigMethod, index$2_OperatingPoint as OperatingPoint, index$2_OutputConfig as OutputConfig, index$2_OutputConfigSrtOverrides as OutputConfigSrtOverrides, type index$2_PostJobsBody as PostJobsBody, index$2_RecognitionAlternative as RecognitionAlternative, index$2_RecognitionDisplay as RecognitionDisplay, index$2_RecognitionDisplayDirection as RecognitionDisplayDirection, index$2_RecognitionMetadata as RecognitionMetadata, index$2_RecognitionResult as RecognitionResult, index$2_RecognitionResultAttachesTo as RecognitionResultAttachesTo, index$2_RecognitionResultType as RecognitionResultType, type index$2_RetrieveJobResponse as RetrieveJobResponse, type index$2_RetrieveJobsResponse as RetrieveJobsResponse, index$2_RetrieveTranscriptResponse as RetrieveTranscriptResponse, index$2_RetrieveTranscriptResponseAudioEventSummary as RetrieveTranscriptResponseAudioEventSummary, index$2_RetrieveTranscriptResponseAudioEventSummaryChannels as RetrieveTranscriptResponseAudioEventSummaryChannels, index$2_RetrieveTranscriptResponseTranslations as RetrieveTranscriptResponseTranslations, type index$2_SentimentAnalysisConfig as SentimentAnalysisConfig, index$2_SentimentAnalysisError as SentimentAnalysisError, index$2_SentimentAnalysisErrorType as SentimentAnalysisErrorType, SentimentAnalysisResult$2 as SentimentAnalysisResult, index$2_SentimentAnalysisResultSentimentAnalysis as SentimentAnalysisResultSentimentAnalysis, index$2_SentimentChannelSummary as SentimentChannelSummary, index$2_SentimentSegment as SentimentSegment, index$2_SentimentSpeakerSummary as SentimentSpeakerSummary, index$2_SentimentSummary as SentimentSummary, index$2_SentimentSummaryDetail as SentimentSummaryDetail, index$2_SpokenFormRecognitionResult as SpokenFormRecognitionResult, index$2_SpokenFormRecognitionResultType as SpokenFormRecognitionResultType, type index$2_SummarizationConfig as SummarizationConfig, index$2_SummarizationConfigContentType as SummarizationConfigContentType, index$2_SummarizationConfigSummaryLength as SummarizationConfigSummaryLength, index$2_SummarizationConfigSummaryType as SummarizationConfigSummaryType, index$2_SummarizationError as SummarizationError, index$2_SummarizationErrorType as SummarizationErrorType, index$2_SummarizationResult as SummarizationResult, type index$2_TopicDetectionConfig as TopicDetectionConfig, index$2_TopicDetectionError as TopicDetectionError, index$2_TopicDetectionErrorType as TopicDetectionErrorType, TopicDetectionResult$1 as TopicDetectionResult, index$2_TopicDetectionSegment as TopicDetectionSegment, index$2_TopicDetectionSegmentTopic as TopicDetectionSegmentTopic, index$2_TopicDetectionSummary as TopicDetectionSummary, index$2_TopicDetectionSummaryOverall as TopicDetectionSummaryOverall, index$2_TrackingData as TrackingData, index$2_TrackingDataDetails as TrackingDataDetails, index$2_TranscriptionConfig as TranscriptionConfig, index$2_TranscriptionConfigAdditionalVocabItem as TranscriptionConfigAdditionalVocabItem, index$2_TranscriptionConfigDiarization as TranscriptionConfigDiarization, index$2_TranscriptionConfigMaxDelayMode as TranscriptionConfigMaxDelayMode, index$2_TranscriptionConfigPunctuationOverrides as TranscriptionConfigPunctuationOverrides, index$2_TranscriptionConfigSpeakerDiarizationConfig as TranscriptionConfigSpeakerDiarizationConfig, index$2_TranscriptionConfigTranscriptFilteringConfig as TranscriptionConfigTranscriptFilteringConfig, index$2_TranscriptionConfigTranscriptFilteringConfigReplacementsItem as TranscriptionConfigTranscriptFilteringConfigReplacementsItem, type TranslationConfig$1 as TranslationConfig, index$2_TranslationError as TranslationError, index$2_TranslationErrorType as TranslationErrorType, index$2_TranslationSentence as TranslationSentence, type index$2_UsageDetails as UsageDetails, type index$2_UsageResponse as UsageResponse, index$2_WrittenFormRecognitionResult as WrittenFormRecognitionResult, index$2_WrittenFormRecognitionResultType as WrittenFormRecognitionResultType };
 }

 /**
@@ -20510,7 +19533,7 @@ declare const getJobsResponse: z.ZodObject<{
 }, "strip", z.ZodTypeAny, {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -20538,7 +19561,7 @@ declare const getJobsResponse: z.ZodObject<{
 }, {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -20697,7 +19720,7 @@ declare const getJobsResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -20778,7 +19801,7 @@ declare const getJobsResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -20877,7 +19900,7 @@ declare const getJobsResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -20971,7 +19994,7 @@ declare const getJobsResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -21067,7 +20090,7 @@ declare const getJobsResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -21163,7 +20186,7 @@ declare const getJobsResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -21345,7 +20368,7 @@ declare const getJobsJobidResponse: z.ZodObject<{
 }, "strip", z.ZodTypeAny, {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -21373,7 +20396,7 @@ declare const getJobsJobidResponse: z.ZodObject<{
 }, {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -21532,7 +20555,7 @@ declare const getJobsJobidResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -21613,7 +20636,7 @@ declare const getJobsJobidResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -21712,7 +20735,7 @@ declare const getJobsJobidResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -21806,7 +20829,7 @@ declare const getJobsJobidResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -21902,7 +20925,7 @@ declare const getJobsJobidResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -21998,7 +21021,7 @@ declare const getJobsJobidResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -22187,7 +21210,7 @@ declare const deleteJobsJobidResponse: z.ZodObject<{
 }, "strip", z.ZodTypeAny, {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -22215,7 +21238,7 @@ declare const deleteJobsJobidResponse: z.ZodObject<{
 }, {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -22374,7 +21397,7 @@ declare const deleteJobsJobidResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -22455,7 +21478,7 @@ declare const deleteJobsJobidResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -22554,7 +21577,7 @@ declare const deleteJobsJobidResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -22648,7 +21671,7 @@ declare const deleteJobsJobidResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -22744,7 +21767,7 @@ declare const deleteJobsJobidResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -22840,7 +21863,7 @@ declare const deleteJobsJobidResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -23067,7 +22090,7 @@ declare const getJobsJobidTranscriptResponse: z.ZodObject<{
 }, "strip", z.ZodTypeAny, {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -23095,7 +22118,7 @@ declare const getJobsJobidTranscriptResponse: z.ZodObject<{
 }, {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -23283,7 +22306,7 @@ declare const getJobsJobidTranscriptResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -23364,7 +22387,7 @@ declare const getJobsJobidTranscriptResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -24106,7 +23129,7 @@ declare const getJobsJobidTranscriptResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -24330,7 +23353,7 @@ declare const getJobsJobidTranscriptResponse: z.ZodObject<{
 transcription_config?: {
 language: string;
 diarization?: "none" | "speaker" | "channel" | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 domain?: string | undefined;
 output_locale?: string | undefined;
 additional_vocab?: {
@@ -24603,14 +23626,14 @@ declare const getUsageResponse: z.ZodObject<{
 mode: "batch";
 duration_hrs: number;
 language?: string | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 }, {
 type: "transcription" | "alignment";
 count: number;
 mode: "batch";
 duration_hrs: number;
 language?: string | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 }>, "many">;
 details: z.ZodArray<z.ZodObject<{
 mode: z.ZodEnum<["batch"]>;
@@ -24625,14 +23648,14 @@ declare const getUsageResponse: z.ZodObject<{
 mode: "batch";
 duration_hrs: number;
 language?: string | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 }, {
 type: "transcription" | "alignment";
 count: number;
 mode: "batch";
 duration_hrs: number;
 language?: string | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 }>, "many">;
 }, "strip", z.ZodTypeAny, {
 summary: {
@@ -24641,7 +23664,7 @@ declare const getUsageResponse: z.ZodObject<{
 mode: "batch";
 duration_hrs: number;
 language?: string | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 }[];
 details: {
 type: "transcription" | "alignment";
@@ -24649,7 +23672,7 @@ declare const getUsageResponse: z.ZodObject<{
 mode: "batch";
 duration_hrs: number;
 language?: string | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 }[];
 since: string;
 until: string;
@@ -24660,7 +23683,7 @@ declare const getUsageResponse: z.ZodObject<{
 mode: "batch";
 duration_hrs: number;
 language?: string | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 }[];
 details: {
 type: "transcription" | "alignment";
@@ -24668,7 +23691,7 @@ declare const getUsageResponse: z.ZodObject<{
 mode: "batch";
 duration_hrs: number;
 language?: string | undefined;
-operating_point?: "
+operating_point?: "standard" | "enhanced" | undefined;
 }[];
 since: string;
 until: string;
@@ -25912,4 +24935,4 @@ declare namespace elevenLabsSpeechToTextAPI_zod {
 export { elevenLabsSpeechToTextAPI_zod_deleteTranscriptByIdHeader as deleteTranscriptByIdHeader, elevenLabsSpeechToTextAPI_zod_deleteTranscriptByIdParams as deleteTranscriptByIdParams, elevenLabsSpeechToTextAPI_zod_deleteTranscriptByIdResponse as deleteTranscriptByIdResponse, elevenLabsSpeechToTextAPI_zod_getTranscriptByIdHeader as getTranscriptByIdHeader, elevenLabsSpeechToTextAPI_zod_getTranscriptByIdParams as getTranscriptByIdParams, elevenLabsSpeechToTextAPI_zod_getTranscriptByIdResponse as getTranscriptByIdResponse, elevenLabsSpeechToTextAPI_zod_speechToTextBody as speechToTextBody, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemIncludeSpeakersDefault as speechToTextBodyAdditionalFormatsItemIncludeSpeakersDefault, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemIncludeSpeakersDefaultFive as speechToTextBodyAdditionalFormatsItemIncludeSpeakersDefaultFive, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemIncludeSpeakersDefaultFour as speechToTextBodyAdditionalFormatsItemIncludeSpeakersDefaultFour, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemIncludeSpeakersDefaultOne as speechToTextBodyAdditionalFormatsItemIncludeSpeakersDefaultOne, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemIncludeSpeakersDefaultThree as speechToTextBodyAdditionalFormatsItemIncludeSpeakersDefaultThree, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemIncludeSpeakersDefaultTwo as speechToTextBodyAdditionalFormatsItemIncludeSpeakersDefaultTwo, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemIncludeTimestampsDefault as speechToTextBodyAdditionalFormatsItemIncludeTimestampsDefault, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemIncludeTimestampsDefaultFive as speechToTextBodyAdditionalFormatsItemIncludeTimestampsDefaultFive, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemIncludeTimestampsDefaultFour as speechToTextBodyAdditionalFormatsItemIncludeTimestampsDefaultFour, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemIncludeTimestampsDefaultOne as speechToTextBodyAdditionalFormatsItemIncludeTimestampsDefaultOne, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemIncludeTimestampsDefaultThree as speechToTextBodyAdditionalFormatsItemIncludeTimestampsDefaultThree, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemIncludeTimestampsDefaultTwo as speechToTextBodyAdditionalFormatsItemIncludeTimestampsDefaultTwo, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemMaxCharactersPerLineDefault as speechToTextBodyAdditionalFormatsItemMaxCharactersPerLineDefault, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemMaxCharactersPerLineDefaultThree as speechToTextBodyAdditionalFormatsItemMaxCharactersPerLineDefaultThree, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemMaxSegmentCharsDefaultOnefive as speechToTextBodyAdditionalFormatsItemMaxSegmentCharsDefaultOnefive, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemMaxSegmentDurationSDefaultOnefive as speechToTextBodyAdditionalFormatsItemMaxSegmentDurationSDefaultOnefive, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsItemSegmentOnSilenceLongerThanSDefaultOnefive as speechToTextBodyAdditionalFormatsItemSegmentOnSilenceLongerThanSDefaultOnefive, elevenLabsSpeechToTextAPI_zod_speechToTextBodyAdditionalFormatsMax as speechToTextBodyAdditionalFormatsMax, elevenLabsSpeechToTextAPI_zod_speechToTextBodyDiarizationThresholdMaxOne as speechToTextBodyDiarizationThresholdMaxOne, elevenLabsSpeechToTextAPI_zod_speechToTextBodyDiarizationThresholdMinOne as speechToTextBodyDiarizationThresholdMinOne, elevenLabsSpeechToTextAPI_zod_speechToTextBodyDiarizeDefault as speechToTextBodyDiarizeDefault, elevenLabsSpeechToTextAPI_zod_speechToTextBodyFileFormatDefault as speechToTextBodyFileFormatDefault, elevenLabsSpeechToTextAPI_zod_speechToTextBodyKeytermsDefault as speechToTextBodyKeytermsDefault, elevenLabsSpeechToTextAPI_zod_speechToTextBodyNoVerbatimDefault as speechToTextBodyNoVerbatimDefault, elevenLabsSpeechToTextAPI_zod_speechToTextBodyNumSpeakersMaxOne as speechToTextBodyNumSpeakersMaxOne, elevenLabsSpeechToTextAPI_zod_speechToTextBodySeedMaxOne as speechToTextBodySeedMaxOne, elevenLabsSpeechToTextAPI_zod_speechToTextBodySeedMinOne as speechToTextBodySeedMinOne, elevenLabsSpeechToTextAPI_zod_speechToTextBodyTagAudioEventsDefault as speechToTextBodyTagAudioEventsDefault, elevenLabsSpeechToTextAPI_zod_speechToTextBodyTemperatureMaxOne as speechToTextBodyTemperatureMaxOne, elevenLabsSpeechToTextAPI_zod_speechToTextBodyTemperatureMinOne as speechToTextBodyTemperatureMinOne, elevenLabsSpeechToTextAPI_zod_speechToTextBodyTimestampsGranularityDefault as speechToTextBodyTimestampsGranularityDefault, elevenLabsSpeechToTextAPI_zod_speechToTextBodyUseMultiChannelDefault as speechToTextBodyUseMultiChannelDefault, elevenLabsSpeechToTextAPI_zod_speechToTextBodyWebhookDefault as speechToTextBodyWebhookDefault, elevenLabsSpeechToTextAPI_zod_speechToTextHeader as speechToTextHeader, elevenLabsSpeechToTextAPI_zod_speechToTextQueryEnableLoggingDefault as speechToTextQueryEnableLoggingDefault, elevenLabsSpeechToTextAPI_zod_speechToTextQueryParams as speechToTextQueryParams, elevenLabsSpeechToTextAPI_zod_speechToTextResponse as speechToTextResponse };
 }

-export { AssemblyAIAdapter, Chapter
+export { AssemblyAIAdapter, Chapter as AssemblyAIChapter, type AssemblyAIConfig, ContentSafetyLabelsResult as AssemblyAIContentSafetyResult, Entity as AssemblyAIEntity, AutoHighlightsResult as AssemblyAIHighlightsResult, TranscriptOptionalParams as AssemblyAIOptions, SentimentAnalysisResult$1 as AssemblyAISentimentResult, AssemblyAIStreamingOptions, TopicDetectionModelResult as AssemblyAITopicsResult, index$6 as AssemblyAITypes, type AudioChunkAckMessage, AudioInput, AudioResponseFormat, AzureSTTAdapter, Status as AzureStatus, index$5 as AzureTypes, BaseAdapter, CreateTranscriptionRequest, CreateTranscriptionRequestModel, CreateTranscriptionResponseDiarizedJson, CreateTranscriptionResponseVerboseJson, DeepgramAdapter, type DeepgramConfig, ListenV1ResponseMetadata as DeepgramMetadata, ListenV1MediaTranscribeParams as DeepgramOptions, DeepgramStreamingOptions, index$3 as DeepgramTypes, ElevenLabsAdapter, type ElevenLabsConfig, index as ElevenLabsTypes, elevenLabsSpeechToTextAPI_zod as ElevenLabsZodSchemas, type EndRecordingMessage, type EndSessionMessage, type GetProjectRequestV1Response, GladiaAdapter, AudioToLlmListConfigDTO as GladiaAudioToLlmConfig, AudioToLlmListDTO as GladiaAudioToLlmResult, ChapterizationDTO as GladiaChapters, CodeSwitchingConfigDTO as GladiaCodeSwitchingConfig, NamedEntityRecognitionDTO as GladiaEntities, ModerationDTO as GladiaModeration, InitTranscriptionRequest as GladiaOptions, SentimentAnalysisDTO as GladiaSentiment, SpeakerReidentificationDTO as GladiaSpeakerReidentification, GladiaStreamingOptions, StreamingRequest as GladiaStreamingRequest, StructuredDataExtractionDTO as GladiaStructuredData, TranslationDTO as GladiaTranslation, index$7 as GladiaTypes, InitTranscriptionRequest, LanguageIdentificationMode, LanguageIdentificationProperties, type ListProjectRequestsV1Response, type ListTranscriptionResponseItemsItem, ListTranscriptsOptions, ListTranscriptsParams, ListenV1EncodingParameter, ListenV1LanguageParameter, ListenV1MediaTranscribeParams, ListenV1ModelParameter, ListenV1Response, ListenV1ResponseResultsChannelsItemAlternativesItem, ListenV1ResponseResultsChannelsItemAlternativesItemWordsItem, ListenV1ResponseResultsUtterancesItem, ListenV1VersionParameter, ManageV1FilterEndpointParameter, ManageV1FilterStatusParameter, ManageV1ProjectsRequestsListParams, type NamedEntityRecognitionMessage, streamingTypes as OpenAIStreamingTypes, index$4 as OpenAITypes, OpenAIWhisperAdapter, CreateTranscriptionRequest as OpenAIWhisperOptions, type PaginatedTranscriptions, type PostChapterizationMessage, type PostFinalTranscriptMessage, type PostSummarizationMessage, type PostTranscriptMessage, PreRecordedResponse, ProfanityFilterMode, type ProjectRequestResponse, ProviderCapabilities, type ProviderConfig, PunctuationMode, type SentimentAnalysisMessage, SonioxAdapter, type SonioxConfig, SonioxModelCode, index$1 as SonioxTypes, SpeakV1ContainerParameter, SpeakV1EncodingParameter, SpeakV1SampleRateParameter, type SpeechEndMessage, type SpeechStartMessage, SpeechmaticsAdapter, type SpeechmaticsConfig, index$2 as SpeechmaticsTypes, speechmaticsASRRESTAPI_zod as SpeechmaticsZodSchemas, type StartRecordingMessage, type StartSessionMessage, type StopRecordingAckMessage, StreamEvent, StreamingCallbacks, StreamingOptions, StreamingRequest, type StreamingResponse, StreamingSession, StreamingSupportedBitDepthEnum, StreamingSupportedEncodingEnum, StreamingSupportedModels, StreamingSupportedSampleRateEnum, StreamingUpdateConfiguration, TranscribeOptions, Transcript, type TranscriptListItem, type TranscriptMessage, TranscriptOptionalParamsSpeechModel, type TranscriptParams, TranscriptStatus, TranscriptUtterance, TranscriptWord, Transcription$1 as Transcription, type TranscriptionAdapter, TranscriptionControllerListV2Params, TranscriptionControllerListV2StatusItem, TranscriptionDTO, TranscriptionLanguageCodeEnum, TranscriptionProperties, TranscriptionProvider, type TranscriptionsListParams, type TranslationMessage, UnifiedTranscriptResponse, UtteranceDTO, VoiceRouter, type VoiceRouterConfig, type WebHook, type WebHookEvents, WordDTO, createAssemblyAIAdapter, createAzureSTTAdapter, createDeepgramAdapter, createElevenLabsAdapter, createGladiaAdapter, createOpenAIWhisperAdapter, createSonioxAdapter, createSpeechmaticsAdapter, createTemporaryToken, createTranscript, createTranscription, createVoiceRouter, deleteTranscript as deleteTranscriptAPI, getTranscript as getTranscriptAPI, listTranscripts as listTranscriptsAPI, preRecordedControllerDeletePreRecordedJobV2, preRecordedControllerGetAudioV2, preRecordedControllerGetPreRecordedJobV2, preRecordedControllerInitPreRecordedJobV2, streamingControllerDeleteStreamingJobV2, streamingControllerGetAudioV2, streamingControllerInitStreamingSessionV2, transcriptionControllerListV2, transcriptionsCreate, transcriptionsDelete, transcriptionsGet, transcriptionsList, transcriptionsListFiles, webHooksCreate, webHooksDelete, webHooksList };
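Taken together, the hunks above make one recurring change: every occurrence of the Speechmatics `operating_point` field in these zod-derived types is now the closed union `"standard" | "enhanced"` (the 0.8.2 side of each hunk is truncated in this rendering, so only the new union is fully visible). A minimal TypeScript sketch of what that narrowing means for callers; `TranscriptionConfigSketch` is a hypothetical name, abbreviated to the fields shown in the diff rather than the package's full `TranscriptionConfig` type:

```ts
// Hypothetical, abbreviated view of the transcription_config shape from the
// hunks above; only fields visible in this diff are included.
type OperatingPoint = "standard" | "enhanced";

interface TranscriptionConfigSketch {
  language: string;
  diarization?: "none" | "speaker" | "channel";
  operating_point?: OperatingPoint; // 0.8.3 types this as the closed union
  domain?: string;
  output_locale?: string;
}

const config: TranscriptionConfigSketch = {
  language: "en",
  operating_point: "enhanced", // any other string is now a compile-time error
};
```

The final hunk makes the companion change to the public surface: the flat re-export list gains provider-prefixed aliases (`Chapter as AssemblyAIChapter`, `InitTranscriptionRequest as GladiaOptions`, and so on), so same-named types from different providers can be imported unambiguously from the package root.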