voice-router-dev 0.2.4 → 0.2.6
This diff shows the changes between two publicly released versions of the package, as published to its public registry, and is provided for informational purposes only.
- package/dist/index.d.mts +1606 -641
- package/dist/index.d.ts +1606 -641
- package/dist/index.js +205 -37
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +205 -37
- package/dist/index.mjs.map +1 -1
- package/package.json +2 -3
package/dist/index.d.mts
CHANGED
@@ -4373,21 +4373,1311 @@ declare const StreamingSupportedModels: {
     readonly "solaria-1": "solaria-1";
 };
 
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * URL to which we'll make the callback request
+ */
+type SharedCallbackParameter = string;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * SharedCallbackMethodParameter type definition
+ */
+/**
+ * SharedCallbackMethodParameter type definition
+ */
+/**
+ * SharedCallbackMethodParameter type definition
+ */
+/**
+ * SharedCallbackMethodParameter type definition
+ */
+/**
+ * SharedCallbackMethodParameter type definition
+ */
+/**
+ * SharedCallbackMethodParameter type definition
+ */
+/**
+ * SharedCallbackMethodParameter type definition
+ */
+/**
+ * SharedCallbackMethodParameter type definition
+ */
+type SharedCallbackMethodParameter = typeof SharedCallbackMethodParameter[keyof typeof SharedCallbackMethodParameter];
+declare const SharedCallbackMethodParameter: {
+    readonly POST: "POST";
+    readonly PUT: "PUT";
+};
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+type SharedExtraParameter = string | string[];
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Recognizes the sentiment throughout a transcript or text
+ */
+type SharedSentimentParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+type SharedSummarizeParameter = "v2" | boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+type SharedTagParameter = string | string[];
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Detect topics throughout a transcript or text
+ */
+type SharedTopicsParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+type SharedCustomTopicParameter = string | string[];
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * SharedCustomTopicModeParameter type definition
+ */
+/**
+ * SharedCustomTopicModeParameter type definition
+ */
+/**
+ * SharedCustomTopicModeParameter type definition
+ */
+/**
+ * SharedCustomTopicModeParameter type definition
+ */
+/**
+ * SharedCustomTopicModeParameter type definition
+ */
+/**
+ * SharedCustomTopicModeParameter type definition
+ */
+/**
+ * SharedCustomTopicModeParameter type definition
+ */
+/**
+ * SharedCustomTopicModeParameter type definition
+ */
+type SharedCustomTopicModeParameter = typeof SharedCustomTopicModeParameter[keyof typeof SharedCustomTopicModeParameter];
+declare const SharedCustomTopicModeParameter: {
+    readonly extended: "extended";
+    readonly strict: "strict";
+};
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Recognizes speaker intent throughout a transcript or text
+ */
+type SharedIntentsParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+type SharedCustomIntentParameter = string | string[];
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * SharedCustomIntentModeParameter type definition
+ */
+/**
+ * SharedCustomIntentModeParameter type definition
+ */
+/**
+ * SharedCustomIntentModeParameter type definition
+ */
+/**
+ * SharedCustomIntentModeParameter type definition
+ */
+/**
+ * SharedCustomIntentModeParameter type definition
+ */
+/**
+ * SharedCustomIntentModeParameter type definition
+ */
+/**
+ * SharedCustomIntentModeParameter type definition
+ */
+/**
+ * SharedCustomIntentModeParameter type definition
+ */
+type SharedCustomIntentModeParameter = typeof SharedCustomIntentModeParameter[keyof typeof SharedCustomIntentModeParameter];
+declare const SharedCustomIntentModeParameter: {
+    readonly extended: "extended";
+    readonly strict: "strict";
+};
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Identifies and extracts key entities from content in submitted audio
+ */
+type ListenV1DetectEntitiesParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+type ListenV1DetectLanguageParameter = boolean | string[];
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0
+ */
+type ListenV1DiarizeParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Dictation mode for controlling formatting with dictated speech
+ */
+type ListenV1DictationParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * ListenV1EncodingParameter type definition
+ */
+/**
+ * ListenV1EncodingParameter type definition
+ */
+/**
+ * ListenV1EncodingParameter type definition
+ */
+/**
+ * ListenV1EncodingParameter type definition
+ */
+/**
+ * ListenV1EncodingParameter type definition
+ */
+/**
+ * ListenV1EncodingParameter type definition
+ */
+/**
+ * ListenV1EncodingParameter type definition
+ */
+/**
+ * ListenV1EncodingParameter type definition
+ */
+type ListenV1EncodingParameter = typeof ListenV1EncodingParameter[keyof typeof ListenV1EncodingParameter];
+declare const ListenV1EncodingParameter: {
+    readonly linear16: "linear16";
+    readonly flac: "flac";
+    readonly mulaw: "mulaw";
+    readonly opus: "opus";
+    readonly speex: "speex";
+    readonly g729: "g729";
+};
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Filler Words can help transcribe interruptions in your audio, like "uh" and "um"
+ */
+type ListenV1FillerWordsParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Key term prompting can boost or suppress specialized terminology and brands. Only compatible with Nova-3
+ */
+type ListenV1KeytermParameter = string[];
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+type ListenV1KeywordsParameter = string | string[];
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * The [BCP-47 language tag](https://tools.ietf.org/html/bcp47) that hints at the primary spoken language. Depending on the Model and API endpoint you choose only certain languages are available
+ */
+type ListenV1LanguageParameter = string;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Spoken measurements will be converted to their corresponding abbreviations
+ */
+type ListenV1MeasurementsParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Transcribe each audio channel independently
+ */
+type ListenV1MultichannelParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Numerals converts numbers from written format to numerical format
+ */
+type ListenV1NumeralsParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Splits audio into paragraphs to improve transcript readability
+ */
+type ListenV1ParagraphsParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Profanity Filter looks for recognized profanity and converts it to the nearest recognized non-profane word or removes it from the transcript completely
+ */
+type ListenV1ProfanityFilterParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Add punctuation and capitalization to the transcript
+ */
+type ListenV1PunctuateParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+type ListenV1RedactParameterOneOfItem = (typeof ListenV1RedactParameterOneOfItem)[keyof typeof ListenV1RedactParameterOneOfItem];
+declare const ListenV1RedactParameterOneOfItem: {
+    readonly pci: "pci";
+    readonly pii: "pii";
+    readonly numbers: "numbers";
+};
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+
+type ListenV1RedactParameter = string | ListenV1RedactParameterOneOfItem[];
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+type ListenV1ReplaceParameter = string | string[];
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+type ListenV1SearchParameter = string | string[];
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Apply formatting to transcript output. When set to true, additional formatting will be applied to transcripts to improve readability
+ */
+type ListenV1SmartFormatParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Segments speech into meaningful semantic units
+ */
+type ListenV1UtterancesParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Seconds to wait before detecting a pause between words in submitted audio
+ */
+type ListenV1UttSplitParameter = number;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+type ListenV1VersionParameter = "latest" | string;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+/**
+ * Opts out requests from the Deepgram Model Improvement Program. Refer to our Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip
+ */
+type SharedMipOptOutParameter = boolean;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Deepgram API Specification
+ * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
+
+ * OpenAPI spec version: 1.0.0
+ */
+
+type ListenV1MediaTranscribeParams = {
+    /**
+     * URL to which we'll make the callback request
+     */
+    callback?: SharedCallbackParameter;
+    /**
+     * HTTP method by which the callback request will be made
+     */
+    callback_method?: SharedCallbackMethodParameter;
+    /**
+     * Arbitrary key-value pairs that are attached to the API response for usage in downstream processing
+     */
+    extra?: SharedExtraParameter;
+    /**
+     * Recognizes the sentiment throughout a transcript or text
+     */
+    sentiment?: SharedSentimentParameter;
+    /**
+     * Summarize content. For Listen API, supports string version option. For Read API, accepts boolean only.
+     */
+    summarize?: SharedSummarizeParameter;
+    /**
+     * Label your requests for the purpose of identification during usage reporting
+     */
+    tag?: SharedTagParameter;
+    /**
+     * Detect topics throughout a transcript or text
+     */
+    topics?: SharedTopicsParameter;
+    /**
+     * Custom topics you want the model to detect within your input audio or text if present Submit up to `100`.
+     */
+    custom_topic?: SharedCustomTopicParameter;
+    /**
+     * Sets how the model will interpret strings submitted to the `custom_topic` param. When `strict`, the model will only return topics submitted using the `custom_topic` param. When `extended`, the model will return its own detected topics in addition to those submitted using the `custom_topic` param
+     */
+    custom_topic_mode?: SharedCustomTopicModeParameter;
+    /**
+     * Recognizes speaker intent throughout a transcript or text
+     */
+    intents?: SharedIntentsParameter;
+    /**
+     * Custom intents you want the model to detect within your input audio if present
+     */
+    custom_intent?: SharedCustomIntentParameter;
+    /**
+     * Sets how the model will interpret intents submitted to the `custom_intent` param. When `strict`, the model will only return intents submitted using the `custom_intent` param. When `extended`, the model will return its own detected intents in the `custom_intent` param.
+     */
+    custom_intent_mode?: SharedCustomIntentModeParameter;
+    /**
+     * Identifies and extracts key entities from content in submitted audio
+     */
+    detect_entities?: ListenV1DetectEntitiesParameter;
+    /**
+     * Identifies the dominant language spoken in submitted audio
+     */
+    detect_language?: ListenV1DetectLanguageParameter;
+    /**
+     * Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0
+     */
+    diarize?: ListenV1DiarizeParameter;
+    /**
+     * Dictation mode for controlling formatting with dictated speech
+     */
+    dictation?: ListenV1DictationParameter;
+    /**
+     * Specify the expected encoding of your submitted audio
+     */
+    encoding?: ListenV1EncodingParameter;
+    /**
+     * Filler Words can help transcribe interruptions in your audio, like "uh" and "um"
+     */
+    filler_words?: ListenV1FillerWordsParameter;
+    /**
+     * Key term prompting can boost or suppress specialized terminology and brands. Only compatible with Nova-3
+     */
+    keyterm?: ListenV1KeytermParameter;
+    /**
+     * Keywords can boost or suppress specialized terminology and brands
+     */
+    keywords?: ListenV1KeywordsParameter;
+    /**
+     * The [BCP-47 language tag](https://tools.ietf.org/html/bcp47) that hints at the primary spoken language. Depending on the Model and API endpoint you choose only certain languages are available
+     */
+    language?: ListenV1LanguageParameter;
+    /**
+     * Spoken measurements will be converted to their corresponding abbreviations
+     */
+    measurements?: ListenV1MeasurementsParameter;
+    /**
+     * AI model used to process submitted audio
+     */
+    model?: ListenV1ModelParameter;
+    /**
+     * Transcribe each audio channel independently
+     */
+    multichannel?: ListenV1MultichannelParameter;
+    /**
+     * Numerals converts numbers from written format to numerical format
+     */
+    numerals?: ListenV1NumeralsParameter;
+    /**
+     * Splits audio into paragraphs to improve transcript readability
+     */
+    paragraphs?: ListenV1ParagraphsParameter;
+    /**
+     * Profanity Filter looks for recognized profanity and converts it to the nearest recognized non-profane word or removes it from the transcript completely
+     */
+    profanity_filter?: ListenV1ProfanityFilterParameter;
+    /**
+     * Add punctuation and capitalization to the transcript
+     */
+    punctuate?: ListenV1PunctuateParameter;
+    /**
+     * Redaction removes sensitive information from your transcripts
+     */
+    redact?: ListenV1RedactParameter;
+    /**
+     * Search for terms or phrases in submitted audio and replaces them
+     */
+    replace?: ListenV1ReplaceParameter;
+    /**
+     * Search for terms or phrases in submitted audio
+     */
+    search?: ListenV1SearchParameter;
+    /**
+     * Apply formatting to transcript output. When set to true, additional formatting will be applied to transcripts to improve readability
+     */
+    smart_format?: ListenV1SmartFormatParameter;
+    /**
+     * Segments speech into meaningful semantic units
+     */
+    utterances?: ListenV1UtterancesParameter;
+    /**
+     * Seconds to wait before detecting a pause between words in submitted audio
+     */
+    utt_split?: ListenV1UttSplitParameter;
+    /**
+     * Version of an AI model to use
+     */
+    version?: ListenV1VersionParameter;
+    /**
+     * Opts out requests from the Deepgram Model Improvement Program. Refer to our Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip
+     */
+    mip_opt_out?: SharedMipOptOutParameter;
+};
+
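To make the Deepgram additions concrete, here is a minimal sketch of a request object typed with the new `ListenV1MediaTranscribeParams`. The import path is an assumption (only the declarations are visible in this diff), and the callback URL is illustrative; the enum-like consts (`SharedCallbackMethodParameter`, `ListenV1RedactParameterOneOfItem`) constrain the string literals used here. The diff continues below with the AssemblyAI types.

```ts
// Minimal sketch, assuming voice-router-dev exports the generated Deepgram types.
import type { ListenV1MediaTranscribeParams } from "voice-router-dev";

const deepgramParams: ListenV1MediaTranscribeParams = {
  model: "nova-3",                      // AI model used to process submitted audio
  smart_format: true,                   // extra formatting for readability
  diarize: true,                        // per-word speaker numbers starting at 0
  redact: ["pci", "pii"],               // ListenV1RedactParameterOneOfItem[]
  custom_topic: ["billing", "support"], // submit up to 100 custom topics
  custom_topic_mode: "strict",          // only return the submitted topics
  callback: "https://example.com/hook", // hypothetical callback URL
  callback_method: "POST",              // "POST" | "PUT"
};
```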
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * AssemblyAI API
+ * AssemblyAI API
+ * OpenAPI spec version: 1.3.4
+ */
+/**
+ * The model to summarize the transcript
+ */
+type SummaryModel = (typeof SummaryModel)[keyof typeof SummaryModel];
+declare const SummaryModel: {
+    readonly informative: "informative";
+    readonly conversational: "conversational";
+    readonly catchy: "catchy";
+};
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * AssemblyAI API
+ * AssemblyAI API
+ * OpenAPI spec version: 1.3.4
+ */
+/**
+ * The type of summary
+ */
+type SummaryType = (typeof SummaryType)[keyof typeof SummaryType];
+declare const SummaryType: {
+    readonly bullets: "bullets";
+    readonly bullets_verbose: "bullets_verbose";
+    readonly gist: "gist";
+    readonly headline: "headline";
+    readonly paragraph: "paragraph";
+};
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * AssemblyAI API
+ * AssemblyAI API
+ * OpenAPI spec version: 1.3.4
+ */
+/**
+ * How much to boost specified words
+ */
+type TranscriptBoostParam = (typeof TranscriptBoostParam)[keyof typeof TranscriptBoostParam];
+declare const TranscriptBoostParam: {
+    readonly low: "low";
+    readonly default: "default";
+    readonly high: "high";
+};
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * AssemblyAI API
+ * AssemblyAI API
+ * OpenAPI spec version: 1.3.4
+ */
+
+type TranscriptOptionalParamsLanguageCodeOneOf = TranscriptLanguageCode | string;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * AssemblyAI API
+ * AssemblyAI API
+ * OpenAPI spec version: 1.3.4
+ */
+
+/**
+ * The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
+The default value is 'en_us'.
+
+ */
+type TranscriptOptionalParamsLanguageCode = TranscriptOptionalParamsLanguageCodeOneOf | null;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * AssemblyAI API
+ * AssemblyAI API
+ * OpenAPI spec version: 1.3.4
+ */
+
+/**
+ * The replacement logic for detected PII, can be "entity_type" or "hash". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ */
+type TranscriptOptionalParamsRedactPiiSub = SubstitutionPolicy | null;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * AssemblyAI API
+ * AssemblyAI API
+ * OpenAPI spec version: 1.3.4
+ */
+/**
+ * Tells the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
+ */
+type TranscriptOptionalParamsSpeakersExpected = number | null;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * AssemblyAI API
+ * AssemblyAI API
+ * OpenAPI spec version: 1.3.4
+ */
+
+/**
+ * The speech model to use for the transcription. When `null`, the "best" model is used.
+ */
+type TranscriptOptionalParamsSpeechModel = SpeechModel | null;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * AssemblyAI API
+ * AssemblyAI API
+ * OpenAPI spec version: 1.3.4
+ */
+/**
+ * Reject audio files that contain less than this fraction of speech.
+Valid values are in the range [0, 1] inclusive.
+
+ * @minimum 0
+ * @maximum 1
+ */
+type TranscriptOptionalParamsSpeechThreshold = number | null;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * AssemblyAI API
+ * AssemblyAI API
+ * OpenAPI spec version: 1.3.4
+ */
+/**
+ * The header name to be sent with the transcript completed or failed webhook requests
+ */
+type TranscriptOptionalParamsWebhookAuthHeaderName = string | null;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * AssemblyAI API
+ * AssemblyAI API
+ * OpenAPI spec version: 1.3.4
+ */
+/**
+ * The header value to send back with the transcript completed or failed webhook requests for added security
+ */
+type TranscriptOptionalParamsWebhookAuthHeaderValue = string | null;
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * AssemblyAI API
+ * AssemblyAI API
+ * OpenAPI spec version: 1.3.4
+ */
+
+/**
+ * The parameters for creating a transcript
+ */
+interface TranscriptOptionalParams {
+    /** The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
+The default value is 'en_us'.
+ */
+    language_code?: TranscriptOptionalParamsLanguageCode;
+    /** Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false. */
+    language_detection?: boolean;
+    /**
+     * The confidence threshold for the automatically detected language.
+An error will be returned if the language confidence is below this threshold.
+Defaults to 0.
+
+     * @minimum 0
+     * @maximum 1
+     */
+    language_confidence_threshold?: number;
+    /** The speech model to use for the transcription. When `null`, the "best" model is used. */
+    speech_model?: TranscriptOptionalParamsSpeechModel;
+    /** Enable Automatic Punctuation, can be true or false */
+    punctuate?: boolean;
+    /** Enable Text Formatting, can be true or false */
+    format_text?: boolean;
+    /** Transcribe Filler Words, like "umm", in your media file; can be true or false */
+    disfluencies?: boolean;
+    /** Enable [Multichannel](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) transcription, can be true or false. */
+    multichannel?: boolean;
+    /** The URL to which we send webhook requests.
+We sends two different types of webhook requests.
+One request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.
+ */
+    webhook_url?: string;
+    /** The header name to be sent with the transcript completed or failed webhook requests */
+    webhook_auth_header_name?: TranscriptOptionalParamsWebhookAuthHeaderName;
+    /** The header value to send back with the transcript completed or failed webhook requests for added security */
+    webhook_auth_header_value?: TranscriptOptionalParamsWebhookAuthHeaderValue;
+    /** Enable Key Phrases, either true or false */
+    auto_highlights?: boolean;
+    /** The point in time, in milliseconds, to begin transcribing in your media file */
+    audio_start_from?: number;
+    /** The point in time, in milliseconds, to stop transcribing in your media file */
+    audio_end_at?: number;
+    /**
+     * The list of custom vocabulary to boost transcription probability for
+     * @deprecated
+     */
+    word_boost?: string[];
+    /** How much to boost specified words */
+    boost_param?: TranscriptBoostParam;
+    /** Filter profanity from the transcribed text, can be true or false */
+    filter_profanity?: boolean;
+    /** Redact PII from the transcribed text using the Redact PII model, can be true or false */
+    redact_pii?: boolean;
+    /** Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details. */
+    redact_pii_audio?: boolean;
+    /** Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details. */
+    redact_pii_audio_quality?: RedactPiiAudioQuality;
+    /** The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details. */
+    redact_pii_policies?: PiiPolicy[];
+    /** The replacement logic for detected PII, can be "entity_type" or "hash". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details. */
+    redact_pii_sub?: TranscriptOptionalParamsRedactPiiSub;
+    /** Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false */
+    speaker_labels?: boolean;
+    /** Tells the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details. */
+    speakers_expected?: TranscriptOptionalParamsSpeakersExpected;
+    /** Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false */
+    content_safety?: boolean;
+    /**
+     * The confidence threshold for the Content Moderation model. Values must be between 25 and 100.
+     * @minimum 25
+     * @maximum 100
+     */
+    content_safety_confidence?: number;
+    /** Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false */
+    iab_categories?: boolean;
+    /** Customize how words are spelled and formatted using to and from values */
+    custom_spelling?: TranscriptCustomSpelling[];
+    /** <Warning>`keyterms_prompt` is only supported when the `speech_model` is specified as `slam-1`</Warning>
+Improve accuracy with up to 1000 domain-specific words or phrases (maximum 6 words per phrase).
+ */
+    keyterms_prompt?: string[];
+    /**
+     * This parameter does not currently have any functionality attached to it.
+     * @deprecated
+     */
+    prompt?: string;
+    /** Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false */
+    sentiment_analysis?: boolean;
+    /** Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false */
+    auto_chapters?: boolean;
+    /** Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false */
+    entity_detection?: boolean;
+    /**
+     * Reject audio files that contain less than this fraction of speech.
+Valid values are in the range [0, 1] inclusive.
+
+     * @minimum 0
+     * @maximum 1
+     */
+    speech_threshold?: TranscriptOptionalParamsSpeechThreshold;
+    /** Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false */
+    summarization?: boolean;
+    /** The model to summarize the transcript */
+    summary_model?: SummaryModel;
+    /** The type of summary */
+    summary_type?: SummaryType;
+    /**
+     * Enable custom topics, either true or false
+     * @deprecated
+     */
+    custom_topics?: boolean;
+    /** The list of custom topics */
+    topics?: string[];
+}
+
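The AssemblyAI `TranscriptOptionalParams` interface above can be exercised the same way. A minimal sketch, assuming the type is exported; `"slam-1"` is taken from the `keyterms_prompt` note, since the underlying `SpeechModel` union is declared elsewhere in the package. The Gladia section of the diff follows.

```ts
// Minimal sketch, assuming voice-router-dev exports the generated AssemblyAI types.
import type { TranscriptOptionalParams } from "voice-router-dev";

const assemblyParams: TranscriptOptionalParams = {
  speech_model: "slam-1",                     // SpeechModel | null; literal per the keyterms_prompt note
  keyterms_prompt: ["voice router", "orval"], // slam-1 only; max 6 words per phrase
  speaker_labels: true,                       // enable speaker diarization
  speakers_expected: 2,                       // number | null
  summarization: true,
  summary_model: "conversational",            // SummaryModel
  summary_type: "bullets",                    // SummaryType
  speech_threshold: 0.5,                      // reject audio with less than 50% speech
};
```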
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Gladia Control API
+ * OpenAPI spec version: 1.0
+ */
+/**
+ * Custom metadata you can attach to this transcription
+ */
+type InitTranscriptionRequestCustomMetadata = {
+    [key: string]: unknown;
+};
+
+/**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Gladia Control API
+ * OpenAPI spec version: 1.0
+ */
+
+interface InitTranscriptionRequest {
+    /**
+     * **[Deprecated]** Context to feed the transcription model with for possible better accuracy
+     * @deprecated
+     */
+    context_prompt?: string;
+    /** **[Beta]** Can be either boolean to enable custom_vocabulary for this audio or an array with specific vocabulary list to feed the transcription model with */
+    custom_vocabulary?: boolean;
+    /** **[Beta]** Custom vocabulary configuration, if `custom_vocabulary` is enabled */
+    custom_vocabulary_config?: CustomVocabularyConfigDTO;
+    /**
+     * **[Deprecated]** Use `language_config` instead. Detect the language from the given audio
+     * @deprecated
+     */
+    detect_language?: boolean;
+    /**
+     * **[Deprecated]** Use `language_config` instead.Detect multiple languages in the given audio
+     * @deprecated
+     */
+    enable_code_switching?: boolean;
+    /**
+     * **[Deprecated]** Use `language_config` instead. Specify the configuration for code switching
+     * @deprecated
+     */
+    code_switching_config?: CodeSwitchingConfigDTO;
+    /**
+     * **[Deprecated]** Use `language_config` instead. Set the spoken language for the given audio (ISO 639 standard)
+     * @deprecated
+     */
+    language?: TranscriptionLanguageCodeEnum;
+    /**
+     * **[Deprecated]** Use `callback`/`callback_config` instead. Callback URL we will do a `POST` request to with the result of the transcription
+     * @deprecated
+     */
+    callback_url?: string;
+    /** Enable callback for this transcription. If true, the `callback_config` property will be used to customize the callback behaviour */
+    callback?: boolean;
+    /** Customize the callback behaviour (url and http method) */
+    callback_config?: CallbackConfigDto;
+    /** Enable subtitles generation for this transcription */
+    subtitles?: boolean;
+    /** Configuration for subtitles generation if `subtitles` is enabled */
+    subtitles_config?: SubtitlesConfigDTO;
+    /** Enable speaker recognition (diarization) for this audio */
+    diarization?: boolean;
+    /** Speaker recognition configuration, if `diarization` is enabled */
+    diarization_config?: DiarizationConfigDTO;
+    /** **[Beta]** Enable translation for this audio */
+    translation?: boolean;
+    /** **[Beta]** Translation configuration, if `translation` is enabled */
+    translation_config?: TranslationConfigDTO;
+    /** **[Beta]** Enable summarization for this audio */
+    summarization?: boolean;
+    /** **[Beta]** Summarization configuration, if `summarization` is enabled */
+    summarization_config?: SummarizationConfigDTO;
+    /** **[Alpha]** Enable moderation for this audio */
+    moderation?: boolean;
+    /** **[Alpha]** Enable named entity recognition for this audio */
+    named_entity_recognition?: boolean;
+    /** **[Alpha]** Enable chapterization for this audio */
+    chapterization?: boolean;
+    /** **[Alpha]** Enable names consistency for this audio */
+    name_consistency?: boolean;
+    /** **[Alpha]** Enable custom spelling for this audio */
+    custom_spelling?: boolean;
+    /** **[Alpha]** Custom spelling configuration, if `custom_spelling` is enabled */
+    custom_spelling_config?: CustomSpellingConfigDTO;
+    /** **[Alpha]** Enable structured data extraction for this audio */
+    structured_data_extraction?: boolean;
+    /** **[Alpha]** Structured data extraction configuration, if `structured_data_extraction` is enabled */
+    structured_data_extraction_config?: StructuredDataExtractionConfigDTO;
+    /** Enable sentiment analysis for this audio */
+    sentiment_analysis?: boolean;
+    /** **[Alpha]** Enable audio to llm processing for this audio */
+    audio_to_llm?: boolean;
+    /** **[Alpha]** Audio to llm configuration, if `audio_to_llm` is enabled */
+    audio_to_llm_config?: AudioToLlmListConfigDTO;
+    /** Custom metadata you can attach to this transcription */
+    custom_metadata?: InitTranscriptionRequestCustomMetadata;
+    /** Enable sentences for this audio */
+    sentences?: boolean;
+    /** **[Alpha]** Allows to change the output display_mode for this audio. The output will be reordered, creating new utterances when speakers overlapped */
+    display_mode?: boolean;
+    /** **[Alpha]** Use enhanced punctuation for this audio */
+    punctuation_enhanced?: boolean;
+    /** Specify the language configuration */
+    language_config?: LanguageConfig;
+    /** URL to a Gladia file or to an external audio or video file */
+    audio_url: string;
+}
+
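Gladia's `InitTranscriptionRequest` is the only one of the four request types with a required field (`audio_url`). A minimal sketch, again assuming the type is exported; the URL and metadata values are illustrative. The OpenAI section of the diff follows.

```ts
// Minimal sketch, assuming voice-router-dev exports the generated Gladia types.
import type { InitTranscriptionRequest } from "voice-router-dev";

const gladiaRequest: InitTranscriptionRequest = {
  audio_url: "https://example.com/call.wav", // required: Gladia file or external media URL
  diarization: true,                         // speaker recognition
  subtitles: true,                           // subtitles generation
  sentiment_analysis: true,
  custom_metadata: { ticket: "ABC-123" },    // free-form key/value map
};
```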
5466
|
+
/**
|
|
5467
|
+
* Generated by orval v7.9.0 🍺
|
|
5468
|
+
* Do not edit manually.
|
|
5469
|
+
* OpenAI API
|
|
5470
|
+
* The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
|
|
5471
|
+
* OpenAPI spec version: 2.3.0
|
|
5472
|
+
*/
|
|
5473
|
+
/**
|
|
5474
|
+
* The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, the only supported format is `json`. For `gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and `diarized_json`, with `diarized_json` required to receive speaker annotations.
|
|
5475
|
+
|
|
5476
|
+
*/
|
|
5477
|
+
type AudioResponseFormat = (typeof AudioResponseFormat)[keyof typeof AudioResponseFormat];
|
|
5478
|
+
declare const AudioResponseFormat: {
|
|
5479
|
+
readonly json: "json";
|
|
5480
|
+
readonly text: "text";
|
|
5481
|
+
readonly srt: "srt";
|
|
5482
|
+
readonly verbose_json: "verbose_json";
|
|
5483
|
+
readonly vtt: "vtt";
|
|
5484
|
+
readonly diarized_json: "diarized_json";
|
|
5485
|
+
};
|
|
5486
|
+
|
|
5487
|
+
/**
|
|
5488
|
+
* Generated by orval v7.9.0 🍺
|
|
5489
|
+
* Do not edit manually.
|
|
5490
|
+
* OpenAI API
|
|
5491
|
+
* The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
|
|
5492
|
+
* OpenAPI spec version: 2.3.0
|
|
5493
|
+
*/
|
|
5494
|
+
/**
|
|
5495
|
+
* ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source Whisper V2 model), and `gpt-4o-transcribe-diarize`.
|
|
5496
|
+
|
|
5497
|
+
*/
|
|
5498
|
+
type CreateTranscriptionRequestModel = string | "whisper-1" | "gpt-4o-transcribe" | "gpt-4o-mini-transcribe" | "gpt-4o-transcribe-diarize";
|
|
5499
|
+
|
|
5500
|
+
/**
|
|
5501
|
+
* Generated by orval v7.9.0 🍺
|
|
5502
|
+
* Do not edit manually.
|
|
5503
|
+
* OpenAI API
|
|
5504
|
+
* The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
|
|
5505
|
+
* OpenAPI spec version: 2.3.0
|
|
5506
|
+
*/
|
|
5507
|
+
type CreateTranscriptionRequestStream = boolean | null;
|
|
5508
|
+
|
|
5509
|
+
/**
|
|
5510
|
+
* Generated by orval v7.9.0 🍺
|
|
5511
|
+
* Do not edit manually.
|
|
5512
|
+
* OpenAI API
|
|
5513
|
+
* The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
|
|
5514
|
+
* OpenAPI spec version: 2.3.0
|
|
5515
|
+
*/
|
|
5516
|
+
type CreateTranscriptionRequestTimestampGranularitiesItem = (typeof CreateTranscriptionRequestTimestampGranularitiesItem)[keyof typeof CreateTranscriptionRequestTimestampGranularitiesItem];
|
|
5517
|
+
declare const CreateTranscriptionRequestTimestampGranularitiesItem: {
|
|
5518
|
+
readonly word: "word";
|
|
5519
|
+
readonly segment: "segment";
|
|
5520
|
+
};
|
|
5521
|
+
|
|
5522
|
+
/**
|
|
5523
|
+
* Generated by orval v7.9.0 🍺
|
|
5524
|
+
* Do not edit manually.
|
|
5525
|
+
* OpenAI API
|
|
5526
|
+
* The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
|
|
5527
|
+
* OpenAPI spec version: 2.3.0
|
|
5528
|
+
*/
|
|
5529
|
+
/**
|
|
5530
|
+
* Must be set to `server_vad` to enable manual chunking using server side VAD.
|
|
5531
|
+
*/
|
|
5532
|
+
type VadConfigType = (typeof VadConfigType)[keyof typeof VadConfigType];
|
|
5533
|
+
declare const VadConfigType: {
|
|
5534
|
+
readonly server_vad: "server_vad";
|
|
5535
|
+
};
|
|
5536
|
+
|
|
5537
|
+
/**
|
|
5538
|
+
* Generated by orval v7.9.0 🍺
|
|
5539
|
+
* Do not edit manually.
|
|
5540
|
+
* OpenAI API
|
|
5541
|
+
* The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
|
|
5542
|
+
* OpenAPI spec version: 2.3.0
|
|
5543
|
+
*/
|
|
5544
|
+
|
|
5545
|
+
interface VadConfig {
|
|
5546
|
+
/** Must be set to `server_vad` to enable manual chunking using server side VAD. */
|
|
5547
|
+
type: VadConfigType;
|
|
5548
|
+
/** Amount of audio to include before the VAD detected speech (in
|
|
5549
|
+
milliseconds).
|
|
5550
|
+
*/
|
|
5551
|
+
prefix_padding_ms?: number;
|
|
5552
|
+
/** Duration of silence to detect speech stop (in milliseconds).
|
|
5553
|
+
With shorter values the model will respond more quickly,
|
|
5554
|
+
but may jump in on short pauses from the user.
|
|
5555
|
+
*/
|
|
5556
|
+
silence_duration_ms?: number;
|
|
5557
|
+
/** Sensitivity threshold (0.0 to 1.0) for voice activity detection. A
|
|
5558
|
+
higher threshold will require louder audio to activate the model, and
|
|
5559
|
+
thus might perform better in noisy environments.
|
|
5560
|
+
*/
|
|
5561
|
+
threshold?: number;
|
|
5562
|
+
}
|
|
5563
|
+
|
|
5564
|
+
/**
|
|
5565
|
+
* Generated by orval v7.9.0 🍺
|
|
5566
|
+
* Do not edit manually.
|
|
5567
|
+
* OpenAI API
|
|
5568
|
+
* The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
|
|
5569
|
+
* OpenAPI spec version: 2.3.0
|
|
5570
|
+
*/
|
|
5571
|
+
|
|
5572
|
+
/**
|
|
5573
|
+
* Controls how the audio is cut into chunks. When set to `"auto"`, the server first normalizes loudness and then uses voice activity detection (VAD) to choose boundaries. `server_vad` object can be provided to tweak VAD detection parameters manually. If unset, the audio is transcribed as a single block. Required when using `gpt-4o-transcribe-diarize` for inputs longer than 30 seconds.
|
|
5574
|
+
*/
|
|
5575
|
+
type TranscriptionChunkingStrategyAnyOf = "auto" | VadConfig;
|
|
5576
|
+
|
|
5577
|
+
/**
|
|
5578
|
+
* Generated by orval v7.9.0 🍺
|
|
5579
|
+
* Do not edit manually.
|
|
5580
|
+
* OpenAI API
|
|
5581
|
+
* The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
|
|
5582
|
+
* OpenAPI spec version: 2.3.0
|
|
5583
|
+
*/
|
|
5584
|
+
|
|
5585
|
+
type TranscriptionChunkingStrategy = TranscriptionChunkingStrategyAnyOf | null;
+
+ /**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * OpenAI API
+ * The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
+ * OpenAPI spec version: 2.3.0
+ */
+ type TranscriptionInclude = (typeof TranscriptionInclude)[keyof typeof TranscriptionInclude];
+ declare const TranscriptionInclude: {
+ readonly logprobs: "logprobs";
+ };
+
+ /**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * OpenAI API
+ * The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
+ * OpenAPI spec version: 2.3.0
+ */
+
+ interface CreateTranscriptionRequest {
+ /** The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ */
+ file: Blob;
+ /** ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+ */
+ model: CreateTranscriptionRequestModel;
+ /** The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) format will improve accuracy and latency.
+ */
+ language?: string;
+ /** An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) should match the audio language. This field is not supported when using `gpt-4o-transcribe-diarize`.
+ */
+ prompt?: string;
+ response_format?: AudioResponseFormat;
+ /** The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+ */
+ temperature?: number;
+ /** Additional information to include in the transcription response.
+ `logprobs` will return the log probabilities of the tokens in the
+ response to understand the model's confidence in the transcription.
+ `logprobs` only works with response_format set to `json` and only with
+ the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`. This field is not supported when using `gpt-4o-transcribe-diarize`.
+ */
+ include?: TranscriptionInclude[];
+ /** The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
+ This option is not available for `gpt-4o-transcribe-diarize`.
+ */
+ timestamp_granularities?: CreateTranscriptionRequestTimestampGranularitiesItem[];
+ stream?: CreateTranscriptionRequestStream;
+ chunking_strategy?: TranscriptionChunkingStrategy;
+ /**
+ * Optional list of speaker names that correspond to the audio samples provided in `known_speaker_references[]`. Each entry should be a short identifier (for example `customer` or `agent`). Up to 4 speakers are supported.
+
+ * @maxItems 4
+ */
+ known_speaker_names?: string[];
+ /**
+ * Optional list of audio samples (as [data URLs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs)) that contain known speaker references matching `known_speaker_names[]`. Each sample must be between 2 and 10 seconds, and can use any of the same input audio formats supported by `file`.
+
+ * @maxItems 4
+ */
+ known_speaker_references?: string[];
+ }
+
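The `CreateTranscriptionRequest` shape added above is the full OpenAI request surface, including the new diarization fields. A hedged sketch of a diarized request — the `audioBlob` and data-URL samples are placeholders assumed to exist, and the model literal is assumed to be a member of `CreateTranscriptionRequestModel`:

```typescript
// Sketch only; the inputs below are placeholders, not real data.
declare const audioBlob: Blob;        // the audio file to transcribe
declare const agentSample: string;    // data: URL, 2-10 s of the agent speaking
declare const customerSample: string; // data: URL, 2-10 s of the customer

const request: CreateTranscriptionRequest = {
  file: audioBlob,
  model: "gpt-4o-transcribe-diarize",
  // Names pair index-by-index with the reference samples; max 4 of each.
  known_speaker_names: ["agent", "customer"],
  known_speaker_references: [agentSample, customerSample],
  // Required for gpt-4o-transcribe-diarize when the input exceeds 30 seconds.
  chunking_strategy: "auto",
};
```

Note that `prompt`, `include`, and `timestamp_granularities` are documented above as unsupported with `gpt-4o-transcribe-diarize`, so the sketch leaves them out.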
  /**
  * Unified types for the Voice Router SDK
  * These types provide a provider-agnostic interface for transcription services
  */

+ /**
+ * Speechmatics operating point (model) type
+ * Manually defined as Speechmatics OpenAPI spec doesn't export this cleanly
+ */
+ type SpeechmaticsOperatingPoint = "standard" | "enhanced";
  /**
  * Unified transcription model type with autocomplete for all providers
  *
  * Includes all known models from:
  * - Deepgram: nova-3, nova-2, enhanced, base, whisper, etc.
+ * - AssemblyAI: best, slam-1, universal
  * - Gladia: solaria-1
+ * - Speechmatics: standard, enhanced
  *
  * Also accepts any string for future/custom models.
  */
- type TranscriptionModel = ListenV1ModelParameter | StreamingSupportedModels;
+ type TranscriptionModel = ListenV1ModelParameter | StreamingSupportedModels | SpeechModel | SpeechmaticsOperatingPoint;
+ /**
+ * Unified transcription language type with autocomplete for all providers
+ *
+ * Includes language codes from AssemblyAI and Gladia OpenAPI specs.
+ * Deepgram uses string for flexibility.
+ */
+ type TranscriptionLanguage = TranscriptLanguageCode | TranscriptionLanguageCodeEnum | string;
+
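The widened unions give autocomplete across every provider while still accepting arbitrary strings. A few assignments that should typecheck against the types above (the model names are the documented examples, not an exhaustive list):

```typescript
const deepgramModel: TranscriptionModel = "nova-3";       // Deepgram
const assemblyModel: TranscriptionModel = "slam-1";       // AssemblyAI (SpeechModel)
const smModel: TranscriptionModel = "enhanced";           // Speechmatics operating point
const language: TranscriptionLanguage = "en_us";          // AssemblyAI-style code
const customLanguage: TranscriptionLanguage = "x-future"; // plain string fallback
```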
  /**
  * Supported transcription providers
  */

@@ -4443,12 +5733,44 @@ type AudioInput = {
  };
  /**
  * Common transcription options across all providers
+ *
+ * For provider-specific options, use the typed provider options:
+ * - `deepgram`: Full Deepgram API options
+ * - `assemblyai`: Full AssemblyAI API options
+ * - `gladia`: Full Gladia API options
  */
  interface TranscribeOptions {
- /**
-
+ /**
+ * Model to use for transcription (provider-specific)
+ *
+ * Type-safe model selection derived from OpenAPI specs:
+ * - Deepgram: 'nova-3', 'nova-2', 'enhanced', 'base', etc.
+ * - AssemblyAI: 'best', 'slam-1', 'universal'
+ * - Speechmatics: 'standard', 'enhanced' (operating point)
+ * - Gladia: 'solaria-1' (streaming only)
+ *
+ * @see TranscriptionModel for full list of available models
+ */
+ model?: TranscriptionModel;
+ /**
+ * Language code with autocomplete from OpenAPI specs
+ *
+ * @example 'en', 'en_us', 'fr', 'de', 'es'
+ * @see TranscriptionLanguage for full list
+ */
+ language?: TranscriptionLanguage;
  /** Enable automatic language detection */
  languageDetection?: boolean;
+ /**
+ * Enable code switching (multilingual audio detection)
+ * Supported by: Gladia
+ */
+ codeSwitching?: boolean;
+ /**
+ * Code switching configuration (Gladia-specific)
+ * @see GladiaCodeSwitchingConfig
+ */
+ codeSwitchingConfig?: CodeSwitchingConfigDTO;
  /** Enable speaker diarization */
  diarization?: boolean;
  /** Expected number of speakers (for diarization) */

@@ -4467,8 +5789,32 @@ interface TranscribeOptions {
  piiRedaction?: boolean;
  /** Webhook URL for async results */
  webhookUrl?: string;
- /**
-
+ /**
+ * Audio-to-LLM configuration (Gladia-specific)
+ * Run custom LLM prompts on the transcription
+ * @see GladiaAudioToLlmConfig
+ */
+ audioToLlm?: AudioToLlmListConfigDTO;
+ /**
+ * Deepgram-specific options (passed directly to API)
+ * @see https://developers.deepgram.com/reference/listen-file
+ */
+ deepgram?: Partial<ListenV1MediaTranscribeParams>;
+ /**
+ * AssemblyAI-specific options (passed directly to API)
+ * @see https://www.assemblyai.com/docs/api-reference/transcripts/submit
+ */
+ assemblyai?: Partial<TranscriptOptionalParams>;
+ /**
+ * Gladia-specific options (passed directly to API)
+ * @see https://docs.gladia.io/
+ */
+ gladia?: Partial<InitTranscriptionRequest>;
+ /**
+ * OpenAI Whisper-specific options (passed directly to API)
+ * @see https://platform.openai.com/docs/api-reference/audio/createTranscription
+ */
+ openai?: Partial<Omit<CreateTranscriptionRequest, "file" | "model">>;
  }
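`TranscribeOptions` now pairs one unified option set with typed per-provider passthrough bags. A hedged sketch of a call site — values are illustrative, and the provider-specific fields come from the generated `TranscriptOptionalParams` and `InitTranscriptionRequest` schemas referenced above:

```typescript
// Sketch only: unified fields cover the common case; the provider bags pass
// native options straight through to the underlying API.
const options: TranscribeOptions = {
  model: "nova-3",
  language: "en",
  diarization: true,
  webhookUrl: "https://example.com/webhooks/transcripts",  // hypothetical endpoint
  assemblyai: { speaker_labels: true, format_text: true }, // AssemblyAI-native
  gladia: { subtitles: true },                             // Gladia-native
};
```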
  /**
  * Speaker information from diarization

@@ -4985,81 +6331,16 @@ declare const StreamingSupportedSampleRateEnum: {
  * Gladia Control API
  * OpenAPI spec version: 1.0
  */
- /**
- * The bit depth of the audio stream
- */
- type StreamingSupportedBitDepthEnum = (typeof StreamingSupportedBitDepthEnum)[keyof typeof StreamingSupportedBitDepthEnum];
- declare const StreamingSupportedBitDepthEnum: {
- readonly NUMBER_8: 8;
- readonly NUMBER_16: 16;
- readonly NUMBER_24: 24;
- readonly NUMBER_32: 32;
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
- /**
- * ListenV1EncodingParameter type definition
- */
- /**
- * ListenV1EncodingParameter type definition
- */
- /**
- * ListenV1EncodingParameter type definition
- */
- /**
- * ListenV1EncodingParameter type definition
- */
- /**
- * ListenV1EncodingParameter type definition
- */
- /**
- * ListenV1EncodingParameter type definition
- */
- /**
- * ListenV1EncodingParameter type definition
- */
- /**
- * ListenV1EncodingParameter type definition
- */
- type ListenV1EncodingParameter = typeof ListenV1EncodingParameter[keyof typeof ListenV1EncodingParameter];
- declare const ListenV1EncodingParameter: {
- readonly linear16: "linear16";
- readonly flac: "flac";
- readonly mulaw: "mulaw";
- readonly opus: "opus";
- readonly speex: "speex";
- readonly g729: "g729";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
- /**
- * The [BCP-47 language tag](https://tools.ietf.org/html/bcp47) that hints at the primary spoken language. Depending on the Model and API endpoint you choose only certain languages are available
- */
- type ListenV1LanguageParameter = string;
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Deepgram API Specification
- * APIs for speech-to-text transcription, text-to-speech synthesis, language understanding, and account management.
-
- * OpenAPI spec version: 1.0.0
- */
- type ListenV1VersionParameter = "latest" | string;
+ /**
+ * The bit depth of the audio stream
+ */
+ type StreamingSupportedBitDepthEnum = (typeof StreamingSupportedBitDepthEnum)[keyof typeof StreamingSupportedBitDepthEnum];
+ declare const StreamingSupportedBitDepthEnum: {
+ readonly NUMBER_8: 8;
+ readonly NUMBER_16: 16;
+ readonly NUMBER_24: 24;
+ readonly NUMBER_32: 32;
+ };

  /**
  * Provider-specific streaming option types using OpenAPI-generated schemas

@@ -5561,27 +6842,6 @@ declare const AudioTranscriptionModel: {
  readonly "gpt-4o-transcribe-diarize": "gpt-4o-transcribe-diarize";
  };

- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * OpenAI API
- * The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
- * OpenAPI spec version: 2.3.0
- */
- /**
- * The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, the only supported format is `json`. For `gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and `diarized_json`, with `diarized_json` required to receive speaker annotations.
-
- */
- type AudioResponseFormat = (typeof AudioResponseFormat)[keyof typeof AudioResponseFormat];
- declare const AudioResponseFormat: {
- readonly json: "json";
- readonly text: "text";
- readonly srt: "srt";
- readonly verbose_json: "verbose_json";
- readonly vtt: "vtt";
- readonly diarized_json: "diarized_json";
- };
-
  /**
  * Gladia transcription provider adapter
  * Documentation: https://docs.gladia.io/

@@ -5734,8 +6994,33 @@ declare class GladiaAdapter extends BaseAdapter {
  */
  private extractUtterances;
  /**
- *
+ * Delete a transcription job and its associated data
+ *
+ * Removes the transcription data from Gladia's servers. This action is
+ * irreversible. Supports both pre-recorded and streaming job IDs.
+ *
+ * @param transcriptId - The ID of the transcript/job to delete
+ * @param jobType - Type of job: 'pre-recorded' or 'streaming' (defaults to 'pre-recorded')
+ * @returns Promise with success status
+ *
+ * @example Delete a pre-recorded transcript
+ * ```typescript
+ * const result = await adapter.deleteTranscript('abc123');
+ * if (result.success) {
+ * console.log('Transcript deleted successfully');
+ * }
+ * ```
+ *
+ * @example Delete a streaming job
+ * ```typescript
+ * const result = await adapter.deleteTranscript('stream-456', 'streaming');
+ * ```
+ *
+ * @see https://docs.gladia.io/
  */
+ deleteTranscript(transcriptId: string, jobType?: "pre-recorded" | "streaming"): Promise<{
+ success: boolean;
+ }>;
  /**
  * Stream audio for real-time transcription
  *

@@ -5924,6 +7209,29 @@ declare class AssemblyAIAdapter extends BaseAdapter {
  * Get transcription result by ID
  */
  getTranscript(transcriptId: string): Promise<UnifiedTranscriptResponse>;
+ /**
+ * Delete a transcription and its associated data
+ *
+ * Removes the transcription data from AssemblyAI's servers. This action
+ * is irreversible. The transcript will be marked as deleted and its
+ * content will no longer be accessible.
+ *
+ * @param transcriptId - The ID of the transcript to delete
+ * @returns Promise with success status
+ *
+ * @example Delete a transcript
+ * ```typescript
+ * const result = await adapter.deleteTranscript('abc123');
+ * if (result.success) {
+ * console.log('Transcript deleted successfully');
+ * }
+ * ```
+ *
+ * @see https://www.assemblyai.com/docs/api-reference/transcripts/delete
+ */
+ deleteTranscript(transcriptId: string): Promise<{
+ success: boolean;
+ }>;
  /**
  * Build AssemblyAI transcription request from unified options
  */

@@ -6294,6 +7602,27 @@ declare class AzureSTTAdapter extends BaseAdapter {
  * @returns Transcription response with status and results
  */
  getTranscript(transcriptId: string): Promise<UnifiedTranscriptResponse>;
+ /**
+ * Delete a transcription and its associated data
+ *
+ * Removes the transcription from Azure's servers. This action is irreversible.
+ *
+ * @param transcriptId - The ID of the transcription to delete
+ * @returns Promise with success status
+ *
+ * @example Delete a transcription
+ * ```typescript
+ * const result = await adapter.deleteTranscript('abc123-def456');
+ * if (result.success) {
+ * console.log('Transcription deleted successfully');
+ * }
+ * ```
+ *
+ * @see https://learn.microsoft.com/azure/cognitive-services/speech-service/batch-transcription
+ */
+ deleteTranscript(transcriptId: string): Promise<{
+ success: boolean;
+ }>;
  /**
  * Build Azure-specific transcription properties
  */

@@ -6563,6 +7892,34 @@ declare class SpeechmaticsAdapter extends BaseAdapter {
  * @returns Transcription response with status and results
  */
  getTranscript(transcriptId: string): Promise<UnifiedTranscriptResponse>;
+ /**
+ * Delete a transcription job and its associated data
+ *
+ * Removes the job and all associated resources from Speechmatics' servers.
+ * This action is irreversible.
+ *
+ * @param transcriptId - The job ID to delete
+ * @param force - Force delete even if job is still running (default: false)
+ * @returns Promise with success status
+ *
+ * @example Delete a completed job
+ * ```typescript
+ * const result = await adapter.deleteTranscript('job-abc123');
+ * if (result.success) {
+ * console.log('Job deleted successfully');
+ * }
+ * ```
+ *
+ * @example Force delete a running job
+ * ```typescript
+ * const result = await adapter.deleteTranscript('job-abc123', true);
+ * ```
+ *
+ * @see https://docs.speechmatics.com/
+ */
+ deleteTranscript(transcriptId: string, force?: boolean): Promise<{
+ success: boolean;
+ }>;
  /**
  * Normalize Speechmatics status to unified status
  */

@@ -9323,178 +10680,67 @@ declare const HistoryControllerGetListV1StatusItem: {
  * Gladia Control API
  * OpenAPI spec version: 1.0
  */
-
- type HistoryControllerGetListV1Params = {
- /**
- * The starting point for pagination. A value of 0 starts from the first item.
- */
- offset?: number;
- /**
- * The maximum number of items to return. Useful for pagination and controlling data payload size.
- */
- limit?: number;
- /**
- * Filter items relevant to a specific date in ISO format (YYYY-MM-DD).
- */
- date?: string;
- /**
- * Include items that occurred before the specified date in ISO format.
- */
- before_date?: string;
- /**
- * Filter for items after the specified date. Use with `before_date` for a range. Date in ISO format.
- */
- after_date?: string;
- /**
- * Filter the list based on item status. Accepts multiple values from the predefined list.
- */
- status?: HistoryControllerGetListV1StatusItem[];
- custom_metadata?: {
- [key: string]: unknown;
- };
- /**
- * Filter the list based on the item type. Supports multiple values from the predefined list.
- */
- kind?: HistoryControllerGetListV1KindItem[];
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Gladia Control API
- * OpenAPI spec version: 1.0
- */
- interface InitPreRecordedTranscriptionResponse {
- /** Id of the job */
- id: string;
- /** Prebuilt URL with your transcription `id` to fetch the result */
- result_url: string;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Gladia Control API
- * OpenAPI spec version: 1.0
- */
- interface InitStreamingResponse {
- /** Id of the job */
- id: string;
- /** Creation date */
- created_at: string;
- /** The websocket url to connect to for sending audio data. The url will contain the temporary token to authenticate the session. */
- url: string;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Gladia Control API
- * OpenAPI spec version: 1.0
- */
- /**
- * Custom metadata you can attach to this transcription
- */
- type InitTranscriptionRequestCustomMetadata = {
- [key: string]: unknown;
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * Gladia Control API
- * OpenAPI spec version: 1.0
- */
-
- interface InitTranscriptionRequest {
- /**
- * **[Deprecated]** Context to feed the transcription model with for possible better accuracy
- * @deprecated
- */
- context_prompt?: string;
- /** **[Beta]** Can be either boolean to enable custom_vocabulary for this audio or an array with specific vocabulary list to feed the transcription model with */
- custom_vocabulary?: boolean;
- /** **[Beta]** Custom vocabulary configuration, if `custom_vocabulary` is enabled */
- custom_vocabulary_config?: CustomVocabularyConfigDTO;
- /**
- * **[Deprecated]** Use `language_config` instead. Detect the language from the given audio
- * @deprecated
- */
- detect_language?: boolean;
- /**
- * **[Deprecated]** Use `language_config` instead.Detect multiple languages in the given audio
- * @deprecated
- */
- enable_code_switching?: boolean;
- /**
- * **[Deprecated]** Use `language_config` instead. Specify the configuration for code switching
- * @deprecated
- */
- code_switching_config?: CodeSwitchingConfigDTO;
- /**
- * **[Deprecated]** Use `language_config` instead. Set the spoken language for the given audio (ISO 639 standard)
- * @deprecated
- */
- language?: TranscriptionLanguageCodeEnum;
- /**
- * **[Deprecated]** Use `callback`/`callback_config` instead. Callback URL we will do a `POST` request to with the result of the transcription
- * @deprecated
- */
- callback_url?: string;
- /** Enable callback for this transcription. If true, the `callback_config` property will be used to customize the callback behaviour */
- callback?: boolean;
- /** Customize the callback behaviour (url and http method) */
- callback_config?: CallbackConfigDto;
- /** Enable subtitles generation for this transcription */
- subtitles?: boolean;
- /** Configuration for subtitles generation if `subtitles` is enabled */
- subtitles_config?: SubtitlesConfigDTO;
- /** Enable speaker recognition (diarization) for this audio */
- diarization?: boolean;
- /** Speaker recognition configuration, if `diarization` is enabled */
- diarization_config?: DiarizationConfigDTO;
- /** **[Beta]** Enable translation for this audio */
- translation?: boolean;
- /** **[Beta]** Translation configuration, if `translation` is enabled */
- translation_config?: TranslationConfigDTO;
- /** **[Beta]** Enable summarization for this audio */
- summarization?: boolean;
- /** **[Beta]** Summarization configuration, if `summarization` is enabled */
- summarization_config?: SummarizationConfigDTO;
- /** **[Alpha]** Enable moderation for this audio */
- moderation?: boolean;
- /** **[Alpha]** Enable named entity recognition for this audio */
- named_entity_recognition?: boolean;
- /** **[Alpha]** Enable chapterization for this audio */
- chapterization?: boolean;
- /** **[Alpha]** Enable names consistency for this audio */
- name_consistency?: boolean;
- /** **[Alpha]** Enable custom spelling for this audio */
- custom_spelling?: boolean;
- /** **[Alpha]** Custom spelling configuration, if `custom_spelling` is enabled */
- custom_spelling_config?: CustomSpellingConfigDTO;
- /** **[Alpha]** Enable structured data extraction for this audio */
- structured_data_extraction?: boolean;
- /** **[Alpha]** Structured data extraction configuration, if `structured_data_extraction` is enabled */
- structured_data_extraction_config?: StructuredDataExtractionConfigDTO;
- /** Enable sentiment analysis for this audio */
- sentiment_analysis?: boolean;
- /** **[Alpha]** Enable audio to llm processing for this audio */
- audio_to_llm?: boolean;
- /** **[Alpha]** Audio to llm configuration, if `audio_to_llm` is enabled */
- audio_to_llm_config?: AudioToLlmListConfigDTO;
- /** Custom metadata you can attach to this transcription */
- custom_metadata?: InitTranscriptionRequestCustomMetadata;
- /** Enable sentences for this audio */
- sentences?: boolean;
- /** **[Alpha]** Allows to change the output display_mode for this audio. The output will be reordered, creating new utterances when speakers overlapped */
- display_mode?: boolean;
- /** **[Alpha]** Use enhanced punctuation for this audio */
- punctuation_enhanced?: boolean;
- /** Specify the language configuration */
- language_config?: LanguageConfig;
- /** URL to a Gladia file or to an external audio or video file */
- audio_url: string;
+
+ type HistoryControllerGetListV1Params = {
+ /**
+ * The starting point for pagination. A value of 0 starts from the first item.
+ */
+ offset?: number;
+ /**
+ * The maximum number of items to return. Useful for pagination and controlling data payload size.
+ */
+ limit?: number;
+ /**
+ * Filter items relevant to a specific date in ISO format (YYYY-MM-DD).
+ */
+ date?: string;
+ /**
+ * Include items that occurred before the specified date in ISO format.
+ */
+ before_date?: string;
+ /**
+ * Filter for items after the specified date. Use with `before_date` for a range. Date in ISO format.
+ */
+ after_date?: string;
+ /**
+ * Filter the list based on item status. Accepts multiple values from the predefined list.
+ */
+ status?: HistoryControllerGetListV1StatusItem[];
+ custom_metadata?: {
+ [key: string]: unknown;
+ };
+ /**
+ * Filter the list based on the item type. Supports multiple values from the predefined list.
+ */
+ kind?: HistoryControllerGetListV1KindItem[];
+ };
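These params map one-to-one onto Gladia's history-list query string. A sketch of a filtered page request — the `"done"` and `"pre-recorded"` literals are assumed members of the status/kind enums declared elsewhere in this file, hence the casts:

```typescript
// Sketch only: first page of completed pre-recorded jobs from H1 2024.
const params: HistoryControllerGetListV1Params = {
  offset: 0,
  limit: 25,
  after_date: "2024-01-01",
  before_date: "2024-06-30",
  status: ["done" as HistoryControllerGetListV1StatusItem],     // assumed literal
  kind: ["pre-recorded" as HistoryControllerGetListV1KindItem], // assumed literal
};
```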
+
+ /**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Gladia Control API
+ * OpenAPI spec version: 1.0
+ */
+ interface InitPreRecordedTranscriptionResponse {
+ /** Id of the job */
+ id: string;
+ /** Prebuilt URL with your transcription `id` to fetch the result */
+ result_url: string;
+ }
+
+ /**
+ * Generated by orval v7.9.0 🍺
+ * Do not edit manually.
+ * Gladia Control API
+ * OpenAPI spec version: 1.0
+ */
+ interface InitStreamingResponse {
+ /** Id of the job */
+ id: string;
+ /** Creation date */
+ created_at: string;
+ /** The websocket url to connect to for sending audio data. The url will contain the temporary token to authenticate the session. */
+ url: string;
  }

  /**

@@ -11789,202 +13035,7 @@ interface RedactedAudioResponse {
  /**
  * The notification when the redacted audio is ready.
  */
- type RedactedAudioNotification = RedactedAudioResponse;
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * AssemblyAI API
- * AssemblyAI API
- * OpenAPI spec version: 1.3.4
- */
- /**
- * The channel of the sentence. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially.
- */
- type TranscriptSentenceChannel = string | null;
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * AssemblyAI API
- * AssemblyAI API
- * OpenAPI spec version: 1.3.4
- */
- /**
- * The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
- */
- type TranscriptSentenceSpeaker = string | null;
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * AssemblyAI API
- * AssemblyAI API
- * OpenAPI spec version: 1.3.4
- */
-
- interface TranscriptSentence {
- /** The transcript of the sentence */
- text: string;
- /** The starting time, in milliseconds, for the sentence */
- start: number;
- /** The ending time, in milliseconds, for the sentence */
- end: number;
- /**
- * The confidence score for the transcript of this sentence
- * @minimum 0
- * @maximum 1
- */
- confidence: number;
- /** An array of words in the sentence */
- words: TranscriptWord[];
- /** The channel of the sentence. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially. */
- channel?: TranscriptSentenceChannel;
- /** The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null */
- speaker: TranscriptSentenceSpeaker;
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * AssemblyAI API
- * AssemblyAI API
- * OpenAPI spec version: 1.3.4
- */
-
- interface SentencesResponse {
- /** The unique identifier for the transcript */
- id: string;
- /**
- * The confidence score for the transcript
- * @minimum 0
- * @maximum 1
- */
- confidence: number;
- /** The duration of the audio file in seconds */
- audio_duration: number;
- /** An array of sentences in the transcript */
- sentences: TranscriptSentence[];
- }
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * AssemblyAI API
- * AssemblyAI API
- * OpenAPI spec version: 1.3.4
- */
- /**
- * Service unavailable
- */
- type ServiceUnavailableResponse = void;
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * AssemblyAI API
- * AssemblyAI API
- * OpenAPI spec version: 1.3.4
- */
- /**
- * Format of the subtitles
- */
- type SubtitleFormat = (typeof SubtitleFormat)[keyof typeof SubtitleFormat];
- declare const SubtitleFormat: {
- readonly srt: "srt";
- readonly vtt: "vtt";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * AssemblyAI API
- * AssemblyAI API
- * OpenAPI spec version: 1.3.4
- */
- /**
- * The model to summarize the transcript
- */
- type SummaryModel = (typeof SummaryModel)[keyof typeof SummaryModel];
- declare const SummaryModel: {
- readonly informative: "informative";
- readonly conversational: "conversational";
- readonly catchy: "catchy";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * AssemblyAI API
- * AssemblyAI API
- * OpenAPI spec version: 1.3.4
- */
- /**
- * The type of summary
- */
- type SummaryType = (typeof SummaryType)[keyof typeof SummaryType];
- declare const SummaryType: {
- readonly bullets: "bullets";
- readonly bullets_verbose: "bullets_verbose";
- readonly gist: "gist";
- readonly headline: "headline";
- readonly paragraph: "paragraph";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * AssemblyAI API
- * AssemblyAI API
- * OpenAPI spec version: 1.3.4
- */
-
- /**
- * Too many requests
- */
- type TooManyRequestsResponse = Error$1;
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * AssemblyAI API
- * AssemblyAI API
- * OpenAPI spec version: 1.3.4
- */
- /**
- * How much to boost specified words
- */
- type TranscriptBoostParam = (typeof TranscriptBoostParam)[keyof typeof TranscriptBoostParam];
- declare const TranscriptBoostParam: {
- readonly low: "low";
- readonly default: "default";
- readonly high: "high";
- };
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * AssemblyAI API
- * AssemblyAI API
- * OpenAPI spec version: 1.3.4
- */
- /**
- * The date and time the transcript was completed
- * @pattern ^(?:(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2}(?:\.\d+)?))$
- */
- type TranscriptListItemCompleted = string | null;
-
- /**
- * Generated by orval v7.9.0 🍺
- * Do not edit manually.
- * AssemblyAI API
- * AssemblyAI API
- * OpenAPI spec version: 1.3.4
- */
- /**
- * Error message of why the transcript failed
- */
- type TranscriptListItemError = string | null;
+ type RedactedAudioNotification = RedactedAudioResponse;

  /**
  * Generated by orval v7.9.0 🍺

@@ -11993,29 +13044,10 @@ type TranscriptListItemError = string | null;
  * AssemblyAI API
  * OpenAPI spec version: 1.3.4
  */
-
-
-
-
- /** The URL to retrieve the transcript */
- resource_url: string;
- /** The status of the transcript */
- status: TranscriptStatus;
- /**
- * The date and time the transcript was created
- * @pattern ^(?:(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2}(?:\.\d+)?))$
- */
- created: string;
- /**
- * The date and time the transcript was completed
- * @pattern ^(?:(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2}(?:\.\d+)?))$
- */
- completed: TranscriptListItemCompleted;
- /** The URL to the audio file */
- audio_url: string;
- /** Error message of why the transcript failed */
- error: TranscriptListItemError;
- }
+ /**
+ * The channel of the sentence. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially.
+ */
+ type TranscriptSentenceChannel = string | null;

  /**
  * Generated by orval v7.9.0 🍺

@@ -12024,16 +13056,10 @@ interface TranscriptListItem {
  * AssemblyAI API
  * OpenAPI spec version: 1.3.4
  */
-
  /**
- *
+ * The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
  */
-
- /** Details of the transcript page */
- page_details: PageDetails;
- /** An array of transcripts */
- transcripts: TranscriptListItem[];
- }
+ type TranscriptSentenceSpeaker = string | null;

  /**
  * Generated by orval v7.9.0 🍺

@@ -12043,7 +13069,26 @@ interface TranscriptList {
  * OpenAPI spec version: 1.3.4
  */

-
+ interface TranscriptSentence {
+ /** The transcript of the sentence */
+ text: string;
+ /** The starting time, in milliseconds, for the sentence */
+ start: number;
+ /** The ending time, in milliseconds, for the sentence */
+ end: number;
+ /**
+ * The confidence score for the transcript of this sentence
+ * @minimum 0
+ * @maximum 1
+ */
+ confidence: number;
+ /** An array of words in the sentence */
+ words: TranscriptWord[];
+ /** The channel of the sentence. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially. */
+ channel?: TranscriptSentenceChannel;
+ /** The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null */
+ speaker: TranscriptSentenceSpeaker;
+ }

  /**
  * Generated by orval v7.9.0 🍺

@@ -12053,12 +13098,20 @@ type TranscriptOptionalParamsLanguageCodeOneOf = TranscriptLanguageCode | string
  * OpenAPI spec version: 1.3.4
  */

-
-
-
-
-
-
+ interface SentencesResponse {
+ /** The unique identifier for the transcript */
+ id: string;
+ /**
+ * The confidence score for the transcript
+ * @minimum 0
+ * @maximum 1
+ */
+ confidence: number;
+ /** The duration of the audio file in seconds */
+ audio_duration: number;
+ /** An array of sentences in the transcript */
+ sentences: TranscriptSentence[];
+ }
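With `TranscriptSentence` and `SentencesResponse` restored as concrete shapes, sentence-level output can be consumed directly. A small sketch; the response value is assumed to come from a sentences-endpoint call made elsewhere in the SDK:

```typescript
// Sketch only: `res` stands in for a real SentencesResponse.
declare const res: SentencesResponse;

for (const sentence of res.sentences) {
  const speaker = sentence.speaker ?? "unknown";          // null unless diarization is on
  const seconds = (sentence.end - sentence.start) / 1000; // start/end are in milliseconds
  console.log(`[${speaker}] ${sentence.text} (${seconds.toFixed(1)}s, confidence ${sentence.confidence})`);
}
```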
  /**
  * Generated by orval v7.9.0 🍺

@@ -12067,11 +13120,10 @@ type TranscriptOptionalParamsLanguageCode = TranscriptOptionalParamsLanguageCode
  * AssemblyAI API
  * OpenAPI spec version: 1.3.4
  */
-
  /**
- *
+ * Service unavailable
  */
- type
+ type ServiceUnavailableResponse = void;

  /**
  * Generated by orval v7.9.0 🍺

@@ -12081,9 +13133,13 @@ type TranscriptOptionalParamsRedactPiiSub = SubstitutionPolicy | null;
  * OpenAPI spec version: 1.3.4
  */
  /**
- *
+ * Format of the subtitles
  */
- type
+ type SubtitleFormat = (typeof SubtitleFormat)[keyof typeof SubtitleFormat];
+ declare const SubtitleFormat: {
+ readonly srt: "srt";
+ readonly vtt: "vtt";
+ };

  /**
  * Generated by orval v7.9.0 🍺

@@ -12094,9 +13150,9 @@ type TranscriptOptionalParamsSpeakersExpected = number | null;
  */

  /**
- *
+ * Too many requests
  */
- type
+ type TooManyRequestsResponse = Error$1;

  /**
  * Generated by orval v7.9.0 🍺

@@ -12106,13 +13162,10 @@ type TranscriptOptionalParamsSpeechModel = SpeechModel | null;
  * OpenAPI spec version: 1.3.4
  */
  /**
- *
-
-
- * @minimum 0
- * @maximum 1
+ * The date and time the transcript was completed
+ * @pattern ^(?:(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2}(?:\.\d+)?))$
  */
- type
+ type TranscriptListItemCompleted = string | null;

  /**
  * Generated by orval v7.9.0 🍺

@@ -12122,9 +13175,9 @@ type TranscriptOptionalParamsSpeechThreshold = number | null;
  * OpenAPI spec version: 1.3.4
  */
  /**
- *
+ * Error message of why the transcript failed
  */
- type
+ type TranscriptListItemError = string | null;

  /**
  * Generated by orval v7.9.0 🍺

@@ -12133,10 +13186,29 @@ type TranscriptOptionalParamsWebhookAuthHeaderName = string | null;
  * AssemblyAI API
  * OpenAPI spec version: 1.3.4
  */
-
-
- */
-
+
+ interface TranscriptListItem {
+ /** The unique identifier for the transcript */
+ id: string;
+ /** The URL to retrieve the transcript */
+ resource_url: string;
+ /** The status of the transcript */
+ status: TranscriptStatus;
+ /**
+ * The date and time the transcript was created
+ * @pattern ^(?:(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2}(?:\.\d+)?))$
+ */
+ created: string;
+ /**
+ * The date and time the transcript was completed
+ * @pattern ^(?:(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2}(?:\.\d+)?))$
+ */
+ completed: TranscriptListItemCompleted;
+ /** The URL to the audio file */
+ audio_url: string;
+ /** Error message of why the transcript failed */
+ error: TranscriptListItemError;
+ }

  /**
  * Generated by orval v7.9.0 🍺

@@ -12147,120 +13219,13 @@ type TranscriptOptionalParamsWebhookAuthHeaderValue = string | null;
  */

  /**
- * The
+ * A list of transcripts. Transcripts are sorted from newest to oldest. The previous URL always points to a page with older transcripts.
  */
- interface
- /**
-
-
-
- /** Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false. */
- language_detection?: boolean;
- /**
- * The confidence threshold for the automatically detected language.
- An error will be returned if the language confidence is below this threshold.
- Defaults to 0.
-
- * @minimum 0
- * @maximum 1
- */
- language_confidence_threshold?: number;
- /** The speech model to use for the transcription. When `null`, the "best" model is used. */
- speech_model?: TranscriptOptionalParamsSpeechModel;
- /** Enable Automatic Punctuation, can be true or false */
- punctuate?: boolean;
- /** Enable Text Formatting, can be true or false */
- format_text?: boolean;
- /** Transcribe Filler Words, like "umm", in your media file; can be true or false */
- disfluencies?: boolean;
- /** Enable [Multichannel](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) transcription, can be true or false. */
- multichannel?: boolean;
- /** The URL to which we send webhook requests.
- We sends two different types of webhook requests.
- One request when a transcript is completed or failed, and one request when the redacted audio is ready if redact_pii_audio is enabled.
- */
- webhook_url?: string;
- /** The header name to be sent with the transcript completed or failed webhook requests */
- webhook_auth_header_name?: TranscriptOptionalParamsWebhookAuthHeaderName;
- /** The header value to send back with the transcript completed or failed webhook requests for added security */
- webhook_auth_header_value?: TranscriptOptionalParamsWebhookAuthHeaderValue;
- /** Enable Key Phrases, either true or false */
- auto_highlights?: boolean;
- /** The point in time, in milliseconds, to begin transcribing in your media file */
- audio_start_from?: number;
- /** The point in time, in milliseconds, to stop transcribing in your media file */
- audio_end_at?: number;
- /**
- * The list of custom vocabulary to boost transcription probability for
- * @deprecated
- */
- word_boost?: string[];
- /** How much to boost specified words */
- boost_param?: TranscriptBoostParam;
- /** Filter profanity from the transcribed text, can be true or false */
- filter_profanity?: boolean;
- /** Redact PII from the transcribed text using the Redact PII model, can be true or false */
- redact_pii?: boolean;
- /** Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details. */
- redact_pii_audio?: boolean;
- /** Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details. */
- redact_pii_audio_quality?: RedactPiiAudioQuality;
- /** The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details. */
- redact_pii_policies?: PiiPolicy[];
- /** The replacement logic for detected PII, can be "entity_type" or "hash". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details. */
- redact_pii_sub?: TranscriptOptionalParamsRedactPiiSub;
- /** Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false */
- speaker_labels?: boolean;
- /** Tells the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details. */
- speakers_expected?: TranscriptOptionalParamsSpeakersExpected;
- /** Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false */
- content_safety?: boolean;
- /**
- * The confidence threshold for the Content Moderation model. Values must be between 25 and 100.
- * @minimum 25
- * @maximum 100
- */
- content_safety_confidence?: number;
- /** Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false */
- iab_categories?: boolean;
- /** Customize how words are spelled and formatted using to and from values */
- custom_spelling?: TranscriptCustomSpelling[];
- /** <Warning>`keyterms_prompt` is only supported when the `speech_model` is specified as `slam-1`</Warning>
- Improve accuracy with up to 1000 domain-specific words or phrases (maximum 6 words per phrase).
- */
- keyterms_prompt?: string[];
- /**
- * This parameter does not currently have any functionality attached to it.
- * @deprecated
- */
- prompt?: string;
- /** Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false */
- sentiment_analysis?: boolean;
- /** Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false */
- auto_chapters?: boolean;
- /** Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false */
- entity_detection?: boolean;
- /**
- * Reject audio files that contain less than this fraction of speech.
- Valid values are in the range [0, 1] inclusive.
-
- * @minimum 0
- * @maximum 1
- */
- speech_threshold?: TranscriptOptionalParamsSpeechThreshold;
- /** Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false */
- summarization?: boolean;
- /** The model to summarize the transcript */
- summary_model?: SummaryModel;
- /** The type of summary */
- summary_type?: SummaryType;
- /**
- * Enable custom topics, either true or false
- * @deprecated
- */
- custom_topics?: boolean;
- /** The list of custom topics */
- topics?: string[];
+ interface TranscriptList {
+ /** Details of the transcript page */
+ page_details: PageDetails;
+ /** An array of transcripts */
+ transcripts: TranscriptListItem[];
  }

  /**

@@ -12593,4 +13558,4 @@ declare namespace index {
  export { index_AudioIntelligenceModelStatus as AudioIntelligenceModelStatus, type index_AutoHighlightResult as AutoHighlightResult, type index_AutoHighlightsResult as AutoHighlightsResult, type index_BadRequestResponse as BadRequestResponse, type index_CannotAccessUploadedFileResponse as CannotAccessUploadedFileResponse, type index_Chapter as Chapter, type index_ContentSafetyLabel as ContentSafetyLabel, type index_ContentSafetyLabelResult as ContentSafetyLabelResult, type index_ContentSafetyLabelsResult as ContentSafetyLabelsResult, type index_ContentSafetyLabelsResultSeverityScoreSummary as ContentSafetyLabelsResultSeverityScoreSummary, type index_ContentSafetyLabelsResultSummary as ContentSafetyLabelsResultSummary, type index_CreateRealtimeTemporaryTokenParams as CreateRealtimeTemporaryTokenParams, type index_Entity as Entity, index_EntityType as EntityType, type Error$1 as Error, type index_GatewayTimeoutResponse as GatewayTimeoutResponse, type index_GetSubtitlesParams as GetSubtitlesParams, type index_InternalServerErrorResponse as InternalServerErrorResponse, type index_LemurActionItemsParams as LemurActionItemsParams, type index_LemurActionItemsParamsAllOf as LemurActionItemsParamsAllOf, type index_LemurActionItemsResponse as LemurActionItemsResponse, type index_LemurBaseParams as LemurBaseParams, type index_LemurBaseParamsContext as LemurBaseParamsContext, type index_LemurBaseParamsContextOneOf as LemurBaseParamsContextOneOf, type index_LemurBaseParamsFinalModel as LemurBaseParamsFinalModel, type index_LemurBaseResponse as LemurBaseResponse, index_LemurModel as LemurModel, type index_LemurQuestion as LemurQuestion, type index_LemurQuestionAnswer as LemurQuestionAnswer, type index_LemurQuestionAnswerParams as LemurQuestionAnswerParams, type index_LemurQuestionAnswerParamsAllOf as LemurQuestionAnswerParamsAllOf, type index_LemurQuestionAnswerResponse as LemurQuestionAnswerResponse, type index_LemurQuestionAnswerResponseAllOf as LemurQuestionAnswerResponseAllOf, type index_LemurQuestionContext as LemurQuestionContext, type index_LemurQuestionContextOneOf as LemurQuestionContextOneOf, type index_LemurResponse as LemurResponse, type index_LemurStringResponse as LemurStringResponse, type index_LemurStringResponseAllOf as LemurStringResponseAllOf, type index_LemurSummaryParams as LemurSummaryParams, type index_LemurSummaryParamsAllOf as LemurSummaryParamsAllOf, type index_LemurSummaryResponse as LemurSummaryResponse, type index_LemurTaskParams as LemurTaskParams, type index_LemurTaskParamsAllOf as LemurTaskParamsAllOf, type index_LemurTaskResponse as LemurTaskResponse, type index_LemurUsage as LemurUsage, type index_ListTranscriptParams as ListTranscriptParams, type index_ListTranscriptsParams as ListTranscriptsParams, type index_NotFoundResponse as NotFoundResponse, type index_PageDetails as PageDetails, type index_PageDetailsNextUrl as PageDetailsNextUrl, type index_PageDetailsPrevUrl as PageDetailsPrevUrl, type index_ParagraphsResponse as ParagraphsResponse, index_PiiPolicy as PiiPolicy, type index_PurgeLemurRequestDataResponse as PurgeLemurRequestDataResponse, type index_RealtimeTemporaryTokenResponse as RealtimeTemporaryTokenResponse, index_RedactPiiAudioQuality as RedactPiiAudioQuality, type index_RedactedAudioNotification as RedactedAudioNotification, type index_RedactedAudioResponse as RedactedAudioResponse, index_RedactedAudioStatus as RedactedAudioStatus, type index_SentencesResponse as SentencesResponse, index_Sentiment as Sentiment, type SentimentAnalysisResult$1 as SentimentAnalysisResult, type index_SentimentAnalysisResultChannel as SentimentAnalysisResultChannel, type index_SentimentAnalysisResultSpeaker as SentimentAnalysisResultSpeaker, type index_ServiceUnavailableResponse as ServiceUnavailableResponse, type index_SeverityScoreSummary as SeverityScoreSummary, index_SpeechModel as SpeechModel, index_SubstitutionPolicy as SubstitutionPolicy, index_SubtitleFormat as SubtitleFormat, index_SummaryModel as SummaryModel, index_SummaryType as SummaryType, type index_Timestamp as Timestamp, type index_TooManyRequestsResponse as TooManyRequestsResponse, type index_TopicDetectionModelResult as TopicDetectionModelResult, type index_TopicDetectionModelResultSummary as TopicDetectionModelResultSummary, type index_TopicDetectionResult as TopicDetectionResult, type index_TopicDetectionResultLabelsItem as TopicDetectionResultLabelsItem, type index_Transcript as Transcript, type index_TranscriptAudioDuration as TranscriptAudioDuration, type index_TranscriptAudioEndAt as TranscriptAudioEndAt, type index_TranscriptAudioStartFrom as TranscriptAudioStartFrom, type index_TranscriptAutoChapters as TranscriptAutoChapters, type index_TranscriptAutoHighlightsResult as TranscriptAutoHighlightsResult, index_TranscriptBoostParam as TranscriptBoostParam, type index_TranscriptBoostParamProperty as TranscriptBoostParamProperty, type index_TranscriptChapters as TranscriptChapters, type index_TranscriptConfidence as TranscriptConfidence, type index_TranscriptContentSafety as TranscriptContentSafety, type index_TranscriptContentSafetyLabels as TranscriptContentSafetyLabels, type index_TranscriptCustomSpelling as TranscriptCustomSpelling, type index_TranscriptCustomSpellingProperty as TranscriptCustomSpellingProperty, type index_TranscriptCustomTopics as TranscriptCustomTopics, type index_TranscriptDisfluencies as TranscriptDisfluencies, type index_TranscriptEntities as TranscriptEntities, type index_TranscriptEntityDetection as TranscriptEntityDetection, type index_TranscriptFilterProfanity as TranscriptFilterProfanity, type index_TranscriptFormatText as TranscriptFormatText, type index_TranscriptIabCategories as TranscriptIabCategories, type index_TranscriptIabCategoriesResult as TranscriptIabCategoriesResult, index_TranscriptLanguageCode as TranscriptLanguageCode, type index_TranscriptLanguageCodeProperty as TranscriptLanguageCodeProperty, type index_TranscriptLanguageConfidence as TranscriptLanguageConfidence, type index_TranscriptLanguageConfidenceThreshold as TranscriptLanguageConfidenceThreshold, type index_TranscriptLanguageDetection as TranscriptLanguageDetection, type index_TranscriptList as TranscriptList, type index_TranscriptListItem as TranscriptListItem, type index_TranscriptListItemCompleted as TranscriptListItemCompleted, type index_TranscriptListItemError as TranscriptListItemError, type index_TranscriptMultichannel as TranscriptMultichannel, type index_TranscriptOptionalParams as TranscriptOptionalParams, type index_TranscriptOptionalParamsLanguageCode as TranscriptOptionalParamsLanguageCode, type index_TranscriptOptionalParamsLanguageCodeOneOf as TranscriptOptionalParamsLanguageCodeOneOf, type index_TranscriptOptionalParamsRedactPiiSub as TranscriptOptionalParamsRedactPiiSub, type index_TranscriptOptionalParamsSpeakersExpected as TranscriptOptionalParamsSpeakersExpected, type index_TranscriptOptionalParamsSpeechModel as TranscriptOptionalParamsSpeechModel, type index_TranscriptOptionalParamsSpeechThreshold as TranscriptOptionalParamsSpeechThreshold, type index_TranscriptOptionalParamsWebhookAuthHeaderName as TranscriptOptionalParamsWebhookAuthHeaderName, type index_TranscriptOptionalParamsWebhookAuthHeaderValue as TranscriptOptionalParamsWebhookAuthHeaderValue, type index_TranscriptParagraph as TranscriptParagraph, type index_TranscriptParams as TranscriptParams, type index_TranscriptParamsAllOf as TranscriptParamsAllOf, type index_TranscriptPunctuate as TranscriptPunctuate, type index_TranscriptReadyNotification as TranscriptReadyNotification, index_TranscriptReadyStatus as TranscriptReadyStatus, type index_TranscriptRedactPiiAudio as TranscriptRedactPiiAudio, type index_TranscriptRedactPiiAudioQuality as TranscriptRedactPiiAudioQuality, type index_TranscriptRedactPiiPolicies as TranscriptRedactPiiPolicies, type index_TranscriptSentence as TranscriptSentence, type index_TranscriptSentenceChannel as TranscriptSentenceChannel, type index_TranscriptSentenceSpeaker as TranscriptSentenceSpeaker, type index_TranscriptSentimentAnalysis as TranscriptSentimentAnalysis, type index_TranscriptSentimentAnalysisResults as TranscriptSentimentAnalysisResults, type index_TranscriptSpeakerLabels as TranscriptSpeakerLabels, type index_TranscriptSpeakersExpected as TranscriptSpeakersExpected, type index_TranscriptSpeechModel as TranscriptSpeechModel, type index_TranscriptSpeechThreshold as TranscriptSpeechThreshold, type index_TranscriptSpeedBoost as TranscriptSpeedBoost, index_TranscriptStatus as TranscriptStatus, type index_TranscriptSummary as TranscriptSummary, type index_TranscriptSummaryModel as TranscriptSummaryModel, type index_TranscriptSummaryType as TranscriptSummaryType, type index_TranscriptText as TranscriptText, type index_TranscriptThrottled as TranscriptThrottled, type index_TranscriptUtterance as TranscriptUtterance, type index_TranscriptUtteranceChannel as TranscriptUtteranceChannel, type index_TranscriptUtterances as TranscriptUtterances, type index_TranscriptWebhookAuthHeaderName as TranscriptWebhookAuthHeaderName, type index_TranscriptWebhookNotification as TranscriptWebhookNotification, type index_TranscriptWebhookStatusCode as TranscriptWebhookStatusCode, type index_TranscriptWebhookUrl as TranscriptWebhookUrl, type index_TranscriptWord as TranscriptWord, type index_TranscriptWordChannel as TranscriptWordChannel, type index_TranscriptWordSpeaker as TranscriptWordSpeaker, type index_TranscriptWords as TranscriptWords, type index_UnauthorizedResponse as UnauthorizedResponse, type index_UploadedFile as UploadedFile, type index_WordSearchMatch as WordSearchMatch, type index_WordSearchParams as WordSearchParams, type index_WordSearchResponse as WordSearchResponse, type index_WordSearchTimestamp as WordSearchTimestamp };
  }

- export { AssemblyAIAdapter, type AssemblyAIStreamingOptions, index as AssemblyAITypes, AssemblyAIWebhookHandler, type AudioChunk, type AudioInput, AudioResponseFormat, AudioTranscriptionModel, AzureSTTAdapter, AzureWebhookHandler, BaseAdapter, BaseWebhookHandler, type BatchOnlyProvider, DeepgramAdapter, type DeepgramStreamingOptions, DeepgramWebhookHandler, GladiaAdapter, type GladiaStreamingOptions, index$1 as GladiaTypes, GladiaWebhookHandler, ListenV1EncodingParameter, type ListenV1LanguageParameter, type ListenV1ModelParameter, type ListenV1VersionParameter, OpenAIWhisperAdapter, type ProviderCapabilities, type ProviderConfig, type ProviderRawResponseMap, type ProviderStreamingOptions, type SessionStatus, SpeakV1ContainerParameter, SpeakV1EncodingParameter, SpeakV1SampleRateParameter, type Speaker, SpeechmaticsAdapter, SpeechmaticsWebhookHandler, type StreamEvent, type StreamEventType, type StreamingCallbacks, type StreamingOptions, type StreamingOptionsForProvider, type StreamingProvider, type StreamingSession, StreamingSupportedBitDepthEnum, StreamingSupportedEncodingEnum, StreamingSupportedSampleRateEnum, type TranscribeOptions, type TranscribeStreamParams, type TranscriptionAdapter, type TranscriptionModel, type TranscriptionProvider, type TranscriptionStatus, type UnifiedTranscriptResponse, type UnifiedWebhookEvent, type Utterance, VoiceRouter, type VoiceRouterConfig, type WebhookEventType, WebhookRouter, type WebhookRouterOptions, type WebhookRouterResult, type WebhookValidation, type WebhookVerificationOptions, type Word, createAssemblyAIAdapter, createAssemblyAIWebhookHandler, createAzureSTTAdapter, createAzureWebhookHandler, createDeepgramAdapter, createDeepgramWebhookHandler, createGladiaAdapter, createGladiaWebhookHandler, createOpenAIWhisperAdapter, createSpeechmaticsAdapter, createVoiceRouter, createWebhookRouter };
|
|
13561
|
+
export { AssemblyAIAdapter, type TranscriptOptionalParams as AssemblyAIOptions, type AssemblyAIStreamingOptions, index as AssemblyAITypes, AssemblyAIWebhookHandler, type AudioChunk, type AudioInput, AudioResponseFormat, AudioTranscriptionModel, AzureSTTAdapter, AzureWebhookHandler, BaseAdapter, BaseWebhookHandler, type BatchOnlyProvider, DeepgramAdapter, type ListenV1MediaTranscribeParams as DeepgramOptions, type DeepgramStreamingOptions, DeepgramWebhookHandler, GladiaAdapter, type AudioToLlmListConfigDTO as GladiaAudioToLlmConfig, type CodeSwitchingConfigDTO as GladiaCodeSwitchingConfig, type InitTranscriptionRequest as GladiaOptions, type GladiaStreamingOptions, index$1 as GladiaTypes, GladiaWebhookHandler, ListenV1EncodingParameter, type ListenV1LanguageParameter, type ListenV1ModelParameter, type ListenV1VersionParameter, OpenAIWhisperAdapter, type CreateTranscriptionRequest as OpenAIWhisperOptions, type ProviderCapabilities, type ProviderConfig, type ProviderRawResponseMap, type ProviderStreamingOptions, type SessionStatus, SpeakV1ContainerParameter, SpeakV1EncodingParameter, SpeakV1SampleRateParameter, type Speaker, SpeechmaticsAdapter, type SpeechmaticsOperatingPoint, SpeechmaticsWebhookHandler, type StreamEvent, type StreamEventType, type StreamingCallbacks, type StreamingOptions, type StreamingOptionsForProvider, type StreamingProvider, type StreamingSession, StreamingSupportedBitDepthEnum, StreamingSupportedEncodingEnum, StreamingSupportedSampleRateEnum, type TranscribeOptions, type TranscribeStreamParams, type TranscriptionAdapter, type TranscriptionLanguage, type TranscriptionModel, type TranscriptionProvider, type TranscriptionStatus, type UnifiedTranscriptResponse, type UnifiedWebhookEvent, type Utterance, VoiceRouter, type VoiceRouterConfig, type WebhookEventType, WebhookRouter, type WebhookRouterOptions, type WebhookRouterResult, type WebhookValidation, type WebhookVerificationOptions, type Word, createAssemblyAIAdapter, createAssemblyAIWebhookHandler, createAzureSTTAdapter, createAzureWebhookHandler, createDeepgramAdapter, createDeepgramWebhookHandler, createGladiaAdapter, createGladiaWebhookHandler, createOpenAIWhisperAdapter, createSpeechmaticsAdapter, createVoiceRouter, createWebhookRouter };
|
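The replaced export above is the substantive API change in 0.2.6: the generated provider request schemas are now re-exported under stable aliases (TranscriptOptionalParams as AssemblyAIOptions, ListenV1MediaTranscribeParams as DeepgramOptions, InitTranscriptionRequest as GladiaOptions, CreateTranscriptionRequest as OpenAIWhisperOptions), alongside the new SpeechmaticsOperatingPoint and TranscriptionLanguage types. The following is a minimal, type-only sketch of how a consumer might lean on those aliases; it assumes the package is imported as voice-router-dev, the ProviderOptionsMap helper and its string keys are hypothetical (not part of the package), and no option fields are asserted beyond what the generated schemas define.

import type {
  AssemblyAIOptions,
  DeepgramOptions,
  GladiaOptions,
  OpenAIWhisperOptions,
} from "voice-router-dev";

// Hypothetical per-provider options map built on the 0.2.6 aliases.
// Coding against AssemblyAIOptions / DeepgramOptions / GladiaOptions /
// OpenAIWhisperOptions, rather than the generated names such as
// ListenV1MediaTranscribeParams, insulates callers from identifier
// churn when the OpenAPI specs are regenerated.
type ProviderOptionsMap = {
  assemblyai: Partial<AssemblyAIOptions>;
  deepgram: Partial<DeepgramOptions>;
  gladia: Partial<GladiaOptions>;
  "openai-whisper": Partial<OpenAIWhisperOptions>;
};

// Identity helper: the literal provider key selects which alias the
// compiler checks the options object against.
function optionsFor<P extends keyof ProviderOptionsMap>(
  _provider: P,
  options: ProviderOptionsMap[P],
): ProviderOptionsMap[P] {
  return options;
}

// Usage: P is inferred as "deepgram", so the second argument is
// type-checked as Partial<DeepgramOptions>.
const dgOptions = optionsFor("deepgram", {});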