aws-sdk-transcribestreamingservice 1.44.0 → 1.45.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: a060b0bd4b3cfe757be9de73e243c486ad8ec11b1d8a9394d2b7d7b722c886af
- data.tar.gz: 8885409ed12e7ad33cb30facf08cc35a77fd2015c32e88a61849ddee71d1a302
+ metadata.gz: 13c39003a528bb9d5e46520346fb92e2ee6c6cb1955a69b673c35855b083d99d
+ data.tar.gz: 96918fc33576873735890306ce0825283005ca7176c6bac64ea0365426e32295
  SHA512:
- metadata.gz: df7687bc08886043ab4732223c8d658056ac36463009aa34c51302fbfe2502eb0ce29a0727bf71df0564d22041022f5c3778bda431cff851b958708a922a8d54
- data.tar.gz: 6cadae8681d2ce310fea02efd60a6460c3d6970448e0cbe1f2c88d8f1d658e1583a1ea192bd1a0da9b5aeac79f83c4e56ecbc9c167c7f3cef780eff0cc874931
+ metadata.gz: a7d6224b5653da93fbce581cd9f65cbe414ee5c4b764c76367bf21ad47f7124d55171d613c3d19d730d2be4696b8066e826494b515303d408fb947d59c97fbd7
+ data.tar.gz: b1608815851864b83658695bf358990d66a1fb57755b2cbc9ed818efe9584ad9a7508bcc0490aece5a2bb68684be0aaea92e5f1986dd4ca3e06965a25f59824a
data/CHANGELOG.md CHANGED
@@ -1,6 +1,11 @@
  Unreleased Changes
  ------------------
 
+ 1.45.0 (2022-11-28)
+ ------------------
+
+ * Feature - This release adds support for real-time (streaming) and post-call Call Analytics within Amazon Transcribe.
+
  1.44.0 (2022-11-09)
  ------------------
 
data/VERSION CHANGED
@@ -1 +1 @@
- 1.44.0
+ 1.45.0
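
To orient readers of the client diff below: the streaming half of the new Call Analytics feature lands as a `start_call_analytics_stream_transcription` operation on the async client. A bare-bones sketch of opening a session against 1.45.0 — the region, credentials, and parameter values here are placeholder assumptions, not part of this diff:

    require 'aws-sdk-transcribestreamingservice'

    # Sketch only: region/credentials and parameter values are assumptions.
    client = Aws::TranscribeStreamingService::AsyncClient.new(region: 'us-east-1')
    input_stream = Aws::TranscribeStreamingService::EventStreams::AudioStream.new

    # New operation in 1.45.0:
    async_resp = client.start_call_analytics_stream_transcription(
      language_code: 'en-US',
      media_sample_rate_hertz: 8_000,
      media_encoding: 'pcm',
      input_event_stream_handler: input_stream
    )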
@@ -271,10 +271,453 @@ module Aws::TranscribeStreamingService
 
  # @!group API Operations
 
+ # Starts a bidirectional HTTP/2 or WebSocket stream where audio is
+ # streamed to Amazon Transcribe and the transcription results are
+ # streamed to your application. Use this operation for [Call
+ # Analytics][1] transcriptions.
+ #
+ # The following parameters are required:
+ #
+ # * `language-code`
+ #
+ # * `media-encoding`
+ #
+ # * `sample-rate`
+ #
+ # For more information on streaming with Amazon Transcribe, see
+ # [Transcribing streaming audio][2].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/call-analytics.html
+ # [2]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
+ #
+ # @option params [required, String] :language_code
+ #   Specify the language code that represents the language spoken in your
+ #   audio.
+ #
+ #   If you're unsure of the language spoken in your audio, consider using
+ #   `IdentifyLanguage` to enable automatic language identification.
+ #
+ #   For a list of languages supported with streaming Call Analytics, refer
+ #   to the [Supported languages][1] table.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html
+ #
+ # @option params [required, Integer] :media_sample_rate_hertz
+ #   The sample rate of the input audio (in hertz). Low-quality audio, such
+ #   as telephone audio, is typically around 8,000 Hz. High-quality audio
+ #   typically ranges from 16,000 Hz to 48,000 Hz. Note that the sample
+ #   rate you specify must match that of your audio.
+ #
+ # @option params [required, String] :media_encoding
+ #   Specify the encoding of your input audio. Supported formats are:
+ #
+ #   * FLAC
+ #
+ #   * OPUS-encoded audio in an Ogg container
+ #
+ #   * PCM (only signed 16-bit little-endian audio formats, which does not
+ #     include WAV)
+ #
+ #   For more information, see [Media formats][1].
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio
+ #
+ # @option params [String] :vocabulary_name
+ #   Specify the name of the custom vocabulary that you want to use when
+ #   processing your transcription. Note that vocabulary names are case
+ #   sensitive.
+ #
+ #   If the language of the specified custom vocabulary doesn't match the
+ #   language identified in your media, the custom vocabulary is not
+ #   applied to your transcription.
+ #
+ #   For more information, see [Custom vocabularies][1].
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html
+ #
+ # @option params [String] :session_id
+ #   Specify a name for your Call Analytics transcription session. If you
+ #   don't include this parameter in your request, Amazon Transcribe
+ #   generates an ID and returns it in the response.
+ #
+ #   You can use a session ID to retry a streaming session.
+ #
+ # @option params [String] :vocabulary_filter_name
+ #   Specify the name of the custom vocabulary filter that you want to use
+ #   when processing your transcription. Note that vocabulary filter names
+ #   are case sensitive.
+ #
+ #   If the language of the specified custom vocabulary filter doesn't
+ #   match the language identified in your media, the vocabulary filter is
+ #   not applied to your transcription.
+ #
+ #   For more information, see [Using vocabulary filtering with unwanted
+ #   words][1].
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html
+ #
+ # @option params [String] :vocabulary_filter_method
+ #   Specify how you want your vocabulary filter applied to your
+ #   transcript.
+ #
+ #   To replace words with `***`, choose `mask`.
+ #
+ #   To delete words, choose `remove`.
+ #
+ #   To flag words without changing them, choose `tag`.
+ #
+ # @option params [String] :language_model_name
+ #   Specify the name of the custom language model that you want to use
+ #   when processing your transcription. Note that language model names are
+ #   case sensitive.
+ #
+ #   The language of the specified language model must match the language
+ #   code you specify in your transcription request. If the languages
+ #   don't match, the custom language model isn't applied. There are no
+ #   errors or warnings associated with a language mismatch.
+ #
+ #   For more information, see [Custom language models][1].
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html
+ #
+ # @option params [Boolean] :enable_partial_results_stabilization
+ #   Enables partial result stabilization for your transcription. Partial
+ #   result stabilization can reduce latency in your output, but may impact
+ #   accuracy. For more information, see [Partial-result stabilization][1].
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
+ #
+ # @option params [String] :partial_results_stability
+ #   Specify the level of stability to use when you enable partial results
+ #   stabilization (`EnablePartialResultsStabilization`).
+ #
+ #   Low stability provides the highest accuracy. High stability
+ #   transcribes faster, but with slightly lower accuracy.
+ #
+ #   For more information, see [Partial-result stabilization][1].
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
+ #
+ # @option params [String] :content_identification_type
+ #   Labels all personally identifiable information (PII) identified in
+ #   your transcript.
+ #
+ #   Content identification is performed at the segment level; PII
+ #   specified in `PiiEntityTypes` is flagged upon complete transcription
+ #   of an audio segment.
+ #
+ #   You can’t set `ContentIdentificationType` and `ContentRedactionType`
+ #   in the same request. If you set both, your request returns a
+ #   `BadRequestException`.
+ #
+ #   For more information, see [Redacting or identifying personally
+ #   identifiable information][1].
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
+ #
+ # @option params [String] :content_redaction_type
+ #   Redacts all personally identifiable information (PII) identified in
+ #   your transcript.
+ #
+ #   Content redaction is performed at the segment level; PII specified in
+ #   `PiiEntityTypes` is redacted upon complete transcription of an audio
+ #   segment.
+ #
+ #   You can’t set `ContentRedactionType` and `ContentIdentificationType`
+ #   in the same request. If you set both, your request returns a
+ #   `BadRequestException`.
+ #
+ #   For more information, see [Redacting or identifying personally
+ #   identifiable information][1].
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
+ #
+ # @option params [String] :pii_entity_types
+ #   Specify which types of personally identifiable information (PII) you
+ #   want to redact in your transcript. You can include as many types as
+ #   you'd like, or you can select `ALL`.
+ #
+ #   To include `PiiEntityTypes` in your Call Analytics request, you must
+ #   also include either `ContentIdentificationType` or
+ #   `ContentRedactionType`.
+ #
+ #   Values must be comma-separated and can include: `BANK_ACCOUNT_NUMBER`,
+ #   `BANK_ROUTING`, `CREDIT_DEBIT_NUMBER`, `CREDIT_DEBIT_CVV`,
+ #   `CREDIT_DEBIT_EXPIRY`, `PIN`, `EMAIL`, `ADDRESS`, `NAME`, `PHONE`,
+ #   `SSN`, or `ALL`.
+ #
+ # @return [Types::StartCallAnalyticsStreamTranscriptionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#request_id #request_id} => String
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#language_code #language_code} => String
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#media_sample_rate_hertz #media_sample_rate_hertz} => Integer
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#media_encoding #media_encoding} => String
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#vocabulary_name #vocabulary_name} => String
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#session_id #session_id} => String
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#call_analytics_transcript_result_stream #call_analytics_transcript_result_stream} => Types::CallAnalyticsTranscriptResultStream
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#vocabulary_filter_name #vocabulary_filter_name} => String
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#vocabulary_filter_method #vocabulary_filter_method} => String
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#language_model_name #language_model_name} => String
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#enable_partial_results_stabilization #enable_partial_results_stabilization} => Boolean
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#partial_results_stability #partial_results_stability} => String
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#content_identification_type #content_identification_type} => String
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#content_redaction_type #content_redaction_type} => String
+ #   * {Types::StartCallAnalyticsStreamTranscriptionResponse#pii_entity_types #pii_entity_types} => String
+ #
+ # @example Bi-directional EventStream Operation Example
+ #
+ #   You can signal input events after the initial request is
+ #   established; events are sent to the stream immediately once
+ #   the stream connection is established successfully.
+ #
+ #   To signal events, call the #signal methods on an Aws::TranscribeStreamingService::EventStreams::AudioStream object.
+ #   Make sure to signal events before calling #wait or #join! on the async response.
+ #
+ #     input_stream = Aws::TranscribeStreamingService::EventStreams::AudioStream.new
+ #
+ #     async_resp = client.start_call_analytics_stream_transcription( # params input,
+ #       input_event_stream_handler: input_stream) do |out_stream|
+ #
+ #       # register callbacks for events arrival
+ #       out_stream.on_utterance_event_event do |event|
+ #         event # => Aws::TranscribeStreamingService::Types::UtteranceEvent
+ #       end
+ #       out_stream.on_category_event_event do |event|
+ #         event # => Aws::TranscribeStreamingService::Types::CategoryEvent
+ #       end
+ #       out_stream.on_bad_request_exception_event do |event|
+ #         event # => Aws::TranscribeStreamingService::Types::BadRequestException
+ #       end
+ #       out_stream.on_limit_exceeded_exception_event do |event|
+ #         event # => Aws::TranscribeStreamingService::Types::LimitExceededException
+ #       end
+ #       out_stream.on_internal_failure_exception_event do |event|
+ #         event # => Aws::TranscribeStreamingService::Types::InternalFailureException
+ #       end
+ #       out_stream.on_conflict_exception_event do |event|
+ #         event # => Aws::TranscribeStreamingService::Types::ConflictException
+ #       end
+ #       out_stream.on_service_unavailable_exception_event do |event|
+ #         event # => Aws::TranscribeStreamingService::Types::ServiceUnavailableException
+ #       end
+ #
+ #     end
+ #     # => returns Seahorse::Client::AsyncResponse
+ #
+ #     # signal events
+ #     input_stream.signal_audio_event_event( ... )
+ #     input_stream.signal_configuration_event_event( ... )
+ #
+ #     # make sure to signal :end_stream at the end
+ #     input_stream.signal_end_stream
+ #
+ #     # wait until the stream is closed before finalizing the sync response
+ #     resp = async_resp.wait
+ #     # Or close the stream and finalize the sync response immediately
+ #     # resp = async_resp.join!
+ #
+ #   To process events as they arrive, you can also provide an Aws::TranscribeStreamingService::EventStreams::CallAnalyticsTranscriptResultStream
+ #   object and register callbacks on it before initializing the request, instead of processing events from the request block.
+ #
+ #     output_stream = Aws::TranscribeStreamingService::EventStreams::CallAnalyticsTranscriptResultStream.new
+ #     # register callbacks for events arrival
+ #     output_stream.on_utterance_event_event do |event|
+ #       event # => Aws::TranscribeStreamingService::Types::UtteranceEvent
+ #     end
+ #     output_stream.on_category_event_event do |event|
+ #       event # => Aws::TranscribeStreamingService::Types::CategoryEvent
+ #     end
+ #     output_stream.on_bad_request_exception_event do |event|
+ #       event # => Aws::TranscribeStreamingService::Types::BadRequestException
+ #     end
+ #     output_stream.on_limit_exceeded_exception_event do |event|
+ #       event # => Aws::TranscribeStreamingService::Types::LimitExceededException
+ #     end
+ #     output_stream.on_internal_failure_exception_event do |event|
+ #       event # => Aws::TranscribeStreamingService::Types::InternalFailureException
+ #     end
+ #     output_stream.on_conflict_exception_event do |event|
+ #       event # => Aws::TranscribeStreamingService::Types::ConflictException
+ #     end
+ #     output_stream.on_service_unavailable_exception_event do |event|
+ #       event # => Aws::TranscribeStreamingService::Types::ServiceUnavailableException
+ #     end
+ #     output_stream.on_error_event do |event|
+ #       # catch unmodeled error events in the stream
+ #       raise event
+ #       # => Aws::Errors::EventError
+ #       # event.event_type => :error
+ #       # event.error_code => String
+ #       # event.error_message => String
+ #     end
+ #
+ #     async_resp = client.start_call_analytics_stream_transcription( # params input,
+ #       input_event_stream_handler: input_stream,
+ #       output_event_stream_handler: output_stream
+ #     )
+ #
+ #     resp = async_resp.wait
+ #
+ #   Besides the usage patterns above for processing events as they arrive, you can
+ #   also iterate through the events after the response is complete.
+ #
+ #   Events are available at resp.call_analytics_transcript_result_stream # => Enumerator
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   async_resp = async_client.start_call_analytics_stream_transcription({
+ #     language_code: "en-US", # required, accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR
+ #     media_sample_rate_hertz: 1, # required
+ #     media_encoding: "pcm", # required, accepts pcm, ogg-opus, flac
+ #     vocabulary_name: "VocabularyName",
+ #     session_id: "SessionId",
+ #     input_event_stream_handler: EventStreams::AudioStream.new,
+ #     vocabulary_filter_name: "VocabularyFilterName",
+ #     vocabulary_filter_method: "remove", # accepts remove, mask, tag
+ #     language_model_name: "ModelName",
+ #     enable_partial_results_stabilization: false,
+ #     partial_results_stability: "high", # accepts high, medium, low
+ #     content_identification_type: "PII", # accepts PII
+ #     content_redaction_type: "PII", # accepts PII
+ #     pii_entity_types: "PiiEntityTypes",
+ #   })
+ #   # => Seahorse::Client::AsyncResponse
+ #   async_resp.wait
+ #   # => Seahorse::Client::Response
+ #   # Or use async_resp.join!
+ #
+ # @example Response structure
+ #
+ #   resp.request_id #=> String
+ #   resp.language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR"
+ #   resp.media_sample_rate_hertz #=> Integer
+ #   resp.media_encoding #=> String, one of "pcm", "ogg-opus", "flac"
+ #   resp.vocabulary_name #=> String
+ #   resp.session_id #=> String
+ #   All events are available at resp.call_analytics_transcript_result_stream:
+ #   resp.call_analytics_transcript_result_stream #=> Enumerator
+ #   resp.call_analytics_transcript_result_stream.event_types #=> [:utterance_event, :category_event, :bad_request_exception, :limit_exceeded_exception, :internal_failure_exception, :conflict_exception, :service_unavailable_exception]
+ #
+ #   For :utterance_event event available at #on_utterance_event_event callback and response eventstream enumerator:
+ #   event.utterance_id #=> String
+ #   event.is_partial #=> Boolean
+ #   event.participant_role #=> String, one of "AGENT", "CUSTOMER"
+ #   event.begin_offset_millis #=> Integer
+ #   event.end_offset_millis #=> Integer
+ #   event.transcript #=> String
+ #   event.items #=> Array
+ #   event.items[0].begin_offset_millis #=> Integer
+ #   event.items[0].end_offset_millis #=> Integer
+ #   event.items[0].type #=> String, one of "pronunciation", "punctuation"
+ #   event.items[0].content #=> String
+ #   event.items[0].confidence #=> Float
+ #   event.items[0].vocabulary_filter_match #=> Boolean
+ #   event.items[0].stable #=> Boolean
+ #   event.entities #=> Array
+ #   event.entities[0].begin_offset_millis #=> Integer
+ #   event.entities[0].end_offset_millis #=> Integer
+ #   event.entities[0].category #=> String
+ #   event.entities[0].type #=> String
+ #   event.entities[0].content #=> String
+ #   event.entities[0].confidence #=> Float
+ #   event.sentiment #=> String, one of "POSITIVE", "NEGATIVE", "MIXED", "NEUTRAL"
+ #   event.issues_detected #=> Array
+ #   event.issues_detected[0].character_offsets.begin #=> Integer
+ #   event.issues_detected[0].character_offsets.end #=> Integer
+ #
+ #   For :category_event event available at #on_category_event_event callback and response eventstream enumerator:
+ #   event.matched_categories #=> Array
+ #   event.matched_categories[0] #=> String
+ #   event.matched_details #=> Hash
+ #   event.matched_details["String"].timestamp_ranges #=> Array
+ #   event.matched_details["String"].timestamp_ranges[0].begin_offset_millis #=> Integer
+ #   event.matched_details["String"].timestamp_ranges[0].end_offset_millis #=> Integer
+ #
+ #   For :bad_request_exception event available at #on_bad_request_exception_event callback and response eventstream enumerator:
+ #   event.message #=> String
+ #
+ #   For :limit_exceeded_exception event available at #on_limit_exceeded_exception_event callback and response eventstream enumerator:
+ #   event.message #=> String
+ #
+ #   For :internal_failure_exception event available at #on_internal_failure_exception_event callback and response eventstream enumerator:
+ #   event.message #=> String
+ #
+ #   For :conflict_exception event available at #on_conflict_exception_event callback and response eventstream enumerator:
+ #   event.message #=> String
+ #
+ #   For :service_unavailable_exception event available at #on_service_unavailable_exception_event callback and response eventstream enumerator:
+ #   event.message #=> String
+ #
+ #   resp.vocabulary_filter_name #=> String
+ #   resp.vocabulary_filter_method #=> String, one of "remove", "mask", "tag"
+ #   resp.language_model_name #=> String
+ #   resp.enable_partial_results_stabilization #=> Boolean
+ #   resp.partial_results_stability #=> String, one of "high", "medium", "low"
+ #   resp.content_identification_type #=> String, one of "PII"
+ #   resp.content_redaction_type #=> String, one of "PII"
+ #   resp.pii_entity_types #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartCallAnalyticsStreamTranscription AWS API Documentation
+ #
+ # @overload start_call_analytics_stream_transcription(params = {})
+ # @param [Hash] params ({})
+ def start_call_analytics_stream_transcription(params = {}, options = {}, &block)
+   params = params.dup
+   input_event_stream_handler = _event_stream_handler(
+     :input,
+     params.delete(:input_event_stream_handler),
+     EventStreams::AudioStream
+   )
+   output_event_stream_handler = _event_stream_handler(
+     :output,
+     params.delete(:output_event_stream_handler) || params.delete(:event_stream_handler),
+     EventStreams::CallAnalyticsTranscriptResultStream
+   )
+
+   yield(output_event_stream_handler) if block_given?
+
+   req = build_request(:start_call_analytics_stream_transcription, params)
+
+   req.context[:input_event_stream_handler] = input_event_stream_handler
+   req.handlers.add(Aws::Binary::EncodeHandler, priority: 55)
+   req.context[:output_event_stream_handler] = output_event_stream_handler
+   req.handlers.add(Aws::Binary::DecodeHandler, priority: 55)
+
+   req.send_request(options, &block)
+ end
+
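
Putting the pieces above together, a minimal end-to-end sketch of the new operation might look like the following. This is illustrative only: the region, the raw PCM file, the `audio_chunk` parameter name, and the configuration-event argument shape (channel definitions mapping channels to AGENT/CUSTOMER roles) are assumptions based on the Call Analytics API, since the doc example above elides the signal arguments.

    require 'aws-sdk-transcribestreamingservice'

    # Sketch only: region/credentials and the audio source are assumptions.
    client = Aws::TranscribeStreamingService::AsyncClient.new(region: 'us-east-1')
    input_stream = Aws::TranscribeStreamingService::EventStreams::AudioStream.new

    async_resp = client.start_call_analytics_stream_transcription(
      language_code: 'en-US',
      media_sample_rate_hertz: 8_000,
      media_encoding: 'pcm',
      input_event_stream_handler: input_stream
    ) do |out_stream|
      out_stream.on_utterance_event_event do |event|
        # Print only stabilized (non-partial) utterances.
        puts "#{event.participant_role}: #{event.transcript}" unless event.is_partial
      end
    end

    # Call Analytics expects a configuration event before any audio; the
    # channel_definitions shape here is an assumption (see note above).
    input_stream.signal_configuration_event_event(
      channel_definitions: [
        { channel_id: 0, participant_role: 'AGENT' },
        { channel_id: 1, participant_role: 'CUSTOMER' }
      ]
    )

    # Stream raw PCM audio in small chunks, then close the input stream.
    File.open('call_audio.raw', 'rb') do |file|
      while (chunk = file.read(4096))
        input_stream.signal_audio_event_event(audio_chunk: chunk)
      end
    end
    input_stream.signal_end_stream

    resp = async_resp.wait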
  # Starts a bidirectional HTTP/2 or WebSocket stream where audio is
  # streamed to Amazon Transcribe Medical and the transcription results
  # are streamed to your application.
  #
+ # The following parameters are required:
+ #
+ # * `language-code`
+ #
+ # * `media-encoding`
+ #
+ # * `sample-rate`
+ #
  # For more information on streaming with Amazon Transcribe Medical, see
  # [Transcribing streaming audio][1].
  #
@@ -430,6 +873,7 @@ module Aws::TranscribeStreamingService
  #
  #   # signal events
  #   input_stream.signal_audio_event_event( ... )
+ #   input_stream.signal_configuration_event_event( ... )
  #
  #   # make sure to signal :end_stream at the end
  #   input_stream.signal_end_stream
@@ -594,15 +1038,13 @@ module Aws::TranscribeStreamingService
  # streamed to Amazon Transcribe and the transcription results are
  # streamed to your application.
  #
- # The following are encoded as headers:
- #
- # * language-code
+ # The following parameters are required:
  #
- # * media-encoding
+ # * `language-code` or `identify-language`
  #
- # * sample-rate
+ # * `media-encoding`
  #
- # * session-id
+ # * `sample-rate`
  #
  # For more information on streaming with Amazon Transcribe, see
  # [Transcribing streaming audio][1].
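
For the `identify-language` alternative noted above, a request can swap the fixed `language_code` for automatic identification. A sketch (the region and the candidate `language_options` list are illustrative assumptions; `identify_language` and `language_options` are pre-existing parameters of this operation, not added in this release):

    require 'aws-sdk-transcribestreamingservice'

    async_client = Aws::TranscribeStreamingService::AsyncClient.new(region: 'us-east-1')
    input_stream = Aws::TranscribeStreamingService::EventStreams::AudioStream.new

    async_resp = async_client.start_stream_transcription(
      identify_language: true,
      language_options: 'en-US,es-US', # comma-separated candidate languages
      media_sample_rate_hertz: 16_000,
      media_encoding: 'pcm',
      input_event_stream_handler: input_stream
    ) do |out_stream|
      out_stream.on_transcript_event_event do |event|
        event # => Aws::TranscribeStreamingService::Types::TranscriptEvent
      end
    end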
@@ -632,7 +1074,7 @@ module Aws::TranscribeStreamingService
  #   rate you specify must match that of your audio.
  #
  # @option params [required, String] :media_encoding
- #   Specify the encoding used for the input audio. Supported formats are:
+ #   Specify the encoding of your input audio. Supported formats are:
  #
  #   * FLAC
  #
@@ -653,7 +1095,8 @@ module Aws::TranscribeStreamingService
  #   sensitive.
  #
  #   If the language of the specified custom vocabulary doesn't match the
- #   language identified in your media, your job fails.
+ #   language identified in your media, the custom vocabulary is not
+ #   applied to your transcription.
  #
  #   This parameter is **not** intended for use with the `IdentifyLanguage`
  #   parameter. If you're including `IdentifyLanguage` in your request and
@@ -679,7 +1122,8 @@ module Aws::TranscribeStreamingService
  #   are case sensitive.
  #
  #   If the language of the specified custom vocabulary filter doesn't
- #   match the language identified in your media, your job fails.
+ #   match the language identified in your media, the vocabulary filter is
+ #   not applied to your transcription.
  #
  #   This parameter is **not** intended for use with the `IdentifyLanguage`
  #   parameter. If you're including `IdentifyLanguage` in your request and
@@ -815,8 +1259,8 @@ module Aws::TranscribeStreamingService
  #
  #   The language of the specified language model must match the language
  #   code you specify in your transcription request. If the languages
- #   don't match, the language model isn't applied. There are no errors
- #   or warnings associated with a language mismatch.
+ #   don't match, the custom language model isn't applied. There are no
+ #   errors or warnings associated with a language mismatch.
  #
  #   For more information, see [Custom language models][1].
  #
@@ -981,6 +1425,7 @@ module Aws::TranscribeStreamingService
  #
  #   # signal events
  #   input_stream.signal_audio_event_event( ... )
+ #   input_stream.signal_configuration_event_event( ... )
  #
  #   # make sure to signal :end_stream at the end
  #   input_stream.signal_end_stream
@@ -1182,7 +1627,7 @@ module Aws::TranscribeStreamingService
  http_response: Seahorse::Client::Http::AsyncResponse.new,
  config: config)
  context[:gem_name] = 'aws-sdk-transcribestreamingservice'
- context[:gem_version] = '1.44.0'
+ context[:gem_version] = '1.45.0'
  Seahorse::Client::Request.new(handlers, context)
  end
 
@@ -392,7 +392,7 @@ module Aws::TranscribeStreamingService
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-transcribestreamingservice'
- context[:gem_version] = '1.44.0'
+ context[:gem_version] = '1.45.0'
  Seahorse::Client::Request.new(handlers, context)
  end