aws-sdk-transcribestreamingservice 1.44.0 → 1.46.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -47,13 +47,6 @@ module Aws::TranscribeStreamingService
47
47
  #
48
48
  # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/event-stream.html
49
49
  #
50
- # @note When making an API call, you may pass AudioEvent
51
- # data as a hash:
52
- #
53
- # {
54
- # audio_chunk: "data",
55
- # }
56
- #
57
50
  # @!attribute [rw] audio_chunk
58
51
  # An audio blob that contains the next part of the audio that you want
59
52
  # to transcribe. The maximum audio chunk size is 32 KB.
@@ -68,8 +61,9 @@ module Aws::TranscribeStreamingService
68
61
  include Aws::Structure
69
62
  end
70
63
 
71
- # One or more arguments to the `StartStreamTranscription` or
72
- # `StartMedicalStreamTranscription` operation was not valid. For
64
+ # One or more arguments to the `StartStreamTranscription`,
65
+ # `StartMedicalStreamTranscription`, or
66
+ # `StartCallAnalyticsStreamTranscription` operation was not valid. For
73
67
  # example, `MediaEncoding` or `LanguageCode` used invalid values.
74
68
  # Check the specified parameters and try your request again.
75
69
  #
@@ -85,6 +79,209 @@ module Aws::TranscribeStreamingService
85
79
  include Aws::Structure
86
80
  end
87
81
 
82
+ # Contains entities identified as personally identifiable information
83
+ # (PII) in your transcription output, along with various associated
84
+ # attributes. Examples include category, confidence score, content,
85
+ # type, and start and end times.
86
+ #
87
+ # @!attribute [rw] begin_offset_millis
88
+ # The time, in milliseconds, from the beginning of the audio stream to
89
+ # the start of the identified entity.
90
+ # @return [Integer]
91
+ #
92
+ # @!attribute [rw] end_offset_millis
93
+ # The time, in milliseconds, from the beginning of the audio stream to
94
+ # the end of the identified entity.
95
+ # @return [Integer]
96
+ #
97
+ # @!attribute [rw] category
98
+ # The category of information identified. For example, `PII`.
99
+ # @return [String]
100
+ #
101
+ # @!attribute [rw] type
102
+ # The type of PII identified. For example, `NAME` or
103
+ # `CREDIT_DEBIT_NUMBER`.
104
+ # @return [String]
105
+ #
106
+ # @!attribute [rw] content
107
+ # The word or words that represent the identified entity.
108
+ # @return [String]
109
+ #
110
+ # @!attribute [rw] confidence
111
+ # The confidence score associated with the identification of an entity
112
+ # in your transcript.
113
+ #
114
+ # Confidence scores are values between 0 and 1. A larger value
115
+ # indicates a higher probability that the identified entity correctly
116
+ # matches the entity spoken in your media.
117
+ # @return [Float]
118
+ #
119
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/CallAnalyticsEntity AWS API Documentation
120
+ #
121
+ class CallAnalyticsEntity < Struct.new(
122
+ :begin_offset_millis,
123
+ :end_offset_millis,
124
+ :category,
125
+ :type,
126
+ :content,
127
+ :confidence)
128
+ SENSITIVE = []
129
+ include Aws::Structure
130
+ end
131
+
132
+ # A word, phrase, or punctuation mark in your Call Analytics
133
+ # transcription output, along with various associated attributes, such
134
+ # as confidence score, type, and start and end times.
135
+ #
136
+ # @!attribute [rw] begin_offset_millis
137
+ # The time, in milliseconds, from the beginning of the audio stream to
138
+ # the start of the identified item.
139
+ # @return [Integer]
140
+ #
141
+ # @!attribute [rw] end_offset_millis
142
+ # The time, in milliseconds, from the beginning of the audio stream to
143
+ # the end of the identified item.
144
+ # @return [Integer]
145
+ #
146
+ # @!attribute [rw] type
147
+ # The type of item identified. Options are: `PRONUNCIATION` (spoken
148
+ # words) and `PUNCTUATION`.
149
+ # @return [String]
150
+ #
151
+ # @!attribute [rw] content
152
+ # The word or punctuation that was transcribed.
153
+ # @return [String]
154
+ #
155
+ # @!attribute [rw] confidence
156
+ # The confidence score associated with a word or phrase in your
157
+ # transcript.
158
+ #
159
+ # Confidence scores are values between 0 and 1. A larger value
160
+ # indicates a higher probability that the identified item correctly
161
+ # matches the item spoken in your media.
162
+ # @return [Float]
163
+ #
164
+ # @!attribute [rw] vocabulary_filter_match
165
+ # Indicates whether the specified item matches a word in the
166
+ # vocabulary filter included in your Call Analytics request. If
167
+ # `true`, there is a vocabulary filter match.
168
+ # @return [Boolean]
169
+ #
170
+ # @!attribute [rw] stable
171
+ # If partial result stabilization is enabled, `Stable` indicates
172
+ # whether the specified item is stable (`true`) or if it may change
173
+ # when the segment is complete (`false`).
174
+ # @return [Boolean]
175
+ #
176
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/CallAnalyticsItem AWS API Documentation
177
+ #
178
+ class CallAnalyticsItem < Struct.new(
179
+ :begin_offset_millis,
180
+ :end_offset_millis,
181
+ :type,
182
+ :content,
183
+ :confidence,
184
+ :vocabulary_filter_match,
185
+ :stable)
186
+ SENSITIVE = []
187
+ include Aws::Structure
188
+ end
189
+
190
+ # Provides information on any `TranscriptFilterType` categories that
191
+ # matched your transcription output. Matches are identified for each
192
+ # segment upon completion of that segment.
193
+ #
194
+ # @!attribute [rw] matched_categories
195
+ # Lists the categories that were matched in your audio segment.
196
+ # @return [Array<String>]
197
+ #
198
+ # @!attribute [rw] matched_details
199
+ # Contains information about the matched categories, including
200
+ # category names and timestamps.
201
+ # @return [Hash<String,Types::PointsOfInterest>]
202
+ #
203
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/CategoryEvent AWS API Documentation
204
+ #
205
+ class CategoryEvent < Struct.new(
206
+ :matched_categories,
207
+ :matched_details,
208
+ :event_type)
209
+ SENSITIVE = []
210
+ include Aws::Structure
211
+ end
212
+
213
+ # Makes it possible to specify which speaker is on which audio channel.
214
+ # For example, if your agent is the first participant to speak, you
215
+ # would set `ChannelId` to `0` (to indicate the first channel) and
216
+ # `ParticipantRole` to `AGENT` (to indicate that it's the agent
217
+ # speaking).
218
+ #
219
+ # @!attribute [rw] channel_id
220
+ # Specify the audio channel you want to define.
221
+ # @return [Integer]
222
+ #
223
+ # @!attribute [rw] participant_role
224
+ # Specify the speaker you want to define. Omitting this parameter is
225
+ # equivalent to specifying both participants.
226
+ # @return [String]
227
+ #
228
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/ChannelDefinition AWS API Documentation
229
+ #
230
+ class ChannelDefinition < Struct.new(
231
+ :channel_id,
232
+ :participant_role)
233
+ SENSITIVE = []
234
+ include Aws::Structure
235
+ end
236
+
237
+ # Provides the location, using character count, in your transcript where
238
+ # a match is identified. For example, the location of an issue or a
239
+ # category match within a segment.
240
+ #
241
+ # @!attribute [rw] begin
242
+ # Provides the character count of the first character where a match is
243
+ # identified. For example, the first character associated with an
244
+ # issue or a category match in a segment transcript.
245
+ # @return [Integer]
246
+ #
247
+ # @!attribute [rw] end
248
+ # Provides the character count of the last character where a match is
249
+ # identified. For example, the last character associated with an issue
250
+ # or a category match in a segment transcript.
251
+ # @return [Integer]
252
+ #
253
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/CharacterOffsets AWS API Documentation
254
+ #
255
+ class CharacterOffsets < Struct.new(
256
+ :begin,
257
+ :end)
258
+ SENSITIVE = []
259
+ include Aws::Structure
260
+ end
261
+
262
+ # Allows you to set audio channel definitions and post-call analytics
263
+ # settings.
264
+ #
265
+ # @!attribute [rw] channel_definitions
266
+ # Indicates which speaker is on which audio channel.
267
+ # @return [Array<Types::ChannelDefinition>]
268
+ #
269
+ # @!attribute [rw] post_call_analytics_settings
270
+ # Provides additional optional settings for your Call Analytics
271
+ # post-call request, including encryption and output locations for
272
+ # your redacted and unredacted transcript.
273
+ # @return [Types::PostCallAnalyticsSettings]
274
+ #
275
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/ConfigurationEvent AWS API Documentation
276
+ #
277
+ class ConfigurationEvent < Struct.new(
278
+ :channel_definitions,
279
+ :post_call_analytics_settings,
280
+ :event_type)
281
+ SENSITIVE = []
282
+ include Aws::Structure
283
+ end
284
+
88
285
  # A new stream started with the same session ID. The current stream has
89
286
  # been terminated.
90
287
  #
@@ -165,6 +362,21 @@ module Aws::TranscribeStreamingService
165
362
  include Aws::Structure
166
363
  end
167
364
 
365
+ # Lists the issues that were identified in your audio segment.
366
+ #
367
+ # @!attribute [rw] character_offsets
368
+ # Provides the timestamps that identify when in an audio segment the
369
+ # specified issue occurs.
370
+ # @return [Types::CharacterOffsets]
371
+ #
372
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/IssueDetected AWS API Documentation
373
+ #
374
+ class IssueDetected < Struct.new(
375
+ :character_offsets)
376
+ SENSITIVE = []
377
+ include Aws::Structure
378
+ end
379
+
168
380
  # A word, phrase, or punctuation mark in your transcription output,
169
381
  # along with various associated attributes, such as confidence score,
170
382
  # type, and start and end times.
@@ -486,6 +698,112 @@ module Aws::TranscribeStreamingService
486
698
  include Aws::Structure
487
699
  end
488
700
 
701
+ # Contains the timestamps of matched categories.
702
+ #
703
+ # @!attribute [rw] timestamp_ranges
704
+ # Contains the timestamp ranges (start time through end time) of
705
+ # matched categories and rules.
706
+ # @return [Array<Types::TimestampRange>]
707
+ #
708
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/PointsOfInterest AWS API Documentation
709
+ #
710
+ class PointsOfInterest < Struct.new(
711
+ :timestamp_ranges)
712
+ SENSITIVE = []
713
+ include Aws::Structure
714
+ end
715
+
716
+ # Allows you to specify additional settings for your streaming Call
717
+ # Analytics post-call request, including output locations for your
718
+ # redacted and unredacted transcript, which IAM role to use, and,
719
+ # optionally, which encryption key to use.
720
+ #
721
+ # `ContentRedactionOutput`, `DataAccessRoleArn`, and `OutputLocation`
722
+ # are required fields.
723
+ #
724
+ # @!attribute [rw] output_location
725
+ # The Amazon S3 location where you want your Call Analytics post-call
726
+ # transcription output stored. You can use any of the following
727
+ # formats to specify the output location:
728
+ #
729
+ # 1. s3://DOC-EXAMPLE-BUCKET
730
+ #
731
+ # 2. s3://DOC-EXAMPLE-BUCKET/my-output-folder/
732
+ #
733
+ # 3. s3://DOC-EXAMPLE-BUCKET/my-output-folder/my-call-analytics-job.json
734
+ # @return [String]
735
+ #
736
+ # @!attribute [rw] data_access_role_arn
737
+ # The Amazon Resource Name (ARN) of an IAM role that has permissions
738
+ # to access the Amazon S3 bucket that contains your input files. If
739
+ # the role that you specify doesn’t have the appropriate permissions
740
+ # to access the specified Amazon S3 location, your request fails.
741
+ #
742
+ # IAM role ARNs have the format
743
+ # `arn:partition:iam::account:role/role-name-with-path`. For example:
744
+ # `arn:aws:iam::111122223333:role/Admin`. For more information, see
745
+ # [IAM ARNs][1].
746
+ #
747
+ #
748
+ #
749
+ # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns
750
+ # @return [String]
751
+ #
752
+ # @!attribute [rw] content_redaction_output
753
+ # Specify whether you want only a redacted transcript or both a
754
+ # redacted and an unredacted transcript. If you choose redacted and
755
+ # unredacted, two JSON files are generated and stored in the Amazon S3
756
+ # output location you specify.
757
+ #
758
+ # Note that to include `ContentRedactionOutput` in your request, you
759
+ # must enable content redaction (`ContentRedactionType`).
760
+ # @return [String]
761
+ #
762
+ # @!attribute [rw] output_encryption_kms_key_id
763
+ # The KMS key you want to use to encrypt your Call Analytics post-call
764
+ # output.
765
+ #
766
+ # If using a key located in the **current** Amazon Web Services
767
+ # account, you can specify your KMS key in one of four ways:
768
+ #
769
+ # 1. Use the KMS key ID itself. For example,
770
+ # `1234abcd-12ab-34cd-56ef-1234567890ab`.
771
+ #
772
+ # 2. Use an alias for the KMS key ID. For example,
773
+ # `alias/ExampleAlias`.
774
+ #
775
+ # 3. Use the Amazon Resource Name (ARN) for the KMS key ID. For
776
+ # example,
777
+ # `arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab`.
778
+ #
779
+ # 4. Use the ARN for the KMS key alias. For example,
780
+ # `arn:aws:kms:region:account-ID:alias/ExampleAlias`.
781
+ #
782
+ # If using a key located in a **different** Amazon Web Services
783
+ # account than the current Amazon Web Services account, you can
784
+ # specify your KMS key in one of two ways:
785
+ #
786
+ # 1. Use the ARN for the KMS key ID. For example,
787
+ # `arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab`.
788
+ #
789
+ # 2. Use the ARN for the KMS key alias. For example,
790
+ # `arn:aws:kms:region:account-ID:alias/ExampleAlias`.
791
+ #
792
+ # Note that the user making the request must have permission to use
793
+ # the specified KMS key.
794
+ # @return [String]
795
+ #
796
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/PostCallAnalyticsSettings AWS API Documentation
797
+ #
798
+ class PostCallAnalyticsSettings < Struct.new(
799
+ :output_location,
800
+ :data_access_role_arn,
801
+ :content_redaction_output,
802
+ :output_encryption_kms_key_id)
803
+ SENSITIVE = []
804
+ include Aws::Structure
805
+ end
806
+
489
807
  # The `Result` associated with a `TranscriptEvent`.
490
808
  #
491
809
  # Contains a set of transcription results from one or more audio
@@ -520,7 +838,7 @@ module Aws::TranscribeStreamingService
520
838
  # @return [Array<Types::Alternative>]
521
839
  #
522
840
  # @!attribute [rw] channel_id
523
- # Indicates the channel identified for the `Result`.
841
+ # Indicates which audio channel is associated with the `Result`.
524
842
  # @return [String]
525
843
  #
526
844
  # @!attribute [rw] language_code
@@ -565,24 +883,322 @@ module Aws::TranscribeStreamingService
565
883
  include Aws::Structure
566
884
  end
567
885
 
568
- # @note When making an API call, you may pass StartMedicalStreamTranscriptionRequest
569
- # data as a hash:
570
- #
571
- # {
572
- # language_code: "en-US", # required, accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN, hi-IN, th-TH
573
- # media_sample_rate_hertz: 1, # required
574
- # media_encoding: "pcm", # required, accepts pcm, ogg-opus, flac
575
- # vocabulary_name: "VocabularyName",
576
- # specialty: "PRIMARYCARE", # required, accepts PRIMARYCARE, CARDIOLOGY, NEUROLOGY, ONCOLOGY, RADIOLOGY, UROLOGY
577
- # type: "CONVERSATION", # required, accepts CONVERSATION, DICTATION
578
- # show_speaker_label: false,
579
- # session_id: "SessionId",
580
- # input_event_stream_hander: EventStreams::AudioStream.new,
581
- # enable_channel_identification: false,
582
- # number_of_channels: 1,
583
- # content_identification_type: "PHI", # accepts PHI
584
- # }
886
+ # @!attribute [rw] language_code
887
+ # Specify the language code that represents the language spoken in
888
+ # your audio.
889
+ #
890
+ # If you're unsure of the language spoken in your audio, consider
891
+ # using `IdentifyLanguage` to enable automatic language
892
+ # identification.
893
+ #
894
+ # For a list of languages supported with streaming Call Analytics,
895
+ # refer to the [Supported languages][1] table.
896
+ #
897
+ #
898
+ #
899
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html
900
+ # @return [String]
901
+ #
902
+ # @!attribute [rw] media_sample_rate_hertz
903
+ # The sample rate of the input audio (in hertz). Low-quality audio,
904
+ # such as telephone audio, is typically around 8,000 Hz. High-quality
905
+ # audio typically ranges from 16,000 Hz to 48,000 Hz. Note that the
906
+ # sample rate you specify must match that of your audio.
907
+ # @return [Integer]
908
+ #
909
+ # @!attribute [rw] media_encoding
910
+ # Specify the encoding of your input audio. Supported formats are:
911
+ #
912
+ # * FLAC
913
+ #
914
+ # * OPUS-encoded audio in an Ogg container
915
+ #
916
+ # * PCM (only signed 16-bit little-endian audio formats, which does
917
+ # not include WAV)
918
+ #
919
+ # For more information, see [Media formats][1].
920
+ #
921
+ #
922
+ #
923
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio
924
+ # @return [String]
925
+ #
926
+ # @!attribute [rw] vocabulary_name
927
+ # Specify the name of the custom vocabulary that you want to use when
928
+ # processing your transcription. Note that vocabulary names are case
929
+ # sensitive.
930
+ #
931
+ # If the language of the specified custom vocabulary doesn't match
932
+ # the language identified in your media, the custom vocabulary is not
933
+ # applied to your transcription.
934
+ #
935
+ # For more information, see [Custom vocabularies][1].
936
+ #
937
+ #
938
+ #
939
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html
940
+ # @return [String]
941
+ #
942
+ # @!attribute [rw] session_id
943
+ # Specify a name for your Call Analytics transcription session. If you
944
+ # don't include this parameter in your request, Amazon Transcribe
945
+ # generates an ID and returns it in the response.
946
+ #
947
+ # You can use a session ID to retry a streaming session.
948
+ # @return [String]
949
+ #
950
+ # @!attribute [rw] audio_stream
951
+ # An encoded stream of audio blobs. Audio streams are encoded as
952
+ # either HTTP/2 or WebSocket data frames.
953
+ #
954
+ # For more information, see [Transcribing streaming audio][1].
955
+ #
956
+ #
957
+ #
958
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
959
+ # @return [Types::AudioStream]
960
+ #
961
+ # @!attribute [rw] vocabulary_filter_name
962
+ # Specify the name of the custom vocabulary filter that you want to
963
+ # use when processing your transcription. Note that vocabulary filter
964
+ # names are case sensitive.
965
+ #
966
+ # If the language of the specified custom vocabulary filter doesn't
967
+ # match the language identified in your media, the vocabulary filter
968
+ # is not applied to your transcription.
969
+ #
970
+ # For more information, see [Using vocabulary filtering with unwanted
971
+ # words][1].
972
+ #
973
+ #
974
+ #
975
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html
976
+ # @return [String]
977
+ #
978
+ # @!attribute [rw] vocabulary_filter_method
979
+ # Specify how you want your vocabulary filter applied to your
980
+ # transcript.
981
+ #
982
+ # To replace words with `***`, choose `mask`.
983
+ #
984
+ # To delete words, choose `remove`.
985
+ #
986
+ # To flag words without changing them, choose `tag`.
987
+ # @return [String]
988
+ #
989
+ # @!attribute [rw] language_model_name
990
+ # Specify the name of the custom language model that you want to use
991
+ # when processing your transcription. Note that language model names
992
+ # are case sensitive.
993
+ #
994
+ # The language of the specified language model must match the language
995
+ # code you specify in your transcription request. If the languages
996
+ # don't match, the custom language model isn't applied. There are no
997
+ # errors or warnings associated with a language mismatch.
998
+ #
999
+ # For more information, see [Custom language models][1].
1000
+ #
1001
+ #
1002
+ #
1003
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html
1004
+ # @return [String]
1005
+ #
1006
+ # @!attribute [rw] enable_partial_results_stabilization
1007
+ # Enables partial result stabilization for your transcription. Partial
1008
+ # result stabilization can reduce latency in your output, but may
1009
+ # impact accuracy. For more information, see [Partial-result
1010
+ # stabilization][1].
1011
+ #
1012
+ #
1013
+ #
1014
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
1015
+ # @return [Boolean]
1016
+ #
1017
+ # @!attribute [rw] partial_results_stability
1018
+ # Specify the level of stability to use when you enable partial
1019
+ # results stabilization (`EnablePartialResultsStabilization`).
1020
+ #
1021
+ # Low stability provides the highest accuracy. High stability
1022
+ # transcribes faster, but with slightly lower accuracy.
1023
+ #
1024
+ # For more information, see [Partial-result stabilization][1].
1025
+ #
1026
+ #
1027
+ #
1028
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
1029
+ # @return [String]
1030
+ #
1031
+ # @!attribute [rw] content_identification_type
1032
+ # Labels all personally identifiable information (PII) identified in
1033
+ # your transcript.
1034
+ #
1035
+ # Content identification is performed at the segment level; PII
1036
+ # specified in `PiiEntityTypes` is flagged upon complete transcription
1037
+ # of an audio segment.
1038
+ #
1039
+ # You can’t set `ContentIdentificationType` and `ContentRedactionType`
1040
+ # in the same request. If you set both, your request returns a
1041
+ # `BadRequestException`.
1042
+ #
1043
+ # For more information, see [Redacting or identifying personally
1044
+ # identifiable information][1].
1045
+ #
1046
+ #
1047
+ #
1048
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
1049
+ # @return [String]
1050
+ #
1051
+ # @!attribute [rw] content_redaction_type
1052
+ # Redacts all personally identifiable information (PII) identified in
1053
+ # your transcript.
1054
+ #
1055
+ # Content redaction is performed at the segment level; PII specified
1056
+ # in `PiiEntityTypes` is redacted upon complete transcription of an
1057
+ # audio segment.
1058
+ #
1059
+ # You can’t set `ContentRedactionType` and `ContentIdentificationType`
1060
+ # in the same request. If you set both, your request returns a
1061
+ # `BadRequestException`.
1062
+ #
1063
+ # For more information, see [Redacting or identifying personally
1064
+ # identifiable information][1].
1065
+ #
1066
+ #
1067
+ #
1068
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
1069
+ # @return [String]
1070
+ #
1071
+ # @!attribute [rw] pii_entity_types
1072
+ # Specify which types of personally identifiable information (PII) you
1073
+ # want to redact in your transcript. You can include as many types as
1074
+ # you'd like, or you can select `ALL`.
1075
+ #
1076
+ # To include `PiiEntityTypes` in your Call Analytics request, you must
1077
+ # also include either `ContentIdentificationType` or
1078
+ # `ContentRedactionType`.
1079
+ #
1080
+ # Values must be comma-separated and can include:
1081
+ # `BANK_ACCOUNT_NUMBER`, `BANK_ROUTING`, `CREDIT_DEBIT_NUMBER`,
1082
+ # `CREDIT_DEBIT_CVV`, `CREDIT_DEBIT_EXPIRY`, `PIN`, `EMAIL`,
1083
+ # `ADDRESS`, `NAME`, `PHONE`, `SSN`, or `ALL`.
1084
+ # @return [String]
1085
+ #
1086
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartCallAnalyticsStreamTranscriptionRequest AWS API Documentation
1087
+ #
1088
+ class StartCallAnalyticsStreamTranscriptionRequest < Struct.new(
1089
+ :language_code,
1090
+ :media_sample_rate_hertz,
1091
+ :media_encoding,
1092
+ :vocabulary_name,
1093
+ :session_id,
1094
+ :audio_stream,
1095
+ :vocabulary_filter_name,
1096
+ :vocabulary_filter_method,
1097
+ :language_model_name,
1098
+ :enable_partial_results_stabilization,
1099
+ :partial_results_stability,
1100
+ :content_identification_type,
1101
+ :content_redaction_type,
1102
+ :pii_entity_types)
1103
+ SENSITIVE = []
1104
+ include Aws::Structure
1105
+ end
1106
+
1107
+ # @!attribute [rw] request_id
1108
+ # Provides the identifier for your Call Analytics streaming request.
1109
+ # @return [String]
585
1110
  #
1111
+ # @!attribute [rw] language_code
1112
+ # Provides the language code that you specified in your Call Analytics
1113
+ # request.
1114
+ # @return [String]
1115
+ #
1116
+ # @!attribute [rw] media_sample_rate_hertz
1117
+ # Provides the sample rate that you specified in your Call Analytics
1118
+ # request.
1119
+ # @return [Integer]
1120
+ #
1121
+ # @!attribute [rw] media_encoding
1122
+ # Provides the media encoding you specified in your Call Analytics
1123
+ # request.
1124
+ # @return [String]
1125
+ #
1126
+ # @!attribute [rw] vocabulary_name
1127
+ # Provides the name of the custom vocabulary that you specified in
1128
+ # your Call Analytics request.
1129
+ # @return [String]
1130
+ #
1131
+ # @!attribute [rw] session_id
1132
+ # Provides the identifier for your Call Analytics transcription
1133
+ # session.
1134
+ # @return [String]
1135
+ #
1136
+ # @!attribute [rw] call_analytics_transcript_result_stream
1137
+ # Provides detailed information about your Call Analytics streaming
1138
+ # session.
1139
+ # @return [Types::CallAnalyticsTranscriptResultStream]
1140
+ #
1141
+ # @!attribute [rw] vocabulary_filter_name
1142
+ # Provides the name of the custom vocabulary filter that you specified
1143
+ # in your Call Analytics request.
1144
+ # @return [String]
1145
+ #
1146
+ # @!attribute [rw] vocabulary_filter_method
1147
+ # Provides the vocabulary filtering method used in your Call Analytics
1148
+ # transcription.
1149
+ # @return [String]
1150
+ #
1151
+ # @!attribute [rw] language_model_name
1152
+ # Provides the name of the custom language model that you specified in
1153
+ # your Call Analytics request.
1154
+ # @return [String]
1155
+ #
1156
+ # @!attribute [rw] enable_partial_results_stabilization
1157
+ # Shows whether partial results stabilization was enabled for your
1158
+ # Call Analytics transcription.
1159
+ # @return [Boolean]
1160
+ #
1161
+ # @!attribute [rw] partial_results_stability
1162
+ # Provides the stabilization level used for your transcription.
1163
+ # @return [String]
1164
+ #
1165
+ # @!attribute [rw] content_identification_type
1166
+ # Shows whether content identification was enabled for your Call
1167
+ # Analytics transcription.
1168
+ # @return [String]
1169
+ #
1170
+ # @!attribute [rw] content_redaction_type
1171
+ # Shows whether content redaction was enabled for your Call Analytics
1172
+ # transcription.
1173
+ # @return [String]
1174
+ #
1175
+ # @!attribute [rw] pii_entity_types
1176
+ # Lists the PII entity types you specified in your Call Analytics
1177
+ # request.
1178
+ # @return [String]
1179
+ #
1180
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartCallAnalyticsStreamTranscriptionResponse AWS API Documentation
1181
+ #
1182
+ class StartCallAnalyticsStreamTranscriptionResponse < Struct.new(
1183
+ :request_id,
1184
+ :language_code,
1185
+ :media_sample_rate_hertz,
1186
+ :media_encoding,
1187
+ :vocabulary_name,
1188
+ :session_id,
1189
+ :call_analytics_transcript_result_stream,
1190
+ :vocabulary_filter_name,
1191
+ :vocabulary_filter_method,
1192
+ :language_model_name,
1193
+ :enable_partial_results_stabilization,
1194
+ :partial_results_stability,
1195
+ :content_identification_type,
1196
+ :content_redaction_type,
1197
+ :pii_entity_types)
1198
+ SENSITIVE = []
1199
+ include Aws::Structure
1200
+ end
1201
+
586
1202
  # @!attribute [rw] language_code
587
1203
  # Specify the language code that represents the language spoken in
588
1204
  # your audio.
@@ -795,34 +1411,6 @@ module Aws::TranscribeStreamingService
795
1411
  include Aws::Structure
796
1412
  end
797
1413
 
798
- # @note When making an API call, you may pass StartStreamTranscriptionRequest
799
- # data as a hash:
800
- #
801
- # {
802
- # language_code: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN, hi-IN, th-TH
803
- # media_sample_rate_hertz: 1, # required
804
- # media_encoding: "pcm", # required, accepts pcm, ogg-opus, flac
805
- # vocabulary_name: "VocabularyName",
806
- # session_id: "SessionId",
807
- # input_event_stream_hander: EventStreams::AudioStream.new,
808
- # vocabulary_filter_name: "VocabularyFilterName",
809
- # vocabulary_filter_method: "remove", # accepts remove, mask, tag
810
- # show_speaker_label: false,
811
- # enable_channel_identification: false,
812
- # number_of_channels: 1,
813
- # enable_partial_results_stabilization: false,
814
- # partial_results_stability: "high", # accepts high, medium, low
815
- # content_identification_type: "PII", # accepts PII
816
- # content_redaction_type: "PII", # accepts PII
817
- # pii_entity_types: "PiiEntityTypes",
818
- # language_model_name: "ModelName",
819
- # identify_language: false,
820
- # language_options: "LanguageOptions",
821
- # preferred_language: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN, hi-IN, th-TH
822
- # vocabulary_names: "VocabularyNames",
823
- # vocabulary_filter_names: "VocabularyFilterNames",
824
- # }
825
- #
826
1414
  # @!attribute [rw] language_code
827
1415
  # Specify the language code that represents the language spoken in
828
1416
  # your audio.
@@ -847,8 +1435,7 @@ module Aws::TranscribeStreamingService
847
1435
  # @return [Integer]
848
1436
  #
849
1437
  # @!attribute [rw] media_encoding
850
- # Specify the encoding used for the input audio. Supported formats
851
- # are:
1438
+ # Specify the encoding of your input audio. Supported formats are:
852
1439
  #
853
1440
  # * FLAC
854
1441
  #
@@ -870,7 +1457,8 @@ module Aws::TranscribeStreamingService
870
1457
  # sensitive.
871
1458
  #
872
1459
  # If the language of the specified custom vocabulary doesn't match
873
- # the language identified in your media, your job fails.
1460
+ # the language identified in your media, the custom vocabulary is not
1461
+ # applied to your transcription.
874
1462
  #
875
1463
  # This parameter is **not** intended for use with the
876
1464
  # `IdentifyLanguage` parameter. If you're including
@@ -910,7 +1498,8 @@ module Aws::TranscribeStreamingService
910
1498
  # names are case sensitive.
911
1499
  #
912
1500
  # If the language of the specified custom vocabulary filter doesn't
913
- # match the language identified in your media, your job fails.
1501
+ # match the language identified in your media, the vocabulary filter
1502
+ # is not applied to your transcription.
914
1503
  #
915
1504
  # This parameter is **not** intended for use with the
916
1505
  # `IdentifyLanguage` parameter. If you're including
@@ -1058,8 +1647,8 @@ module Aws::TranscribeStreamingService
1058
1647
  #
1059
1648
  # The language of the specified language model must match the language
1060
1649
  # code you specify in your transcription request. If the languages
1061
- # don't match, the language model isn't applied. There are no errors
1062
- # or warnings associated with a language mismatch.
1650
+ # don't match, the custom language model isn't applied. There are no
1651
+ # errors or warnings associated with a language mismatch.
1063
1652
  #
1064
1653
  # For more information, see [Custom language models][1].
1065
1654
  #
@@ -1329,6 +1918,28 @@ module Aws::TranscribeStreamingService
1329
1918
  include Aws::Structure
1330
1919
  end
1331
1920
 
1921
+ # Contains the timestamp range (start time through end time) of a
1922
+ # matched category.
1923
+ #
1924
+ # @!attribute [rw] begin_offset_millis
1925
+ # The time, in milliseconds, from the beginning of the audio stream to
1926
+ # the start of the category match.
1927
+ # @return [Integer]
1928
+ #
1929
+ # @!attribute [rw] end_offset_millis
1930
+ # The time, in milliseconds, from the beginning of the audio stream to
1931
+ # the end of the category match.
1932
+ # @return [Integer]
1933
+ #
1934
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/TimestampRange AWS API Documentation
1935
+ #
1936
+ class TimestampRange < Struct.new(
1937
+ :begin_offset_millis,
1938
+ :end_offset_millis)
1939
+ SENSITIVE = []
1940
+ include Aws::Structure
1941
+ end
1942
+
1332
1943
  # The `Transcript` associated with a `TranscriptEvent`. `Transcript` contains
1333
1944
  # Results, which contains a set of transcription results from one or
1334
1945
  # more audio segments, along with additional information per your
@@ -1376,6 +1987,76 @@ module Aws::TranscribeStreamingService
1376
1987
  include Aws::Structure
1377
1988
  end
1378
1989
 
1990
+ # Contains a set of transcription results from one or more audio segments,
1991
+ # along with additional information about the parameters included in
1992
+ # your request. For example, channel definitions, partial result
1993
+ # stabilization, sentiment, and issue detection.
1994
+ #
1995
+ # @!attribute [rw] utterance_id
1996
+ # The unique identifier that is associated with the specified
1997
+ # `UtteranceEvent`.
1998
+ # @return [String]
1999
+ #
2000
+ # @!attribute [rw] is_partial
2001
+ # Indicates whether the segment in the `UtteranceEvent` is complete
2002
+ # (`FALSE`) or partial (`TRUE`).
2003
+ # @return [Boolean]
2004
+ #
2005
+ # @!attribute [rw] participant_role
2006
+ # Provides the role of the speaker for each audio channel, either
2007
+ # `CUSTOMER` or `AGENT`.
2008
+ # @return [String]
2009
+ #
2010
+ # @!attribute [rw] begin_offset_millis
2011
+ # The time, in milliseconds, from the beginning of the audio stream to
2012
+ # the start of the `UtteranceEvent`.
2013
+ # @return [Integer]
2014
+ #
2015
+ # @!attribute [rw] end_offset_millis
2016
+ # The time, in milliseconds, from the beginning of the audio stream to
2017
+ # the end of the `UtteranceEvent`.
2018
+ # @return [Integer]
2019
+ #
2020
+ # @!attribute [rw] transcript
2021
+ # Contains transcribed text.
2022
+ # @return [String]
2023
+ #
2024
+ # @!attribute [rw] items
2025
+ # Contains words, phrases, or punctuation marks that are associated
2026
+ # with the specified `UtteranceEvent`.
2027
+ # @return [Array<Types::CallAnalyticsItem>]
2028
+ #
2029
+ # @!attribute [rw] entities
2030
+ # Contains entities identified as personally identifiable information
2031
+ # (PII) in your transcription output.
2032
+ # @return [Array<Types::CallAnalyticsEntity>]
2033
+ #
2034
+ # @!attribute [rw] sentiment
2035
+ # Provides the sentiment that was detected in the specified segment.
2036
+ # @return [String]
2037
+ #
2038
+ # @!attribute [rw] issues_detected
2039
+ # Provides the issues that were detected in the specified segment.
2040
+ # @return [Array<Types::IssueDetected>]
2041
+ #
2042
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/UtteranceEvent AWS API Documentation
2043
+ #
2044
+ class UtteranceEvent < Struct.new(
2045
+ :utterance_id,
2046
+ :is_partial,
2047
+ :participant_role,
2048
+ :begin_offset_millis,
2049
+ :end_offset_millis,
2050
+ :transcript,
2051
+ :items,
2052
+ :entities,
2053
+ :sentiment,
2054
+ :issues_detected,
2055
+ :event_type)
2056
+ SENSITIVE = []
2057
+ include Aws::Structure
2058
+ end
2059
+
1379
2060
  # An encoded stream of audio blobs. Audio streams are encoded as either
1380
2061
  # HTTP/2 or WebSocket data frames.
1381
2062
  #
@@ -1385,15 +2066,6 @@ module Aws::TranscribeStreamingService
1385
2066
  #
1386
2067
  # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
1387
2068
  #
1388
- # @note When making an API call, you may pass AudioStream
1389
- # data as a hash:
1390
- #
1391
- # {
1392
- # audio_event: {
1393
- # audio_chunk: "data",
1394
- # },
1395
- # }
1396
- #
1397
2069
  # EventStream is an Enumerator of Events.
1398
2070
  # #event_types #=> Array, returns all modeled event types in the stream
1399
2071
  #
@@ -1403,7 +2075,33 @@ module Aws::TranscribeStreamingService
1403
2075
 
1404
2076
  def event_types
1405
2077
  [
1406
- :audio_event
2078
+ :audio_event,
2079
+ :configuration_event
2080
+ ]
2081
+ end
2082
+
2083
+ end
2084
+
2085
+ # Contains detailed information about your Call Analytics streaming
2086
+ # session. These details are provided in the `UtteranceEvent` and
2087
+ # `CategoryEvent` objects.
2088
+ #
2089
+ # EventStream is an Enumerator of Events.
2090
+ # #event_types #=> Array, returns all modeled event types in the stream
2091
+ #
2092
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/CallAnalyticsTranscriptResultStream AWS API Documentation
2093
+ #
2094
+ class CallAnalyticsTranscriptResultStream < Enumerator
2095
+
2096
+ def event_types
2097
+ [
2098
+ :utterance_event,
2099
+ :category_event,
2100
+ :bad_request_exception,
2101
+ :limit_exceeded_exception,
2102
+ :internal_failure_exception,
2103
+ :conflict_exception,
2104
+ :service_unavailable_exception
1407
2105
  ]
1408
2106
  end
1409
2107