aws-sdk-rekognition 1.66.0 → 1.69.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -160,11 +160,11 @@ module Aws::Rekognition
160
160
  include Aws::Structure
161
161
  end
162
162
 
163
- # Identifies the bounding box around the label, face, text or personal
164
- # protective equipment. The `left` (x-coordinate) and `top`
165
- # (y-coordinate) are coordinates representing the top and left sides of
166
- # the bounding box. Note that the upper-left corner of the image is the
167
- # origin (0,0).
163
+ # Identifies the bounding box around the label, face, text, object of
164
+ # interest, or personal protective equipment. The `left` (x-coordinate)
165
+ # and `top` (y-coordinate) are coordinates representing the top and left
166
+ # sides of the bounding box. Note that the upper-left corner of the
167
+ # image is the origin (0,0).
168
168
  #
169
169
  # The `top` and `left` values returned are ratios of the overall image
170
170
  # size. For example, if the input image is 700x200 pixels, and the
@@ -558,6 +558,72 @@ module Aws::Rekognition
558
558
  include Aws::Structure
559
559
  end
560
560
 
561
+ # Label detection settings to use on a streaming video. Defining the
562
+ # settings is required in the request parameter for
563
+ # CreateStreamProcessor. Including this setting in the
564
+ # `CreateStreamProcessor` request enables you to use the stream
565
+ # processor for label detection. You can then select what you want the
566
+ # stream processor to detect, such as people or pets. When the stream
567
+ # processor has started, one notification is sent for each object class
568
+ # specified. For example, if packages and pets are selected, one SNS
569
+ # notification is published the first time a package is detected and one
570
+ # SNS notification is published the first time a pet is detected, as
571
+ # well as an end-of-session summary.
572
+ #
573
+ # @note When making an API call, you may pass ConnectedHomeSettings
574
+ # data as a hash:
575
+ #
576
+ # {
577
+ # labels: ["ConnectedHomeLabel"], # required
578
+ # min_confidence: 1.0,
579
+ # }
580
+ #
581
+ # @!attribute [rw] labels
582
+ # Specifies what you want to detect in the video, such as people,
583
+ # packages, or pets. The current valid labels you can include in this
584
+ # list are: "PERSON", "PET", "PACKAGE", and "ALL".
585
+ # @return [Array<String>]
586
+ #
587
+ # @!attribute [rw] min_confidence
588
+ # The minimum confidence required to label an object in the video.
589
+ # @return [Float]
590
+ #
591
+ class ConnectedHomeSettings < Struct.new(
592
+ :labels,
593
+ :min_confidence)
594
+ SENSITIVE = []
595
+ include Aws::Structure
596
+ end
597
+
598
+ # The label detection settings you want to use in your stream processor.
599
+ # This includes the labels you want the stream processor to detect and
600
+ # the minimum confidence level allowed to label objects.
601
+ #
602
+ # @note When making an API call, you may pass ConnectedHomeSettingsForUpdate
603
+ # data as a hash:
604
+ #
605
+ # {
606
+ # labels: ["ConnectedHomeLabel"],
607
+ # min_confidence: 1.0,
608
+ # }
609
+ #
610
+ # @!attribute [rw] labels
611
+ # Specifies what you want to detect in the video, such as people,
612
+ # packages, or pets. The current valid labels you can include in this
613
+ # list are: "PERSON", "PET", "PACKAGE", and "ALL".
614
+ # @return [Array<String>]
615
+ #
616
+ # @!attribute [rw] min_confidence
617
+ # The minimum confidence required to label an object in the video.
618
+ # @return [Float]
619
+ #
620
+ class ConnectedHomeSettingsForUpdate < Struct.new(
621
+ :labels,
622
+ :min_confidence)
623
+ SENSITIVE = []
624
+ include Aws::Structure
625
+ end
626
+
561
627
  # Information about an inappropriate, unwanted, or offensive content
562
628
  # label detection in a stored video.
563
629
  #
@@ -632,12 +698,8 @@ module Aws::Rekognition
632
698
  # @return [String]
633
699
  #
634
700
  # @!attribute [rw] face_model_version
635
- # Latest face model being used with the collection. For more
636
- # information, see [Model versioning][1].
637
- #
638
- #
639
- #
640
- # [1]: https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html
701
+ # Version number of the face detection model associated with the
702
+ # collection you are creating.
641
703
  # @return [String]
642
704
  #
643
705
  class CreateCollectionResponse < Struct.new(
@@ -867,6 +929,10 @@ module Aws::Rekognition
867
929
  # kinesis_data_stream: {
868
930
  # arn: "KinesisDataArn",
869
931
  # },
932
+ # s3_destination: {
933
+ # bucket: "S3Bucket",
934
+ # key_prefix: "S3KeyPrefix",
935
+ # },
870
936
  # },
871
937
  # name: "StreamProcessorName", # required
872
938
  # settings: { # required
@@ -874,40 +940,77 @@ module Aws::Rekognition
874
940
  # collection_id: "CollectionId",
875
941
  # face_match_threshold: 1.0,
876
942
  # },
943
+ # connected_home: {
944
+ # labels: ["ConnectedHomeLabel"], # required
945
+ # min_confidence: 1.0,
946
+ # },
877
947
  # },
878
948
  # role_arn: "RoleArn", # required
879
949
  # tags: {
880
950
  # "TagKey" => "TagValue",
881
951
  # },
952
+ # notification_channel: {
953
+ # sns_topic_arn: "SNSTopicArn", # required
954
+ # },
955
+ # kms_key_id: "KmsKeyId",
956
+ # regions_of_interest: [
957
+ # {
958
+ # bounding_box: {
959
+ # width: 1.0,
960
+ # height: 1.0,
961
+ # left: 1.0,
962
+ # top: 1.0,
963
+ # },
964
+ # polygon: [
965
+ # {
966
+ # x: 1.0,
967
+ # y: 1.0,
968
+ # },
969
+ # ],
970
+ # },
971
+ # ],
972
+ # data_sharing_preference: {
973
+ # opt_in: false, # required
974
+ # },
882
975
  # }
883
976
  #
884
977
  # @!attribute [rw] input
885
978
  # Kinesis video stream stream that provides the source streaming
886
979
  # video. If you are using the AWS CLI, the parameter name is
887
- # `StreamProcessorInput`.
980
+ # `StreamProcessorInput`. This is required for both face search and
981
+ # label detection stream processors.
888
982
  # @return [Types::StreamProcessorInput]
889
983
  #
890
984
  # @!attribute [rw] output
891
- # Kinesis data stream stream to which Amazon Rekognition Video puts
892
- # the analysis results. If you are using the AWS CLI, the parameter
893
- # name is `StreamProcessorOutput`.
985
+ # Kinesis data stream stream or Amazon S3 bucket location to which
986
+ # Amazon Rekognition Video puts the analysis results. If you are using
987
+ # the AWS CLI, the parameter name is `StreamProcessorOutput`. This
988
+ # must be a S3Destination of an Amazon S3 bucket that you own for a
989
+ # label detection stream processor or a Kinesis data stream ARN for a
990
+ # face search stream processor.
894
991
  # @return [Types::StreamProcessorOutput]
895
992
  #
896
993
  # @!attribute [rw] name
897
994
  # An identifier you assign to the stream processor. You can use `Name`
898
995
  # to manage the stream processor. For example, you can get the current
899
996
  # status of the stream processor by calling DescribeStreamProcessor.
900
- # `Name` is idempotent.
997
+ # `Name` is idempotent. This is required for both face search and
998
+ # label detection stream processors.
901
999
  # @return [String]
902
1000
  #
903
1001
  # @!attribute [rw] settings
904
- # Face recognition input parameters to be used by the stream
905
- # processor. Includes the collection to use for face recognition and
906
- # the face attributes to detect.
1002
+ # Input parameters used in a streaming video analyzed by a stream
1003
+ # processor. You can use `FaceSearch` to recognize faces in a
1004
+ # streaming video, or you can use `ConnectedHome` to detect labels.
907
1005
  # @return [Types::StreamProcessorSettings]
908
1006
  #
909
1007
  # @!attribute [rw] role_arn
910
- # ARN of the IAM role that allows access to the stream processor.
1008
+ # The Amazon Resource Number (ARN) of the IAM role that allows access
1009
+ # to the stream processor. The IAM role provides Rekognition read
1010
+ # permissions for a Kinesis stream. It also provides write permissions
1011
+ # to an Amazon S3 bucket and Amazon Simple Notification Service topic
1012
+ # for a label detection stream processor. This is required for both
1013
+ # face search and label detection stream processors.
911
1014
  # @return [String]
912
1015
  #
913
1016
  # @!attribute [rw] tags
@@ -915,19 +1018,65 @@ module Aws::Rekognition
915
1018
  # stream processor.
916
1019
  # @return [Hash<String,String>]
917
1020
  #
1021
+ # @!attribute [rw] notification_channel
1022
+ # The Amazon Simple Notification Service topic to which Amazon
1023
+ # Rekognition publishes the object detection results and completion
1024
+ # status of a video analysis operation.
1025
+ #
1026
+ # Amazon Rekognition publishes a notification the first time an object
1027
+ # of interest or a person is detected in the video stream. For
1028
+ # example, if Amazon Rekognition detects a person at second 2, a pet
1029
+ # at second 4, and a person again at second 5, Amazon Rekognition
1030
+ # sends 2 object class detected notifications, one for a person at
1031
+ # second 2 and one for a pet at second 4.
1032
+ #
1033
+ # Amazon Rekognition also publishes an end-of-session notification
1034
+ # with a summary when the stream processing session is complete.
1035
+ # @return [Types::StreamProcessorNotificationChannel]
1036
+ #
1037
+ # @!attribute [rw] kms_key_id
1038
+ # The identifier for your AWS Key Management Service key (AWS KMS
1039
+ # key). This is an optional parameter for label detection stream
1040
+ # processors and should not be used to create a face search stream
1041
+ # processor. You can supply the Amazon Resource Name (ARN) of your KMS
1042
+ # key, the ID of your KMS key, an alias for your KMS key, or an alias
1043
+ # ARN. The key is used to encrypt results and data published to your
1044
+ # Amazon S3 bucket, which includes image frames and hero images. Your
1045
+ # source images are unaffected.
1046
+ # @return [String]
1047
+ #
1048
+ # @!attribute [rw] regions_of_interest
1049
+ # Specifies locations in the frames where Amazon Rekognition checks
1050
+ # for objects or people. You can specify up to 10 regions of interest,
1051
+ # and each region has either a polygon or a bounding box. This is an
1052
+ # optional parameter for label detection stream processors and should
1053
+ # not be used to create a face search stream processor.
1054
+ # @return [Array<Types::RegionOfInterest>]
1055
+ #
1056
+ # @!attribute [rw] data_sharing_preference
1057
+ # Shows whether you are sharing data with Rekognition to improve model
1058
+ # performance. You can choose this option at the account level or on a
1059
+ # per-stream basis. Note that if you opt out at the account level this
1060
+ # setting is ignored on individual streams.
1061
+ # @return [Types::StreamProcessorDataSharingPreference]
1062
+ #
918
1063
  class CreateStreamProcessorRequest < Struct.new(
919
1064
  :input,
920
1065
  :output,
921
1066
  :name,
922
1067
  :settings,
923
1068
  :role_arn,
924
- :tags)
1069
+ :tags,
1070
+ :notification_channel,
1071
+ :kms_key_id,
1072
+ :regions_of_interest,
1073
+ :data_sharing_preference)
925
1074
  SENSITIVE = []
926
1075
  include Aws::Structure
927
1076
  end
928
1077
 
929
1078
  # @!attribute [rw] stream_processor_arn
930
- # ARN for the newly create stream processor.
1079
+ # Amazon Resource Number for the newly created stream processor.
931
1080
  # @return [String]
932
1081
  #
933
1082
  class CreateStreamProcessorResponse < Struct.new(
@@ -1373,7 +1522,7 @@ module Aws::Rekognition
1373
1522
  # The version of the face model that's used by the collection for
1374
1523
  # face detection.
1375
1524
  #
1376
- # For more information, see Model Versioning in the Amazon Rekognition
1525
+ # For more information, see Model versioning in the Amazon Rekognition
1377
1526
  # Developer Guide.
1378
1527
  # @return [String]
1379
1528
  #
@@ -1604,11 +1753,46 @@ module Aws::Rekognition
1604
1753
  # @return [String]
1605
1754
  #
1606
1755
  # @!attribute [rw] settings
1607
- # Face recognition input parameters that are being used by the stream
1608
- # processor. Includes the collection to use for face recognition and
1609
- # the face attributes to detect.
1756
+ # Input parameters used in a streaming video analyzed by a stream
1757
+ # processor. You can use `FaceSearch` to recognize faces in a
1758
+ # streaming video, or you can use `ConnectedHome` to detect labels.
1610
1759
  # @return [Types::StreamProcessorSettings]
1611
1760
  #
1761
+ # @!attribute [rw] notification_channel
1762
+ # The Amazon Simple Notification Service topic to which Amazon
1763
+ # Rekognition publishes the object detection results and completion
1764
+ # status of a video analysis operation.
1765
+ #
1766
+ # Amazon Rekognition publishes a notification the first time an object
1767
+ # of interest or a person is detected in the video stream. For
1768
+ # example, if Amazon Rekognition detects a person at second 2, a pet
1769
+ # at second 4, and a person again at second 5, Amazon Rekognition
1770
+ # sends 2 object class detected notifications, one for a person at
1771
+ # second 2 and one for a pet at second 4.
1772
+ #
1773
+ # Amazon Rekognition also publishes an an end-of-session notification
1774
+ # with a summary when the stream processing session is complete.
1775
+ # @return [Types::StreamProcessorNotificationChannel]
1776
+ #
1777
+ # @!attribute [rw] kms_key_id
1778
+ # The identifier for your AWS Key Management Service key (AWS KMS
1779
+ # key). This is an optional parameter for label detection stream
1780
+ # processors.
1781
+ # @return [String]
1782
+ #
1783
+ # @!attribute [rw] regions_of_interest
1784
+ # Specifies locations in the frames where Amazon Rekognition checks
1785
+ # for objects or people. This is an optional parameter for label
1786
+ # detection stream processors.
1787
+ # @return [Array<Types::RegionOfInterest>]
1788
+ #
1789
+ # @!attribute [rw] data_sharing_preference
1790
+ # Shows whether you are sharing data with Rekognition to improve model
1791
+ # performance. You can choose this option at the account level or on a
1792
+ # per-stream basis. Note that if you opt out at the account level this
1793
+ # setting is ignored on individual streams.
1794
+ # @return [Types::StreamProcessorDataSharingPreference]
1795
+ #
1612
1796
  class DescribeStreamProcessorResponse < Struct.new(
1613
1797
  :name,
1614
1798
  :stream_processor_arn,
@@ -1619,7 +1803,11 @@ module Aws::Rekognition
1619
1803
  :input,
1620
1804
  :output,
1621
1805
  :role_arn,
1622
- :settings)
1806
+ :settings,
1807
+ :notification_channel,
1808
+ :kms_key_id,
1809
+ :regions_of_interest,
1810
+ :data_sharing_preference)
1623
1811
  SENSITIVE = []
1624
1812
  include Aws::Structure
1625
1813
  end
@@ -1671,8 +1859,9 @@ module Aws::Rekognition
1671
1859
  # operation using the S3Object property.
1672
1860
  #
1673
1861
  # For Amazon Rekognition to process an S3 object, the user must have
1674
- # permission to access the S3 object. For more information, see
1675
- # Resource Based Policies in the Amazon Rekognition Developer Guide.
1862
+ # permission to access the S3 object. For more information, see How
1863
+ # Amazon Rekognition works with IAM in the Amazon Rekognition
1864
+ # Developer Guide.
1676
1865
  # @return [Types::Image]
1677
1866
  #
1678
1867
  # @!attribute [rw] max_results
@@ -2027,6 +2216,12 @@ module Aws::Rekognition
2027
2216
  # left: 1.0,
2028
2217
  # top: 1.0,
2029
2218
  # },
2219
+ # polygon: [
2220
+ # {
2221
+ # x: 1.0,
2222
+ # y: 1.0,
2223
+ # },
2224
+ # ],
2030
2225
  # },
2031
2226
  # ],
2032
2227
  # }
@@ -2074,6 +2269,12 @@ module Aws::Rekognition
2074
2269
  # left: 1.0,
2075
2270
  # top: 1.0,
2076
2271
  # },
2272
+ # polygon: [
2273
+ # {
2274
+ # x: 1.0,
2275
+ # y: 1.0,
2276
+ # },
2277
+ # ],
2077
2278
  # },
2078
2279
  # ],
2079
2280
  # },
@@ -2132,8 +2333,7 @@ module Aws::Rekognition
2132
2333
  # @!attribute [rw] min_confidence
2133
2334
  # Sets the confidence of word detection. Words with detection
2134
2335
  # confidence below this will be excluded from the result. Values
2135
- # should be between 50 and 100 as Text in Video will not return any
2136
- # result below 50.
2336
+ # should be between 0 and 100. The default MinConfidence is 80.
2137
2337
  # @return [Float]
2138
2338
  #
2139
2339
  # @!attribute [rw] min_bounding_box_height
@@ -2534,8 +2734,9 @@ module Aws::Rekognition
2534
2734
  end
2535
2735
 
2536
2736
  # Input face recognition parameters for an Amazon Rekognition stream
2537
- # processor. `FaceRecognitionSettings` is a request parameter for
2538
- # CreateStreamProcessor.
2737
+ # processor. Includes the collection to use for face recognition and the
2738
+ # face attributes to detect. Defining the settings is required in the
2739
+ # request parameter for CreateStreamProcessor.
2539
2740
  #
2540
2741
  # @note When making an API call, you may pass FaceSearchSettings
2541
2742
  # data as a hash:
@@ -2580,7 +2781,7 @@ module Aws::Rekognition
2580
2781
  # media platform.
2581
2782
  #
2582
2783
  # We don't recommend using gender binary predictions to make decisions
2583
- # that impactan individual's rights, privacy, or access to services.
2784
+ # that impact an individual's rights, privacy, or access to services.
2584
2785
  #
2585
2786
  # @!attribute [rw] value
2586
2787
  # The predicted gender of the face.
@@ -3348,8 +3549,9 @@ module Aws::Rekognition
3348
3549
  # region you use for Amazon Rekognition operations.
3349
3550
  #
3350
3551
  # For Amazon Rekognition to process an S3 object, the user must have
3351
- # permission to access the S3 object. For more information, see
3352
- # Resource-Based Policies in the Amazon Rekognition Developer Guide.
3552
+ # permission to access the S3 object. For more information, see How
3553
+ # Amazon Rekognition works with IAM in the Amazon Rekognition
3554
+ # Developer Guide.
3353
3555
  # @return [Types::S3Object]
3354
3556
  #
3355
3557
  class GroundTruthManifest < Struct.new(
@@ -3499,8 +3701,9 @@ module Aws::Rekognition
3499
3701
  # using the S3Object property.
3500
3702
  #
3501
3703
  # For Amazon Rekognition to process an S3 object, the user must have
3502
- # permission to access the S3 object. For more information, see Resource
3503
- # Based Policies in the Amazon Rekognition Developer Guide.
3704
+ # permission to access the S3 object. For more information, see How
3705
+ # Amazon Rekognition works with IAM in the Amazon Rekognition Developer
3706
+ # Guide.
3504
3707
  #
3505
3708
  # @note When making an API call, you may pass Image
3506
3709
  # data as a hash:
@@ -3552,8 +3755,8 @@ module Aws::Rekognition
3552
3755
 
3553
3756
  # The input image size exceeds the allowed limit. If you are calling
3554
3757
  # DetectProtectiveEquipment, the image size or resolution exceeds the
3555
- # allowed limit. For more information, see Limits in Amazon Rekognition
3556
- # in the Amazon Rekognition Developer Guide.
3758
+ # allowed limit. For more information, see Guidelines and quotas in
3759
+ # Amazon Rekognition in the Amazon Rekognition Developer Guide.
3557
3760
  #
3558
3761
  class ImageTooLargeException < Aws::EmptyStructure; end
3559
3762
 
@@ -3694,12 +3897,8 @@ module Aws::Rekognition
3694
3897
  # @return [String]
3695
3898
  #
3696
3899
  # @!attribute [rw] face_model_version
3697
- # Latest face model being used with the collection. For more
3698
- # information, see [Model versioning][1].
3699
- #
3700
- #
3701
- #
3702
- # [1]: https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html
3900
+ # The version number of the face detection model that's associated
3901
+ # with the input collection (`CollectionId`).
3703
3902
  # @return [String]
3704
3903
  #
3705
3904
  # @!attribute [rw] unindexed_faces
@@ -3804,6 +4003,38 @@ module Aws::Rekognition
3804
4003
  include Aws::Structure
3805
4004
  end
3806
4005
 
4006
+ # Specifies the starting point in a Kinesis stream to start processing.
4007
+ # You can use the producer timestamp or the fragment number. For more
4008
+ # information, see [Fragment][1].
4009
+ #
4010
+ #
4011
+ #
4012
+ # [1]: https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/API_reader_Fragment.html
4013
+ #
4014
+ # @note When making an API call, you may pass KinesisVideoStreamStartSelector
4015
+ # data as a hash:
4016
+ #
4017
+ # {
4018
+ # producer_timestamp: 1,
4019
+ # fragment_number: "KinesisVideoStreamFragmentNumber",
4020
+ # }
4021
+ #
4022
+ # @!attribute [rw] producer_timestamp
4023
+ # The timestamp from the producer corresponding to the fragment.
4024
+ # @return [Integer]
4025
+ #
4026
+ # @!attribute [rw] fragment_number
4027
+ # The unique identifier of the fragment. This value monotonically
4028
+ # increases based on the ingestion order.
4029
+ # @return [String]
4030
+ #
4031
+ class KinesisVideoStreamStartSelector < Struct.new(
4032
+ :producer_timestamp,
4033
+ :fragment_number)
4034
+ SENSITIVE = []
4035
+ include Aws::Structure
4036
+ end
4037
+
3807
4038
  # The known gender identity for the celebrity that matches the provided
3808
4039
  # ID. The known gender identity can be Male, Female, Nonbinary, or
3809
4040
  # Unlisted.
@@ -3940,15 +4171,10 @@ module Aws::Rekognition
3940
4171
  # @return [String]
3941
4172
  #
3942
4173
  # @!attribute [rw] face_model_versions
3943
- # Latest face models being used with the corresponding collections in
3944
- # the array. For more information, see [Model versioning][1]. For
3945
- # example, the value of `FaceModelVersions[2]` is the version number
3946
- # for the face detection model used by the collection in
3947
- # `CollectionId[2]`.
3948
- #
3949
- #
3950
- #
3951
- # [1]: https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html
4174
+ # Version numbers of the face detection models associated with the
4175
+ # collections in the array `CollectionIds`. For example, the value of
4176
+ # `FaceModelVersions[2]` is the version number for the face detection
4177
+ # model used by the collection in `CollectionId[2]`.
3952
4178
  # @return [Array<String>]
3953
4179
  #
3954
4180
  class ListCollectionsResponse < Struct.new(
@@ -4144,12 +4370,8 @@ module Aws::Rekognition
4144
4370
  # @return [String]
4145
4371
  #
4146
4372
  # @!attribute [rw] face_model_version
4147
- # Latest face model being used with the collection. For more
4148
- # information, see [Model versioning][1].
4149
- #
4150
- #
4151
- #
4152
- # [1]: https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html
4373
+ # Version number of the face detection model associated with the input
4374
+ # collection (`CollectionId`).
4153
4375
  # @return [String]
4154
4376
  #
4155
4377
  class ListFacesResponse < Struct.new(
@@ -4303,15 +4525,17 @@ module Aws::Rekognition
4303
4525
 
4304
4526
  # The Amazon Simple Notification Service topic to which Amazon
4305
4527
  # Rekognition publishes the completion status of a video analysis
4306
- # operation. For more information, see api-video. Note that the Amazon
4307
- # SNS topic must have a topic name that begins with *AmazonRekognition*
4308
- # if you are using the AmazonRekognitionServiceRole permissions policy
4309
- # to access the topic. For more information, see [Giving access to
4310
- # multiple Amazon SNS topics][1].
4528
+ # operation. For more information, see [Calling Amazon Rekognition Video
4529
+ # operations][1]. Note that the Amazon SNS topic must have a topic name
4530
+ # that begins with *AmazonRekognition* if you are using the
4531
+ # AmazonRekognitionServiceRole permissions policy to access the topic.
4532
+ # For more information, see [Giving access to multiple Amazon SNS
4533
+ # topics][2].
4311
4534
  #
4312
4535
  #
4313
4536
  #
4314
- # [1]: https://docs.aws.amazon.com/rekognition/latest/dg/api-video-roles.html#api-video-roles-all-topics
4537
+ # [1]: https://docs.aws.amazon.com/rekognition/latest/dg/api-video.html
4538
+ # [2]: https://docs.aws.amazon.com/rekognition/latest/dg/api-video-roles.html#api-video-roles-all-topics
4315
4539
  #
4316
4540
  # @note When making an API call, you may pass NotificationChannel
4317
4541
  # data as a hash:
@@ -4322,7 +4546,7 @@ module Aws::Rekognition
4322
4546
  # }
4323
4547
  #
4324
4548
  # @!attribute [rw] sns_topic_arn
4325
- # The Amazon SNS topic to which Amazon Rekognition to posts the
4549
+ # The Amazon SNS topic to which Amazon Rekognition posts the
4326
4550
  # completion status.
4327
4551
  # @return [String]
4328
4552
  #
@@ -4452,15 +4676,24 @@ module Aws::Rekognition
4452
4676
  include Aws::Structure
4453
4677
  end
4454
4678
 
4455
- # The X and Y coordinates of a point on an image. The X and Y values
4456
- # returned are ratios of the overall image size. For example, if the
4457
- # input image is 700x200 and the operation returns X=0.5 and Y=0.25,
4458
- # then the point is at the (350,50) pixel coordinate on the image.
4679
+ # The X and Y coordinates of a point on an image or video frame. The X
4680
+ # and Y values are ratios of the overall image size or video resolution.
4681
+ # For example, if an input image is 700x200 and the values are X=0.5 and
4682
+ # Y=0.25, then the point is at the (350,50) pixel coordinate on the
4683
+ # image.
4459
4684
  #
4460
- # An array of `Point` objects, `Polygon`, is returned by DetectText and
4461
- # by DetectCustomLabels. `Polygon` represents a fine-grained polygon
4462
- # around a detected item. For more information, see Geometry in the
4463
- # Amazon Rekognition Developer Guide.
4685
+ # An array of `Point` objects makes up a `Polygon`. A `Polygon` is
4686
+ # returned by DetectText and by DetectCustomLabels. `Polygon` represents
4687
+ # a fine-grained polygon around a detected item. For more information,
4688
+ # see Geometry in the Amazon Rekognition Developer Guide.
4689
+ #
4690
+ # @note When making an API call, you may pass Point
4691
+ # data as a hash:
4692
+ #
4693
+ # {
4694
+ # x: 1.0,
4695
+ # y: 1.0,
4696
+ # }
4464
4697
  #
4465
4698
  # @!attribute [rw] x
4466
4699
  # The value of the X coordinate for a point on a `Polygon`.
@@ -4591,6 +4824,12 @@ module Aws::Rekognition
4591
4824
  # that was used to encrypt the model during training.
4592
4825
  # @return [String]
4593
4826
  #
4827
+ # @!attribute [rw] max_inference_units
4828
+ # The maximum number of inference units Amazon Rekognition Custom
4829
+ # Labels uses to auto-scale the model. For more information, see
4830
+ # StartProjectVersion.
4831
+ # @return [Integer]
4832
+ #
4594
4833
  class ProjectVersionDescription < Struct.new(
4595
4834
  :project_version_arn,
4596
4835
  :creation_timestamp,
@@ -4604,7 +4843,8 @@ module Aws::Rekognition
4604
4843
  :testing_data_result,
4605
4844
  :evaluation_result,
4606
4845
  :manifest_summary,
4607
- :kms_key_id)
4846
+ :kms_key_id,
4847
+ :max_inference_units)
4608
4848
  SENSITIVE = []
4609
4849
  include Aws::Structure
4610
4850
  end
@@ -4842,12 +5082,13 @@ module Aws::Rekognition
4842
5082
  end
4843
5083
 
4844
5084
  # Specifies a location within the frame that Rekognition checks for
4845
- # text. Uses a `BoundingBox` object to set a region of the screen.
5085
+ # objects of interest such as text, labels, or faces. It uses a
5086
+ # `BoundingBox` or `Polygon` to set a region of the screen.
4846
5087
  #
4847
- # A word is included in the region if the word is more than half in that
4848
- # region. If there is more than one region, the word will be compared
4849
- # with all regions of the screen. Any word more than half in a region is
4850
- # kept in the results.
5088
+ # A word, face, or label is included in the region if it is more than
5089
+ # half in that region. If there is more than one region, the word, face,
5090
+ # or label is compared with all regions of the screen. Any object of
5091
+ # interest that is more than half in a region is kept in the results.
4851
5092
  #
4852
5093
  # @note When making an API call, you may pass RegionOfInterest
4853
5094
  # data as a hash:
@@ -4859,14 +5100,26 @@ module Aws::Rekognition
4859
5100
  # left: 1.0,
4860
5101
  # top: 1.0,
4861
5102
  # },
5103
+ # polygon: [
5104
+ # {
5105
+ # x: 1.0,
5106
+ # y: 1.0,
5107
+ # },
5108
+ # ],
4862
5109
  # }
4863
5110
  #
4864
5111
  # @!attribute [rw] bounding_box
4865
5112
  # The box representing a region of interest on screen.
4866
5113
  # @return [Types::BoundingBox]
4867
5114
  #
5115
+ # @!attribute [rw] polygon
5116
+ # Specifies a shape made up of up to 10 `Point` objects to define a
5117
+ # region of interest.
5118
+ # @return [Array<Types::Point>]
5119
+ #
4868
5120
  class RegionOfInterest < Struct.new(
4869
- :bounding_box)
5121
+ :bounding_box,
5122
+ :polygon)
4870
5123
  SENSITIVE = []
4871
5124
  include Aws::Structure
4872
5125
  end
@@ -4889,14 +5142,52 @@ module Aws::Rekognition
4889
5142
  #
4890
5143
  class ResourceNotReadyException < Aws::EmptyStructure; end
4891
5144
 
5145
+ # The Amazon S3 bucket location to which Amazon Rekognition publishes
5146
+ # the detailed inference results of a video analysis operation. These
5147
+ # results include the name of the stream processor resource, the session
5148
+ # ID of the stream processing session, and labeled timestamps and
5149
+ # bounding boxes for detected labels.
5150
+ #
5151
+ # @note When making an API call, you may pass S3Destination
5152
+ # data as a hash:
5153
+ #
5154
+ # {
5155
+ # bucket: "S3Bucket",
5156
+ # key_prefix: "S3KeyPrefix",
5157
+ # }
5158
+ #
5159
+ # @!attribute [rw] bucket
5160
+ # The name of the Amazon S3 bucket you want to associate with the
5161
+ # streaming video project. You must be the owner of the Amazon S3
5162
+ # bucket.
5163
+ # @return [String]
5164
+ #
5165
+ # @!attribute [rw] key_prefix
5166
+ # The prefix value of the location within the bucket that you want the
5167
+ # information to be published to. For more information, see [Using
5168
+ # prefixes][1].
5169
+ #
5170
+ #
5171
+ #
5172
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html
5173
+ # @return [String]
5174
+ #
5175
+ class S3Destination < Struct.new(
5176
+ :bucket,
5177
+ :key_prefix)
5178
+ SENSITIVE = []
5179
+ include Aws::Structure
5180
+ end
5181
+
4892
5182
  # Provides the S3 bucket name and object name.
4893
5183
  #
4894
5184
  # The region for the S3 bucket containing the S3 object must match the
4895
5185
  # region you use for Amazon Rekognition operations.
4896
5186
  #
4897
5187
  # For Amazon Rekognition to process an S3 object, the user must have
4898
- # permission to access the S3 object. For more information, see
4899
- # Resource-Based Policies in the Amazon Rekognition Developer Guide.
5188
+ # permission to access the S3 object. For more information, see How
5189
+ # Amazon Rekognition works with IAM in the Amazon Rekognition Developer
5190
+ # Guide.
4900
5191
  #
4901
5192
  # @note When making an API call, you may pass S3Object
4902
5193
  # data as a hash:
@@ -5014,12 +5305,8 @@ module Aws::Rekognition
5014
5305
  # @return [Array<Types::FaceMatch>]
5015
5306
  #
5016
5307
  # @!attribute [rw] face_model_version
5017
- # Latest face model being used with the collection. For more
5018
- # information, see [Model versioning][1].
5019
- #
5020
- #
5021
- #
5022
- # [1]: https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html
5308
+ # Version number of the face detection model associated with the input
5309
+ # collection (`CollectionId`).
5023
5310
  # @return [String]
5024
5311
  #
5025
5312
  class SearchFacesByImageResponse < Struct.new(
@@ -5079,12 +5366,8 @@ module Aws::Rekognition
5079
5366
  # @return [Array<Types::FaceMatch>]
5080
5367
  #
5081
5368
  # @!attribute [rw] face_model_version
5082
- # Latest face model being used with the collection. For more
5083
- # information, see [Model versioning][1].
5084
- #
5085
- #
5086
- #
5087
- # [1]: https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html
5369
+ # Version number of the face detection model associated with the input
5370
+ # collection (`CollectionId`).
5088
5371
  # @return [String]
5089
5372
  #
5090
5373
  class SearchFacesResponse < Struct.new(
@@ -5198,8 +5481,8 @@ module Aws::Rekognition
5198
5481
  end
5199
5482
 
5200
5483
  # The size of the collection exceeds the allowed limit. For more
5201
- # information, see Limits in Amazon Rekognition in the Amazon
5202
- # Rekognition Developer Guide.
5484
+ # information, see Guidelines and quotas in Amazon Rekognition in the
5485
+ # Amazon Rekognition Developer Guide.
5203
5486
  #
5204
5487
  class ServiceQuotaExceededException < Aws::EmptyStructure; end
5205
5488
 
@@ -5695,6 +5978,7 @@ module Aws::Rekognition
5695
5978
  # {
5696
5979
  # project_version_arn: "ProjectVersionArn", # required
5697
5980
  # min_inference_units: 1, # required
5981
+ # max_inference_units: 1,
5698
5982
  # }
5699
5983
  #
5700
5984
  # @!attribute [rw] project_version_arn
@@ -5704,15 +5988,27 @@ module Aws::Rekognition
5704
5988
  #
5705
5989
  # @!attribute [rw] min_inference_units
5706
5990
  # The minimum number of inference units to use. A single inference
5707
- # unit represents 1 hour of processing and can support up to 5
5708
- # Transaction Pers Second (TPS). Use a higher number to increase the
5709
- # TPS throughput of your model. You are charged for the number of
5710
- # inference units that you use.
5991
+ # unit represents 1 hour of processing.
5992
+ #
5993
+ # For information about the number of transactions per second (TPS)
5994
+ # that an inference unit can support, see *Running a trained Amazon
5995
+ # Rekognition Custom Labels model* in the Amazon Rekognition Custom
5996
+ # Labels Guide.
5997
+ #
5998
+ # Use a higher number to increase the TPS throughput of your model.
5999
+ # You are charged for the number of inference units that you use.
6000
+ # @return [Integer]
6001
+ #
6002
+ # @!attribute [rw] max_inference_units
6003
+ # The maximum number of inference units to use for auto-scaling the
6004
+ # model. If you don't specify a value, Amazon Rekognition Custom
6005
+ # Labels doesn't auto-scale the model.
5711
6006
  # @return [Integer]
5712
6007
  #
5713
6008
  class StartProjectVersionRequest < Struct.new(
5714
6009
  :project_version_arn,
5715
- :min_inference_units)
6010
+ :min_inference_units,
6011
+ :max_inference_units)
5716
6012
  SENSITIVE = []
5717
6013
  include Aws::Structure
5718
6014
  end
@@ -5888,19 +6184,59 @@ module Aws::Rekognition
5888
6184
  #
5889
6185
  # {
5890
6186
  # name: "StreamProcessorName", # required
6187
+ # start_selector: {
6188
+ # kvs_stream_start_selector: {
6189
+ # producer_timestamp: 1,
6190
+ # fragment_number: "KinesisVideoStreamFragmentNumber",
6191
+ # },
6192
+ # },
6193
+ # stop_selector: {
6194
+ # max_duration_in_seconds: 1,
6195
+ # },
5891
6196
  # }
5892
6197
  #
5893
6198
  # @!attribute [rw] name
5894
6199
  # The name of the stream processor to start processing.
5895
6200
  # @return [String]
5896
6201
  #
6202
+ # @!attribute [rw] start_selector
6203
+ # Specifies the starting point in the Kinesis stream to start
6204
+ # processing. You can use the producer timestamp or the fragment
6205
+ # number. For more information, see [Fragment][1].
6206
+ #
6207
+ # This is a required parameter for label detection stream processors
6208
+ # and should not be used to start a face search stream processor.
6209
+ #
6210
+ #
6211
+ #
6212
+ # [1]: https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/API_reader_Fragment.html
6213
+ # @return [Types::StreamProcessingStartSelector]
6214
+ #
6215
+ # @!attribute [rw] stop_selector
6216
+ # Specifies when to stop processing the stream. You can specify a
6217
+ # maximum amount of time to process the video.
6218
+ #
6219
+ # This is a required parameter for label detection stream processors
6220
+ # and should not be used to start a face search stream processor.
6221
+ # @return [Types::StreamProcessingStopSelector]
6222
+ #
5897
6223
  class StartStreamProcessorRequest < Struct.new(
5898
- :name)
6224
+ :name,
6225
+ :start_selector,
6226
+ :stop_selector)
5899
6227
  SENSITIVE = []
5900
6228
  include Aws::Structure
5901
6229
  end
5902
6230
 
5903
- class StartStreamProcessorResponse < Aws::EmptyStructure; end
6231
+ # @!attribute [rw] session_id
6232
+ # A unique identifier for the stream processing session.
6233
+ # @return [String]
6234
+ #
6235
+ class StartStreamProcessorResponse < Struct.new(
6236
+ :session_id)
6237
+ SENSITIVE = []
6238
+ include Aws::Structure
6239
+ end
5904
6240
 
5905
6241
  # Filters for the technical segments returned by GetSegmentDetection.
5906
6242
  # For more information, see StartSegmentDetectionFilters.
@@ -5966,6 +6302,12 @@ module Aws::Rekognition
5966
6302
  # left: 1.0,
5967
6303
  # top: 1.0,
5968
6304
  # },
6305
+ # polygon: [
6306
+ # {
6307
+ # x: 1.0,
6308
+ # y: 1.0,
6309
+ # },
6310
+ # ],
5969
6311
  # },
5970
6312
  # ],
5971
6313
  # }
@@ -6018,6 +6360,12 @@ module Aws::Rekognition
6018
6360
  # left: 1.0,
6019
6361
  # top: 1.0,
6020
6362
  # },
6363
+ # polygon: [
6364
+ # {
6365
+ # x: 1.0,
6366
+ # y: 1.0,
6367
+ # },
6368
+ # ],
6021
6369
  # },
6022
6370
  # ],
6023
6371
  # },
@@ -6040,16 +6388,17 @@ module Aws::Rekognition
6040
6388
  # @!attribute [rw] notification_channel
6041
6389
  # The Amazon Simple Notification Service topic to which Amazon
6042
6390
  # Rekognition publishes the completion status of a video analysis
6043
- # operation. For more information, see api-video. Note that the Amazon
6044
- # SNS topic must have a topic name that begins with
6045
- # *AmazonRekognition* if you are using the
6391
+ # operation. For more information, see [Calling Amazon Rekognition
6392
+ # Video operations][1]. Note that the Amazon SNS topic must have a
6393
+ # topic name that begins with *AmazonRekognition* if you are using the
6046
6394
  # AmazonRekognitionServiceRole permissions policy to access the topic.
6047
6395
  # For more information, see [Giving access to multiple Amazon SNS
6048
- # topics][1].
6396
+ # topics][2].
6049
6397
  #
6050
6398
  #
6051
6399
  #
6052
- # [1]: https://docs.aws.amazon.com/rekognition/latest/dg/api-video-roles.html#api-video-roles-all-topics
6400
+ # [1]: https://docs.aws.amazon.com/rekognition/latest/dg/api-video.html
6401
+ # [2]: https://docs.aws.amazon.com/rekognition/latest/dg/api-video-roles.html#api-video-roles-all-topics
6053
6402
  # @return [Types::NotificationChannel]
6054
6403
  #
6055
6404
  # @!attribute [rw] job_tag
@@ -6135,8 +6484,52 @@ module Aws::Rekognition
6135
6484
 
6136
6485
  class StopStreamProcessorResponse < Aws::EmptyStructure; end
6137
6486
 
6138
- # An object that recognizes faces in a streaming video. An Amazon
6139
- # Rekognition stream processor is created by a call to
6487
+ # @note When making an API call, you may pass StreamProcessingStartSelector
6488
+ # data as a hash:
6489
+ #
6490
+ # {
6491
+ # kvs_stream_start_selector: {
6492
+ # producer_timestamp: 1,
6493
+ # fragment_number: "KinesisVideoStreamFragmentNumber",
6494
+ # },
6495
+ # }
6496
+ #
6497
+ # @!attribute [rw] kvs_stream_start_selector
6498
+ # Specifies the starting point in the stream to start processing. This
6499
+ # can be done with a timestamp or a fragment number in a Kinesis
6500
+ # stream.
6501
+ # @return [Types::KinesisVideoStreamStartSelector]
6502
+ #
6503
+ class StreamProcessingStartSelector < Struct.new(
6504
+ :kvs_stream_start_selector)
6505
+ SENSITIVE = []
6506
+ include Aws::Structure
6507
+ end
6508
+
6509
+ # Specifies when to stop processing the stream. You can specify a
6510
+ # maximum amount of time to process the video.
6511
+ #
6512
+ # @note When making an API call, you may pass StreamProcessingStopSelector
6513
+ # data as a hash:
6514
+ #
6515
+ # {
6516
+ # max_duration_in_seconds: 1,
6517
+ # }
6518
+ #
6519
+ # @!attribute [rw] max_duration_in_seconds
6520
+ # Specifies the maximum amount of time in seconds that you want the
6521
+ # stream to be processed. The largest amount of time is 2 minutes. The
6522
+ # default is 10 seconds.
6523
+ # @return [Integer]
6524
+ #
6525
+ class StreamProcessingStopSelector < Struct.new(
6526
+ :max_duration_in_seconds)
6527
+ SENSITIVE = []
6528
+ include Aws::Structure
6529
+ end
6530
+
6531
+ # An object that recognizes faces or labels in a streaming video. An
6532
+ # Amazon Rekognition stream processor is created by a call to
6140
6533
  # CreateStreamProcessor. The request parameters for
6141
6534
  # `CreateStreamProcessor` describe the Kinesis video stream source for
6142
6535
  # the streaming video, face recognition parameters, and where to stream
@@ -6157,6 +6550,29 @@ module Aws::Rekognition
6157
6550
  include Aws::Structure
6158
6551
  end
6159
6552
 
6553
+ # Allows you to opt in or opt out to share data with Rekognition to
6554
+ # improve model performance. You can choose this option at the account
6555
+ # level or on a per-stream basis. Note that if you opt out at the
6556
+ # account level this setting is ignored on individual streams.
6557
+ #
6558
+ # @note When making an API call, you may pass StreamProcessorDataSharingPreference
6559
+ # data as a hash:
6560
+ #
6561
+ # {
6562
+ # opt_in: false, # required
6563
+ # }
6564
+ #
6565
+ # @!attribute [rw] opt_in
6566
+ # If this option is set to true, you choose to share data with
6567
+ # Rekognition to improve model performance.
6568
+ # @return [Boolean]
6569
+ #
6570
+ class StreamProcessorDataSharingPreference < Struct.new(
6571
+ :opt_in)
6572
+ SENSITIVE = []
6573
+ include Aws::Structure
6574
+ end
6575
+
6160
6576
  # Information about the source streaming video.
6161
6577
  #
6162
6578
  # @note When making an API call, you may pass StreamProcessorInput
@@ -6179,6 +6595,39 @@ module Aws::Rekognition
6179
6595
  include Aws::Structure
6180
6596
  end
6181
6597
 
6598
+ # The Amazon Simple Notification Service topic to which Amazon
6599
+ # Rekognition publishes the object detection results and completion
6600
+ # status of a video analysis operation.
6601
+ #
6602
+ # Amazon Rekognition publishes a notification the first time an object
6603
+ # of interest or a person is detected in the video stream. For example,
6604
+ # if Amazon Rekognition detects a person at second 2, a pet at second 4,
6605
+ # and a person again at second 5, Amazon Rekognition sends 2 object
6606
+ # class detected notifications, one for a person at second 2 and one for
6607
+ # a pet at second 4.
6608
+ #
6609
+ # Amazon Rekognition also publishes an an end-of-session notification
6610
+ # with a summary when the stream processing session is complete.
6611
+ #
6612
+ # @note When making an API call, you may pass StreamProcessorNotificationChannel
6613
+ # data as a hash:
6614
+ #
6615
+ # {
6616
+ # sns_topic_arn: "SNSTopicArn", # required
6617
+ # }
6618
+ #
6619
+ # @!attribute [rw] sns_topic_arn
6620
+ # The Amazon Resource Number (ARN) of the Amazon Amazon Simple
6621
+ # Notification Service topic to which Amazon Rekognition posts the
6622
+ # completion status.
6623
+ # @return [String]
6624
+ #
6625
+ class StreamProcessorNotificationChannel < Struct.new(
6626
+ :sns_topic_arn)
6627
+ SENSITIVE = []
6628
+ include Aws::Structure
6629
+ end
6630
+
6182
6631
  # Information about the Amazon Kinesis Data Streams stream to which a
6183
6632
  # Amazon Rekognition Video stream processor streams the results of a
6184
6633
  # video analysis. For more information, see CreateStreamProcessor in the
@@ -6191,6 +6640,10 @@ module Aws::Rekognition
6191
6640
  # kinesis_data_stream: {
6192
6641
  # arn: "KinesisDataArn",
6193
6642
  # },
6643
+ # s3_destination: {
6644
+ # bucket: "S3Bucket",
6645
+ # key_prefix: "S3KeyPrefix",
6646
+ # },
6194
6647
  # }
6195
6648
  #
6196
6649
  # @!attribute [rw] kinesis_data_stream
@@ -6198,14 +6651,22 @@ module Aws::Rekognition
6198
6651
  # Rekognition stream processor streams the analysis results.
6199
6652
  # @return [Types::KinesisDataStream]
6200
6653
  #
6654
+ # @!attribute [rw] s3_destination
6655
+ # The Amazon S3 bucket location to which Amazon Rekognition publishes
6656
+ # the detailed inference results of a video analysis operation.
6657
+ # @return [Types::S3Destination]
6658
+ #
6201
6659
  class StreamProcessorOutput < Struct.new(
6202
- :kinesis_data_stream)
6660
+ :kinesis_data_stream,
6661
+ :s3_destination)
6203
6662
  SENSITIVE = []
6204
6663
  include Aws::Structure
6205
6664
  end
6206
6665
 
6207
- # Input parameters used to recognize faces in a streaming video analyzed
6208
- # by a Amazon Rekognition stream processor.
6666
+ # Input parameters used in a streaming video analyzed by a Amazon
6667
+ # Rekognition stream processor. You can use `FaceSearch` to recognize
6668
+ # faces in a streaming video, or you can use `ConnectedHome` to detect
6669
+ # labels.
6209
6670
  #
6210
6671
  # @note When making an API call, you may pass StreamProcessorSettings
6211
6672
  # data as a hash:
@@ -6215,14 +6676,58 @@ module Aws::Rekognition
6215
6676
  # collection_id: "CollectionId",
6216
6677
  # face_match_threshold: 1.0,
6217
6678
  # },
6679
+ # connected_home: {
6680
+ # labels: ["ConnectedHomeLabel"], # required
6681
+ # min_confidence: 1.0,
6682
+ # },
6218
6683
  # }
6219
6684
  #
6220
6685
  # @!attribute [rw] face_search
6221
6686
  # Face search settings to use on a streaming video.
6222
6687
  # @return [Types::FaceSearchSettings]
6223
6688
  #
6689
+ # @!attribute [rw] connected_home
6690
+ # Label detection settings to use on a streaming video. Defining the
6691
+ # settings is required in the request parameter for
6692
+ # CreateStreamProcessor. Including this setting in the
6693
+ # `CreateStreamProcessor` request enables you to use the stream
6694
+ # processor for label detection. You can then select what you want the
6695
+ # stream processor to detect, such as people or pets. When the stream
6696
+ # processor has started, one notification is sent for each object
6697
+ # class specified. For example, if packages and pets are selected, one
6698
+ # SNS notification is published the first time a package is detected
6699
+ # and one SNS notification is published the first time a pet is
6700
+ # detected, as well as an end-of-session summary.
6701
+ # @return [Types::ConnectedHomeSettings]
6702
+ #
6224
6703
  class StreamProcessorSettings < Struct.new(
6225
- :face_search)
6704
+ :face_search,
6705
+ :connected_home)
6706
+ SENSITIVE = []
6707
+ include Aws::Structure
6708
+ end
6709
+
6710
+ # The stream processor settings that you want to update. `ConnectedHome`
6711
+ # settings can be updated to detect different labels with a different
6712
+ # minimum confidence.
6713
+ #
6714
+ # @note When making an API call, you may pass StreamProcessorSettingsForUpdate
6715
+ # data as a hash:
6716
+ #
6717
+ # {
6718
+ # connected_home_for_update: {
6719
+ # labels: ["ConnectedHomeLabel"],
6720
+ # min_confidence: 1.0,
6721
+ # },
6722
+ # }
6723
+ #
6724
+ # @!attribute [rw] connected_home_for_update
6725
+ # The label detection settings you want to use for your stream
6726
+ # processor.
6727
+ # @return [Types::ConnectedHomeSettingsForUpdate]
6728
+ #
6729
+ class StreamProcessorSettingsForUpdate < Struct.new(
6730
+ :connected_home_for_update)
6226
6731
  SENSITIVE = []
6227
6732
  include Aws::Structure
6228
6733
  end
@@ -6241,8 +6746,9 @@ module Aws::Rekognition
6241
6746
  # region you use for Amazon Rekognition operations.
6242
6747
  #
6243
6748
  # For Amazon Rekognition to process an S3 object, the user must have
6244
- # permission to access the S3 object. For more information, see
6245
- # Resource-Based Policies in the Amazon Rekognition Developer Guide.
6749
+ # permission to access the S3 object. For more information, see How
6750
+ # Amazon Rekognition works with IAM in the Amazon Rekognition
6751
+ # Developer Guide.
6246
6752
  # @return [Types::S3Object]
6247
6753
  #
6248
6754
  class Summary < Struct.new(
@@ -6393,7 +6899,7 @@ module Aws::Rekognition
6393
6899
  # of text in which the word appears. The word `Id` is also an index for
6394
6900
  # the word within a line of words.
6395
6901
  #
6396
- # For more information, see Detecting Text in the Amazon Rekognition
6902
+ # For more information, see Detecting text in the Amazon Rekognition
6397
6903
  # Developer Guide.
6398
6904
  #
6399
6905
  # @!attribute [rw] detected_text
@@ -6607,6 +7113,78 @@ module Aws::Rekognition
6607
7113
 
6608
7114
  class UpdateDatasetEntriesResponse < Aws::EmptyStructure; end
6609
7115
 
7116
+ # @note When making an API call, you may pass UpdateStreamProcessorRequest
7117
+ # data as a hash:
7118
+ #
7119
+ # {
7120
+ # name: "StreamProcessorName", # required
7121
+ # settings_for_update: {
7122
+ # connected_home_for_update: {
7123
+ # labels: ["ConnectedHomeLabel"],
7124
+ # min_confidence: 1.0,
7125
+ # },
7126
+ # },
7127
+ # regions_of_interest_for_update: [
7128
+ # {
7129
+ # bounding_box: {
7130
+ # width: 1.0,
7131
+ # height: 1.0,
7132
+ # left: 1.0,
7133
+ # top: 1.0,
7134
+ # },
7135
+ # polygon: [
7136
+ # {
7137
+ # x: 1.0,
7138
+ # y: 1.0,
7139
+ # },
7140
+ # ],
7141
+ # },
7142
+ # ],
7143
+ # data_sharing_preference_for_update: {
7144
+ # opt_in: false, # required
7145
+ # },
7146
+ # parameters_to_delete: ["ConnectedHomeMinConfidence"], # accepts ConnectedHomeMinConfidence, RegionsOfInterest
7147
+ # }
7148
+ #
7149
+ # @!attribute [rw] name
7150
+ # Name of the stream processor that you want to update.
7151
+ # @return [String]
7152
+ #
7153
+ # @!attribute [rw] settings_for_update
7154
+ # The stream processor settings that you want to update. Label
7155
+ # detection settings can be updated to detect different labels with a
7156
+ # different minimum confidence.
7157
+ # @return [Types::StreamProcessorSettingsForUpdate]
7158
+ #
7159
+ # @!attribute [rw] regions_of_interest_for_update
7160
+ # Specifies locations in the frames where Amazon Rekognition checks
7161
+ # for objects or people. This is an optional parameter for label
7162
+ # detection stream processors.
7163
+ # @return [Array<Types::RegionOfInterest>]
7164
+ #
7165
+ # @!attribute [rw] data_sharing_preference_for_update
7166
+ # Shows whether you are sharing data with Rekognition to improve model
7167
+ # performance. You can choose this option at the account level or on a
7168
+ # per-stream basis. Note that if you opt out at the account level this
7169
+ # setting is ignored on individual streams.
7170
+ # @return [Types::StreamProcessorDataSharingPreference]
7171
+ #
7172
+ # @!attribute [rw] parameters_to_delete
7173
+ # A list of parameters you want to delete from the stream processor.
7174
+ # @return [Array<String>]
7175
+ #
7176
+ class UpdateStreamProcessorRequest < Struct.new(
7177
+ :name,
7178
+ :settings_for_update,
7179
+ :regions_of_interest_for_update,
7180
+ :data_sharing_preference_for_update,
7181
+ :parameters_to_delete)
7182
+ SENSITIVE = []
7183
+ include Aws::Structure
7184
+ end
7185
+
7186
+ class UpdateStreamProcessorResponse < Aws::EmptyStructure; end
7187
+
6610
7188
  # Contains the Amazon S3 bucket location of the validation data for a
6611
7189
  # model training job.
6612
7190
  #