aws-sdk-rekognition 1.66.0 → 1.67.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +5 -0
- data/VERSION +1 -1
- data/lib/aws-sdk-rekognition/client.rb +294 -56
- data/lib/aws-sdk-rekognition/client_api.rb +90 -0
- data/lib/aws-sdk-rekognition/types.rb +671 -114
- data/lib/aws-sdk-rekognition.rb +1 -1
- metadata +2 -2
@@ -160,11 +160,11 @@ module Aws::Rekognition
|
|
160
160
|
include Aws::Structure
|
161
161
|
end
|
162
162
|
|
163
|
-
# Identifies the bounding box around the label, face, text
|
164
|
-
# protective equipment. The `left` (x-coordinate)
|
165
|
-
# (y-coordinate) are coordinates representing the top and left
|
166
|
-
# the bounding box. Note that the upper-left corner of the
|
167
|
-
# origin (0,0).
|
163
|
+
# Identifies the bounding box around the label, face, text, object of
|
164
|
+
# interest, or personal protective equipment. The `left` (x-coordinate)
|
165
|
+
# and `top` (y-coordinate) are coordinates representing the top and left
|
166
|
+
# sides of the bounding box. Note that the upper-left corner of the
|
167
|
+
# image is the origin (0,0).
|
168
168
|
#
|
169
169
|
# The `top` and `left` values returned are ratios of the overall image
|
170
170
|
# size. For example, if the input image is 700x200 pixels, and the
|
@@ -558,6 +558,72 @@ module Aws::Rekognition
|
|
558
558
|
include Aws::Structure
|
559
559
|
end
|
560
560
|
|
561
|
+
# Label detection settings to use on a streaming video. Defining the
|
562
|
+
# settings is required in the request parameter for
|
563
|
+
# CreateStreamProcessor. Including this setting in the
|
564
|
+
# `CreateStreamProcessor` request enables you to use the stream
|
565
|
+
# processor for label detection. You can then select what you want the
|
566
|
+
# stream processor to detect, such as people or pets. When the stream
|
567
|
+
# processor has started, one notification is sent for each object class
|
568
|
+
# specified. For example, if packages and pets are selected, one SNS
|
569
|
+
# notification is published the first time a package is detected and one
|
570
|
+
# SNS notification is published the first time a pet is detected, as
|
571
|
+
# well as an end-of-session summary.
|
572
|
+
#
|
573
|
+
# @note When making an API call, you may pass ConnectedHomeSettings
|
574
|
+
# data as a hash:
|
575
|
+
#
|
576
|
+
# {
|
577
|
+
# labels: ["ConnectedHomeLabel"], # required
|
578
|
+
# min_confidence: 1.0,
|
579
|
+
# }
|
580
|
+
#
|
581
|
+
# @!attribute [rw] labels
|
582
|
+
# Specifies what you want to detect in the video, such as people,
|
583
|
+
# packages, or pets. The current valid labels you can include in this
|
584
|
+
# list are: "PERSON", "PET", "PACKAGE", and "ALL".
|
585
|
+
# @return [Array<String>]
|
586
|
+
#
|
587
|
+
# @!attribute [rw] min_confidence
|
588
|
+
# The minimum confidence required to label an object in the video.
|
589
|
+
# @return [Float]
|
590
|
+
#
|
591
|
+
class ConnectedHomeSettings < Struct.new(
|
592
|
+
:labels,
|
593
|
+
:min_confidence)
|
594
|
+
SENSITIVE = []
|
595
|
+
include Aws::Structure
|
596
|
+
end
|
597
|
+
|
598
|
+
# The label detection settings you want to use in your stream processor.
|
599
|
+
# This includes the labels you want the stream processor to detect and
|
600
|
+
# the minimum confidence level allowed to label objects.
|
601
|
+
#
|
602
|
+
# @note When making an API call, you may pass ConnectedHomeSettingsForUpdate
|
603
|
+
# data as a hash:
|
604
|
+
#
|
605
|
+
# {
|
606
|
+
# labels: ["ConnectedHomeLabel"],
|
607
|
+
# min_confidence: 1.0,
|
608
|
+
# }
|
609
|
+
#
|
610
|
+
# @!attribute [rw] labels
|
611
|
+
# Specifies what you want to detect in the video, such as people,
|
612
|
+
# packages, or pets. The current valid labels you can include in this
|
613
|
+
# list are: "PERSON", "PET", "PACKAGE", and "ALL".
|
614
|
+
# @return [Array<String>]
|
615
|
+
#
|
616
|
+
# @!attribute [rw] min_confidence
|
617
|
+
# The minimum confidence required to label an object in the video.
|
618
|
+
# @return [Float]
|
619
|
+
#
|
620
|
+
class ConnectedHomeSettingsForUpdate < Struct.new(
|
621
|
+
:labels,
|
622
|
+
:min_confidence)
|
623
|
+
SENSITIVE = []
|
624
|
+
include Aws::Structure
|
625
|
+
end
|
626
|
+
|
561
627
|
# Information about an inappropriate, unwanted, or offensive content
|
562
628
|
# label detection in a stored video.
|
563
629
|
#
|
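
To illustrate the `ConnectedHomeSettings` shape documented in the hunk above, here is a minimal Ruby sketch of the settings hash a caller might build for label detection. The label list and confidence value are illustrative choices, not values taken from this diff.

  # Label detection ("connected home") settings: which object classes to
  # report and the minimum confidence required to report them.
  connected_home_settings = {
    labels: ["PERSON", "PACKAGE"],  # documented values: "PERSON", "PET", "PACKAGE", "ALL"
    min_confidence: 80.0
  }

  # Later hunks pass this hash as settings[:connected_home] on
  # CreateStreamProcessor.
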
@@ -632,12 +698,8 @@ module Aws::Rekognition
|
|
632
698
|
# @return [String]
|
633
699
|
#
|
634
700
|
# @!attribute [rw] face_model_version
|
635
|
-
#
|
636
|
-
#
|
637
|
-
#
|
638
|
-
#
|
639
|
-
#
|
640
|
-
# [1]: https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html
|
701
|
+
# Version number of the face detection model associated with the
|
702
|
+
# collection you are creating.
|
641
703
|
# @return [String]
|
642
704
|
#
|
643
705
|
class CreateCollectionResponse < Struct.new(
|
@@ -867,6 +929,10 @@ module Aws::Rekognition
|
|
867
929
|
# kinesis_data_stream: {
|
868
930
|
# arn: "KinesisDataArn",
|
869
931
|
# },
|
932
|
+
# s3_destination: {
|
933
|
+
# bucket: "S3Bucket",
|
934
|
+
# key_prefix: "S3KeyPrefix",
|
935
|
+
# },
|
870
936
|
# },
|
871
937
|
# name: "StreamProcessorName", # required
|
872
938
|
# settings: { # required
|
@@ -874,40 +940,77 @@ module Aws::Rekognition
|
|
874
940
|
# collection_id: "CollectionId",
|
875
941
|
# face_match_threshold: 1.0,
|
876
942
|
# },
|
943
|
+
# connected_home: {
|
944
|
+
# labels: ["ConnectedHomeLabel"], # required
|
945
|
+
# min_confidence: 1.0,
|
946
|
+
# },
|
877
947
|
# },
|
878
948
|
# role_arn: "RoleArn", # required
|
879
949
|
# tags: {
|
880
950
|
# "TagKey" => "TagValue",
|
881
951
|
# },
|
952
|
+
# notification_channel: {
|
953
|
+
# sns_topic_arn: "SNSTopicArn", # required
|
954
|
+
# },
|
955
|
+
# kms_key_id: "KmsKeyId",
|
956
|
+
# regions_of_interest: [
|
957
|
+
# {
|
958
|
+
# bounding_box: {
|
959
|
+
# width: 1.0,
|
960
|
+
# height: 1.0,
|
961
|
+
# left: 1.0,
|
962
|
+
# top: 1.0,
|
963
|
+
# },
|
964
|
+
# polygon: [
|
965
|
+
# {
|
966
|
+
# x: 1.0,
|
967
|
+
# y: 1.0,
|
968
|
+
# },
|
969
|
+
# ],
|
970
|
+
# },
|
971
|
+
# ],
|
972
|
+
# data_sharing_preference: {
|
973
|
+
# opt_in: false, # required
|
974
|
+
# },
|
882
975
|
# }
|
883
976
|
#
|
884
977
|
# @!attribute [rw] input
|
885
978
|
# Kinesis video stream that provides the source streaming
|
886
979
|
# video. If you are using the AWS CLI, the parameter name is
|
887
|
-
# `StreamProcessorInput`.
|
980
|
+
# `StreamProcessorInput`. This is required for both face search and
|
981
|
+
# label detection stream processors.
|
888
982
|
# @return [Types::StreamProcessorInput]
|
889
983
|
#
|
890
984
|
# @!attribute [rw] output
|
891
|
-
# Kinesis data stream stream
|
892
|
-
# the analysis results. If you are using
|
893
|
-
# name is `StreamProcessorOutput`.
|
985
|
+
# Kinesis data stream or Amazon S3 bucket location to which
|
986
|
+
# Amazon Rekognition Video puts the analysis results. If you are using
|
987
|
+
# the AWS CLI, the parameter name is `StreamProcessorOutput`. This
|
988
|
+
# must be a S3Destination of an Amazon S3 bucket that you own for a
|
989
|
+
# label detection stream processor or a Kinesis data stream ARN for a
|
990
|
+
# face search stream processor.
|
894
991
|
# @return [Types::StreamProcessorOutput]
|
895
992
|
#
|
896
993
|
# @!attribute [rw] name
|
897
994
|
# An identifier you assign to the stream processor. You can use `Name`
|
898
995
|
# to manage the stream processor. For example, you can get the current
|
899
996
|
# status of the stream processor by calling DescribeStreamProcessor.
|
900
|
-
# `Name` is idempotent.
|
997
|
+
# `Name` is idempotent. This is required for both face search and
|
998
|
+
# label detection stream processors.
|
901
999
|
# @return [String]
|
902
1000
|
#
|
903
1001
|
# @!attribute [rw] settings
|
904
|
-
#
|
905
|
-
# processor.
|
906
|
-
#
|
1002
|
+
# Input parameters used in a streaming video analyzed by a stream
|
1003
|
+
# processor. You can use `FaceSearch` to recognize faces in a
|
1004
|
+
# streaming video, or you can use `ConnectedHome` to detect labels.
|
907
1005
|
# @return [Types::StreamProcessorSettings]
|
908
1006
|
#
|
909
1007
|
# @!attribute [rw] role_arn
|
910
|
-
# ARN of the IAM role that allows access
|
1008
|
+
# The Amazon Resource Name (ARN) of the IAM role that allows access
|
1009
|
+
# to the stream processor. The IAM role provides Rekognition read
|
1010
|
+
# permissions for a Kinesis stream. It also provides write permissions
|
1011
|
+
# to an Amazon S3 bucket and Amazon Simple Notification Service topic
|
1012
|
+
# for a label detection stream processor. This is required for both
|
1013
|
+
# face search and label detection stream processors.
|
911
1014
|
# @return [String]
|
912
1015
|
#
|
913
1016
|
# @!attribute [rw] tags
|
@@ -915,19 +1018,64 @@ module Aws::Rekognition
|
|
915
1018
|
# stream processor.
|
916
1019
|
# @return [Hash<String,String>]
|
917
1020
|
#
|
1021
|
+
# @!attribute [rw] notification_channel
|
1022
|
+
# The Amazon Simple Notification Service topic to which Amazon
|
1023
|
+
# Rekognition publishes the object detection results and completion
|
1024
|
+
# status of a video analysis operation.
|
1025
|
+
#
|
1026
|
+
# Amazon Rekognition publishes a notification the first time an object
|
1027
|
+
# of interest or a person is detected in the video stream. For
|
1028
|
+
# example, if Amazon Rekognition detects a person at second 2, a pet
|
1029
|
+
# at second 4, and a person again at second 5, Amazon Rekognition
|
1030
|
+
# sends 2 object class detected notifications, one for a person at
|
1031
|
+
# second 2 and one for a pet at second 4.
|
1032
|
+
#
|
1033
|
+
# Amazon Rekognition also publishes an end-of-session notification
|
1034
|
+
# with a summary when the stream processing session is complete.
|
1035
|
+
# @return [Types::StreamProcessorNotificationChannel]
|
1036
|
+
#
|
1037
|
+
# @!attribute [rw] kms_key_id
|
1038
|
+
# The identifier for your AWS Key Management Service key (AWS KMS
|
1039
|
+
# key). This is an optional parameter for label detection stream
|
1040
|
+
# processors and should not be used to create a face search stream
|
1041
|
+
# processor. You can supply the Amazon Resource Name (ARN) of your KMS
|
1042
|
+
# key, the ID of your KMS key, an alias for your KMS key, or an alias
|
1043
|
+
# ARN. The key is used to encrypt results and data published to your
|
1044
|
+
# Amazon S3 bucket, which includes image frames and hero images. Your
|
1045
|
+
# source images are unaffected.
|
1046
|
+
# @return [String]
|
1047
|
+
#
|
1048
|
+
# @!attribute [rw] regions_of_interest
|
1049
|
+
# Specifies locations in the frames where Amazon Rekognition checks
|
1050
|
+
# for objects or people. You can specify up to 10 regions of interest.
|
1051
|
+
# This is an optional parameter for label detection stream processors
|
1052
|
+
# and should not be used to create a face search stream processor.
|
1053
|
+
# @return [Array<Types::RegionOfInterest>]
|
1054
|
+
#
|
1055
|
+
# @!attribute [rw] data_sharing_preference
|
1056
|
+
# Shows whether you are sharing data with Rekognition to improve model
|
1057
|
+
# performance. You can choose this option at the account level or on a
|
1058
|
+
# per-stream basis. Note that if you opt out at the account level this
|
1059
|
+
# setting is ignored on individual streams.
|
1060
|
+
# @return [Types::StreamProcessorDataSharingPreference]
|
1061
|
+
#
|
918
1062
|
class CreateStreamProcessorRequest < Struct.new(
|
919
1063
|
:input,
|
920
1064
|
:output,
|
921
1065
|
:name,
|
922
1066
|
:settings,
|
923
1067
|
:role_arn,
|
924
|
-
:tags
|
1068
|
+
:tags,
|
1069
|
+
:notification_channel,
|
1070
|
+
:kms_key_id,
|
1071
|
+
:regions_of_interest,
|
1072
|
+
:data_sharing_preference)
|
925
1073
|
SENSITIVE = []
|
926
1074
|
include Aws::Structure
|
927
1075
|
end
|
928
1076
|
|
929
1077
|
# @!attribute [rw] stream_processor_arn
|
930
|
-
#
|
1078
|
+
# Amazon Resource Name (ARN) for the newly created stream processor.
|
931
1079
|
# @return [String]
|
932
1080
|
#
|
933
1081
|
class CreateStreamProcessorResponse < Struct.new(
|
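
Putting the new request fields together, the following is a minimal sketch of creating a label detection stream processor with this gem, assuming valid credentials. Every ARN, bucket name, and the processor name below is a placeholder rather than a value from this diff.

  require "aws-sdk-rekognition"

  client = Aws::Rekognition::Client.new(region: "us-east-1")

  # Create a label detection ("connected home") stream processor following
  # the request shape documented above: Kinesis Video Streams in, S3 out,
  # SNS notifications, and ConnectedHome settings.
  resp = client.create_stream_processor(
    name: "my-connected-home-processor",
    input: {
      kinesis_video_stream: { arn: "arn:aws:kinesisvideo:us-east-1:111122223333:stream/my-stream/1234567890" }
    },
    output: {
      s3_destination: { bucket: "my-results-bucket", key_prefix: "stream-results/" }
    },
    settings: {
      connected_home: { labels: ["PERSON", "PET"], min_confidence: 80.0 }
    },
    role_arn: "arn:aws:iam::111122223333:role/RekognitionStreamRole",
    notification_channel: { sns_topic_arn: "arn:aws:sns:us-east-1:111122223333:AmazonRekognitionStreamTopic" },
    data_sharing_preference: { opt_in: false }
  )

  puts resp.stream_processor_arn  # ARN of the newly created stream processor

A face search stream processor would instead supply settings[:face_search] and a Kinesis data stream output, as the attribute documentation above describes.
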
@@ -1373,7 +1521,7 @@ module Aws::Rekognition
|
|
1373
1521
|
# The version of the face model that's used by the collection for
|
1374
1522
|
# face detection.
|
1375
1523
|
#
|
1376
|
-
# For more information, see Model
|
1524
|
+
# For more information, see Model versioning in the Amazon Rekognition
|
1377
1525
|
# Developer Guide.
|
1378
1526
|
# @return [String]
|
1379
1527
|
#
|
@@ -1604,11 +1752,46 @@ module Aws::Rekognition
|
|
1604
1752
|
# @return [String]
|
1605
1753
|
#
|
1606
1754
|
# @!attribute [rw] settings
|
1607
|
-
#
|
1608
|
-
# processor.
|
1609
|
-
#
|
1755
|
+
# Input parameters used in a streaming video analyzed by a stream
|
1756
|
+
# processor. You can use `FaceSearch` to recognize faces in a
|
1757
|
+
# streaming video, or you can use `ConnectedHome` to detect labels.
|
1610
1758
|
# @return [Types::StreamProcessorSettings]
|
1611
1759
|
#
|
1760
|
+
# @!attribute [rw] notification_channel
|
1761
|
+
# The Amazon Simple Notification Service topic to which Amazon
|
1762
|
+
# Rekognition publishes the object detection results and completion
|
1763
|
+
# status of a video analysis operation.
|
1764
|
+
#
|
1765
|
+
# Amazon Rekognition publishes a notification the first time an object
|
1766
|
+
# of interest or a person is detected in the video stream. For
|
1767
|
+
# example, if Amazon Rekognition detects a person at second 2, a pet
|
1768
|
+
# at second 4, and a person again at second 5, Amazon Rekognition
|
1769
|
+
# sends 2 object class detected notifications, one for a person at
|
1770
|
+
# second 2 and one for a pet at second 4.
|
1771
|
+
#
|
1772
|
+
# Amazon Rekognition also publishes an end-of-session notification
|
1773
|
+
# with a summary when the stream processing session is complete.
|
1774
|
+
# @return [Types::StreamProcessorNotificationChannel]
|
1775
|
+
#
|
1776
|
+
# @!attribute [rw] kms_key_id
|
1777
|
+
# The identifier for your AWS Key Management Service key (AWS KMS
|
1778
|
+
# key). This is an optional parameter for label detection stream
|
1779
|
+
# processors.
|
1780
|
+
# @return [String]
|
1781
|
+
#
|
1782
|
+
# @!attribute [rw] regions_of_interest
|
1783
|
+
# Specifies locations in the frames where Amazon Rekognition checks
|
1784
|
+
# for objects or people. This is an optional parameter for label
|
1785
|
+
# detection stream processors.
|
1786
|
+
# @return [Array<Types::RegionOfInterest>]
|
1787
|
+
#
|
1788
|
+
# @!attribute [rw] data_sharing_preference
|
1789
|
+
# Shows whether you are sharing data with Rekognition to improve model
|
1790
|
+
# performance. You can choose this option at the account level or on a
|
1791
|
+
# per-stream basis. Note that if you opt out at the account level this
|
1792
|
+
# setting is ignored on individual streams.
|
1793
|
+
# @return [Types::StreamProcessorDataSharingPreference]
|
1794
|
+
#
|
1612
1795
|
class DescribeStreamProcessorResponse < Struct.new(
|
1613
1796
|
:name,
|
1614
1797
|
:stream_processor_arn,
|
@@ -1619,7 +1802,11 @@ module Aws::Rekognition
|
|
1619
1802
|
:input,
|
1620
1803
|
:output,
|
1621
1804
|
:role_arn,
|
1622
|
-
:settings
|
1805
|
+
:settings,
|
1806
|
+
:notification_channel,
|
1807
|
+
:kms_key_id,
|
1808
|
+
:regions_of_interest,
|
1809
|
+
:data_sharing_preference)
|
1623
1810
|
SENSITIVE = []
|
1624
1811
|
include Aws::Structure
|
1625
1812
|
end
|
@@ -1671,8 +1858,9 @@ module Aws::Rekognition
|
|
1671
1858
|
# operation using the S3Object property.
|
1672
1859
|
#
|
1673
1860
|
# For Amazon Rekognition to process an S3 object, the user must have
|
1674
|
-
# permission to access the S3 object. For more information, see
|
1675
|
-
#
|
1861
|
+
# permission to access the S3 object. For more information, see How
|
1862
|
+
# Amazon Rekognition works with IAM in the Amazon Rekognition
|
1863
|
+
# Developer Guide.
|
1676
1864
|
# @return [Types::Image]
|
1677
1865
|
#
|
1678
1866
|
# @!attribute [rw] max_results
|
@@ -2027,6 +2215,12 @@ module Aws::Rekognition
|
|
2027
2215
|
# left: 1.0,
|
2028
2216
|
# top: 1.0,
|
2029
2217
|
# },
|
2218
|
+
# polygon: [
|
2219
|
+
# {
|
2220
|
+
# x: 1.0,
|
2221
|
+
# y: 1.0,
|
2222
|
+
# },
|
2223
|
+
# ],
|
2030
2224
|
# },
|
2031
2225
|
# ],
|
2032
2226
|
# }
|
@@ -2074,6 +2268,12 @@ module Aws::Rekognition
|
|
2074
2268
|
# left: 1.0,
|
2075
2269
|
# top: 1.0,
|
2076
2270
|
# },
|
2271
|
+
# polygon: [
|
2272
|
+
# {
|
2273
|
+
# x: 1.0,
|
2274
|
+
# y: 1.0,
|
2275
|
+
# },
|
2276
|
+
# ],
|
2077
2277
|
# },
|
2078
2278
|
# ],
|
2079
2279
|
# },
|
@@ -2132,8 +2332,7 @@ module Aws::Rekognition
|
|
2132
2332
|
# @!attribute [rw] min_confidence
|
2133
2333
|
# Sets the confidence of word detection. Words with detection
|
2134
2334
|
# confidence below this will be excluded from the result. Values
|
2135
|
-
# should be between
|
2136
|
-
# result below 50.
|
2335
|
+
# should be between 0 and 100. The default MinConfidence is 80.
|
2137
2336
|
# @return [Float]
|
2138
2337
|
#
|
2139
2338
|
# @!attribute [rw] min_bounding_box_height
|
@@ -2534,8 +2733,9 @@ module Aws::Rekognition
|
|
2534
2733
|
end
|
2535
2734
|
|
2536
2735
|
# Input face recognition parameters for an Amazon Rekognition stream
|
2537
|
-
# processor.
|
2538
|
-
#
|
2736
|
+
# processor. Includes the collection to use for face recognition and the
|
2737
|
+
# face attributes to detect. Defining the settings is required in the
|
2738
|
+
# request parameter for CreateStreamProcessor.
|
2539
2739
|
#
|
2540
2740
|
# @note When making an API call, you may pass FaceSearchSettings
|
2541
2741
|
# data as a hash:
|
@@ -2580,7 +2780,7 @@ module Aws::Rekognition
|
|
2580
2780
|
# media platform.
|
2581
2781
|
#
|
2582
2782
|
# We don't recommend using gender binary predictions to make decisions
|
2583
|
-
# that impact
|
2783
|
+
# that impact
|
2584
2784
|
#
|
2585
2785
|
# @!attribute [rw] value
|
2586
2786
|
# The predicted gender of the face.
|
@@ -3348,8 +3548,9 @@ module Aws::Rekognition
|
|
3348
3548
|
# region you use for Amazon Rekognition operations.
|
3349
3549
|
#
|
3350
3550
|
# For Amazon Rekognition to process an S3 object, the user must have
|
3351
|
-
# permission to access the S3 object. For more information, see
|
3352
|
-
#
|
3551
|
+
# permission to access the S3 object. For more information, see How
|
3552
|
+
# Amazon Rekognition works with IAM in the Amazon Rekognition
|
3553
|
+
# Developer Guide.
|
3353
3554
|
# @return [Types::S3Object]
|
3354
3555
|
#
|
3355
3556
|
class GroundTruthManifest < Struct.new(
|
@@ -3499,8 +3700,9 @@ module Aws::Rekognition
|
|
3499
3700
|
# using the S3Object property.
|
3500
3701
|
#
|
3501
3702
|
# For Amazon Rekognition to process an S3 object, the user must have
|
3502
|
-
# permission to access the S3 object. For more information, see
|
3503
|
-
#
|
3703
|
+
# permission to access the S3 object. For more information, see How
|
3704
|
+
# Amazon Rekognition works with IAM in the Amazon Rekognition Developer
|
3705
|
+
# Guide.
|
3504
3706
|
#
|
3505
3707
|
# @note When making an API call, you may pass Image
|
3506
3708
|
# data as a hash:
|
@@ -3552,8 +3754,8 @@ module Aws::Rekognition
|
|
3552
3754
|
|
3553
3755
|
# The input image size exceeds the allowed limit. If you are calling
|
3554
3756
|
# DetectProtectiveEquipment, the image size or resolution exceeds the
|
3555
|
-
# allowed limit. For more information, see
|
3556
|
-
# in the Amazon Rekognition Developer Guide.
|
3757
|
+
# allowed limit. For more information, see Guidelines and quotas in
|
3758
|
+
# Amazon Rekognition in the Amazon Rekognition Developer Guide.
|
3557
3759
|
#
|
3558
3760
|
class ImageTooLargeException < Aws::EmptyStructure; end
|
3559
3761
|
|
@@ -3694,12 +3896,8 @@ module Aws::Rekognition
|
|
3694
3896
|
# @return [String]
|
3695
3897
|
#
|
3696
3898
|
# @!attribute [rw] face_model_version
|
3697
|
-
#
|
3698
|
-
#
|
3699
|
-
#
|
3700
|
-
#
|
3701
|
-
#
|
3702
|
-
# [1]: https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html
|
3899
|
+
# The version number of the face detection model that's associated
|
3900
|
+
# with the input collection (`CollectionId`).
|
3703
3901
|
# @return [String]
|
3704
3902
|
#
|
3705
3903
|
# @!attribute [rw] unindexed_faces
|
@@ -3804,6 +4002,38 @@ module Aws::Rekognition
|
|
3804
4002
|
include Aws::Structure
|
3805
4003
|
end
|
3806
4004
|
|
4005
|
+
# Specifies the starting point in a Kinesis stream to start processing.
|
4006
|
+
# You can use the producer timestamp or the fragment number. For more
|
4007
|
+
# information, see [Fragment][1].
|
4008
|
+
#
|
4009
|
+
#
|
4010
|
+
#
|
4011
|
+
# [1]: https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/API_reader_Fragment.html
|
4012
|
+
#
|
4013
|
+
# @note When making an API call, you may pass KinesisVideoStreamStartSelector
|
4014
|
+
# data as a hash:
|
4015
|
+
#
|
4016
|
+
# {
|
4017
|
+
# producer_timestamp: 1,
|
4018
|
+
# fragment_number: "KinesisVideoStreamFragmentNumber",
|
4019
|
+
# }
|
4020
|
+
#
|
4021
|
+
# @!attribute [rw] producer_timestamp
|
4022
|
+
# The timestamp from the producer corresponding to the fragment.
|
4023
|
+
# @return [Integer]
|
4024
|
+
#
|
4025
|
+
# @!attribute [rw] fragment_number
|
4026
|
+
# The unique identifier of the fragment. This value monotonically
|
4027
|
+
# increases based on the ingestion order.
|
4028
|
+
# @return [String]
|
4029
|
+
#
|
4030
|
+
class KinesisVideoStreamStartSelector < Struct.new(
|
4031
|
+
:producer_timestamp,
|
4032
|
+
:fragment_number)
|
4033
|
+
SENSITIVE = []
|
4034
|
+
include Aws::Structure
|
4035
|
+
end
|
4036
|
+
|
3807
4037
|
# The known gender identity for the celebrity that matches the provided
|
3808
4038
|
# ID. The known gender identity can be Male, Female, Nonbinary, or
|
3809
4039
|
# Unlisted.
|
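
The `KinesisVideoStreamStartSelector` added above takes either a producer timestamp or a fragment number. A small sketch of both forms follows; the numeric timestamp is assumed to be an epoch value in milliseconds, which this diff does not state, and the fragment number is a placeholder.

  # Start processing from a producer timestamp...
  start_by_time = {
    kvs_stream_start_selector: { producer_timestamp: 1_655_930_623_000 }
  }

  # ...or from a specific fragment number.
  start_by_fragment = {
    kvs_stream_start_selector: { fragment_number: "91343852333181432392682062607743920694" }
  }
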
@@ -3940,15 +4170,10 @@ module Aws::Rekognition
|
|
3940
4170
|
# @return [String]
|
3941
4171
|
#
|
3942
4172
|
# @!attribute [rw] face_model_versions
|
3943
|
-
#
|
3944
|
-
# the array
|
3945
|
-
#
|
3946
|
-
#
|
3947
|
-
# `CollectionId[2]`.
|
3948
|
-
#
|
3949
|
-
#
|
3950
|
-
#
|
3951
|
-
# [1]: https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html
|
4173
|
+
# Version numbers of the face detection models associated with the
|
4174
|
+
# collections in the array `CollectionIds`. For example, the value of
|
4175
|
+
# `FaceModelVersions[2]` is the version number for the face detection
|
4176
|
+
# model used by the collection in `CollectionId[2]`.
|
3952
4177
|
# @return [Array<String>]
|
3953
4178
|
#
|
3954
4179
|
class ListCollectionsResponse < Struct.new(
|
@@ -4144,12 +4369,8 @@ module Aws::Rekognition
|
|
4144
4369
|
# @return [String]
|
4145
4370
|
#
|
4146
4371
|
# @!attribute [rw] face_model_version
|
4147
|
-
#
|
4148
|
-
#
|
4149
|
-
#
|
4150
|
-
#
|
4151
|
-
#
|
4152
|
-
# [1]: https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html
|
4372
|
+
# Version number of the face detection model associated with the input
|
4373
|
+
# collection (`CollectionId`).
|
4153
4374
|
# @return [String]
|
4154
4375
|
#
|
4155
4376
|
class ListFacesResponse < Struct.new(
|
@@ -4303,15 +4524,17 @@ module Aws::Rekognition
|
|
4303
4524
|
|
4304
4525
|
# The Amazon Simple Notification Service topic to which Amazon
|
4305
4526
|
# Rekognition publishes the completion status of a video analysis
|
4306
|
-
# operation. For more information, see
|
4307
|
-
# SNS topic must have a topic name
|
4308
|
-
# if you are using the
|
4309
|
-
# to access the topic.
|
4310
|
-
# multiple Amazon SNS
|
4527
|
+
# operation. For more information, see [Calling Amazon Rekognition Video
|
4528
|
+
# operations][1]. Note that the Amazon SNS topic must have a topic name
|
4529
|
+
# that begins with *AmazonRekognition* if you are using the
|
4530
|
+
# AmazonRekognitionServiceRole permissions policy to access the topic.
|
4531
|
+
# For more information, see [Giving access to multiple Amazon SNS
|
4532
|
+
# topics][2].
|
4311
4533
|
#
|
4312
4534
|
#
|
4313
4535
|
#
|
4314
|
-
# [1]: https://docs.aws.amazon.com/rekognition/latest/dg/api-video
|
4536
|
+
# [1]: https://docs.aws.amazon.com/rekognition/latest/dg/api-video.html
|
4537
|
+
# [2]: https://docs.aws.amazon.com/rekognition/latest/dg/api-video-roles.html#api-video-roles-all-topics
|
4315
4538
|
#
|
4316
4539
|
# @note When making an API call, you may pass NotificationChannel
|
4317
4540
|
# data as a hash:
|
@@ -4322,7 +4545,7 @@ module Aws::Rekognition
|
|
4322
4545
|
# }
|
4323
4546
|
#
|
4324
4547
|
# @!attribute [rw] sns_topic_arn
|
4325
|
-
# The Amazon SNS topic to which Amazon Rekognition
|
4548
|
+
# The Amazon SNS topic to which Amazon Rekognition posts the
|
4326
4549
|
# completion status.
|
4327
4550
|
# @return [String]
|
4328
4551
|
#
|
@@ -4452,15 +4675,24 @@ module Aws::Rekognition
|
|
4452
4675
|
include Aws::Structure
|
4453
4676
|
end
|
4454
4677
|
|
4455
|
-
# The X and Y coordinates of a point on an image. The X
|
4456
|
-
#
|
4457
|
-
# input image is 700x200 and the
|
4458
|
-
# then the point is at the (350,50) pixel coordinate on the
|
4678
|
+
# The X and Y coordinates of a point on an image or video frame. The X
|
4679
|
+
# and Y values are ratios of the overall image size or video resolution.
|
4680
|
+
# For example, if an input image is 700x200 and the values are X=0.5 and
|
4681
|
+
# Y=0.25, then the point is at the (350,50) pixel coordinate on the
|
4682
|
+
# image.
|
4459
4683
|
#
|
4460
|
-
# An array of `Point` objects
|
4461
|
-
# by DetectCustomLabels
|
4462
|
-
# around a detected item. For more information,
|
4463
|
-
# Amazon Rekognition Developer Guide.
|
4684
|
+
# An array of `Point` objects makes up a `Polygon`. A `Polygon` is
|
4685
|
+
# returned by DetectText and by DetectCustomLabels. `Polygon` represents
|
4686
|
+
# a fine-grained polygon around a detected item. For more information,
|
4687
|
+
# see Geometry in the Amazon Rekognition Developer Guide.
|
4688
|
+
#
|
4689
|
+
# @note When making an API call, you may pass Point
|
4690
|
+
# data as a hash:
|
4691
|
+
#
|
4692
|
+
# {
|
4693
|
+
# x: 1.0,
|
4694
|
+
# y: 1.0,
|
4695
|
+
# }
|
4464
4696
|
#
|
4465
4697
|
# @!attribute [rw] x
|
4466
4698
|
# The value of the X coordinate for a point on a `Polygon`.
|
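
Because `Point` values are ratios, converting to pixels is a matter of multiplying by the frame dimensions. A short sketch of the arithmetic in the example above (a 700x200 image with X=0.5 and Y=0.25) and of a polygon built from such points:

  frame_width  = 700
  frame_height = 200

  point = { x: 0.5, y: 0.25 }
  pixel = [(point[:x] * frame_width).round, (point[:y] * frame_height).round]
  # => [350, 50], matching the (350,50) pixel coordinate described above

  # A triangular polygon expressed as ratio-based points (illustrative values).
  polygon = [
    { x: 0.1,  y: 0.1 },
    { x: 0.6,  y: 0.1 },
    { x: 0.35, y: 0.8 }
  ]
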
@@ -4842,12 +5074,13 @@ module Aws::Rekognition
|
|
4842
5074
|
end
|
4843
5075
|
|
4844
5076
|
# Specifies a location within the frame that Rekognition checks for
|
4845
|
-
#
|
5077
|
+
# objects of interest such as text, labels, or faces. It uses a
|
5078
|
+
# `BoundingBox` object or `Polygon` to set a region of the screen.
|
4846
5079
|
#
|
4847
|
-
# A word is included in the region if
|
4848
|
-
# region. If there is more than one region, the word
|
4849
|
-
# with all regions of the screen. Any
|
4850
|
-
# kept in the results.
|
5080
|
+
# A word, face, or label is included in the region if it is more than
|
5081
|
+
# half in that region. If there is more than one region, the word, face,
|
5082
|
+
# or label is compared with all regions of the screen. Any object of
|
5083
|
+
# interest that is more than half in a region is kept in the results.
|
4851
5084
|
#
|
4852
5085
|
# @note When making an API call, you may pass RegionOfInterest
|
4853
5086
|
# data as a hash:
|
@@ -4859,14 +5092,26 @@ module Aws::Rekognition
|
|
4859
5092
|
# left: 1.0,
|
4860
5093
|
# top: 1.0,
|
4861
5094
|
# },
|
5095
|
+
# polygon: [
|
5096
|
+
# {
|
5097
|
+
# x: 1.0,
|
5098
|
+
# y: 1.0,
|
5099
|
+
# },
|
5100
|
+
# ],
|
4862
5101
|
# }
|
4863
5102
|
#
|
4864
5103
|
# @!attribute [rw] bounding_box
|
4865
5104
|
# The box representing a region of interest on screen.
|
4866
5105
|
# @return [Types::BoundingBox]
|
4867
5106
|
#
|
5107
|
+
# @!attribute [rw] polygon
|
5108
|
+
# Specifies a shape made up of up to 10 `Point` objects to define a
|
5109
|
+
# region of interest.
|
5110
|
+
# @return [Array<Types::Point>]
|
5111
|
+
#
|
4868
5112
|
class RegionOfInterest < Struct.new(
|
4869
|
-
:bounding_box
|
5113
|
+
:bounding_box,
|
5114
|
+
:polygon)
|
4870
5115
|
SENSITIVE = []
|
4871
5116
|
include Aws::Structure
|
4872
5117
|
end
|
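
A `RegionOfInterest` can now be expressed either as a bounding box or as a polygon of up to 10 points, as the hunk above documents. A brief sketch of the two forms with illustrative ratio values; either form can be supplied in the `regions_of_interest` parameters that appear elsewhere in this diff (CreateStreamProcessor and the DetectText filters).

  box_region = {
    bounding_box: { width: 0.4, height: 0.3, left: 0.1, top: 0.1 }
  }

  polygon_region = {
    polygon: [
      { x: 0.1,  y: 0.1 },
      { x: 0.6,  y: 0.1 },
      { x: 0.35, y: 0.8 }
    ]
  }

  regions_of_interest = [box_region, polygon_region]
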
@@ -4889,14 +5134,52 @@ module Aws::Rekognition
|
|
4889
5134
|
#
|
4890
5135
|
class ResourceNotReadyException < Aws::EmptyStructure; end
|
4891
5136
|
|
5137
|
+
# The Amazon S3 bucket location to which Amazon Rekognition publishes
|
5138
|
+
# the detailed inference results of a video analysis operation. These
|
5139
|
+
# results include the name of the stream processor resource, the session
|
5140
|
+
# ID of the stream processing session, and labeled timestamps and
|
5141
|
+
# bounding boxes for detected labels.
|
5142
|
+
#
|
5143
|
+
# @note When making an API call, you may pass S3Destination
|
5144
|
+
# data as a hash:
|
5145
|
+
#
|
5146
|
+
# {
|
5147
|
+
# bucket: "S3Bucket",
|
5148
|
+
# key_prefix: "S3KeyPrefix",
|
5149
|
+
# }
|
5150
|
+
#
|
5151
|
+
# @!attribute [rw] bucket
|
5152
|
+
# The name of the Amazon S3 bucket you want to associate with the
|
5153
|
+
# streaming video project. You must be the owner of the Amazon S3
|
5154
|
+
# bucket.
|
5155
|
+
# @return [String]
|
5156
|
+
#
|
5157
|
+
# @!attribute [rw] key_prefix
|
5158
|
+
# The prefix value of the location within the bucket that you want the
|
5159
|
+
# information to be published to. For more information, see [Using
|
5160
|
+
# prefixes][1].
|
5161
|
+
#
|
5162
|
+
#
|
5163
|
+
#
|
5164
|
+
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html
|
5165
|
+
# @return [String]
|
5166
|
+
#
|
5167
|
+
class S3Destination < Struct.new(
|
5168
|
+
:bucket,
|
5169
|
+
:key_prefix)
|
5170
|
+
SENSITIVE = []
|
5171
|
+
include Aws::Structure
|
5172
|
+
end
|
5173
|
+
|
4892
5174
|
# Provides the S3 bucket name and object name.
|
4893
5175
|
#
|
4894
5176
|
# The region for the S3 bucket containing the S3 object must match the
|
4895
5177
|
# region you use for Amazon Rekognition operations.
|
4896
5178
|
#
|
4897
5179
|
# For Amazon Rekognition to process an S3 object, the user must have
|
4898
|
-
# permission to access the S3 object. For more information, see
|
4899
|
-
#
|
5180
|
+
# permission to access the S3 object. For more information, see How
|
5181
|
+
# Amazon Rekognition works with IAM in the Amazon Rekognition Developer
|
5182
|
+
# Guide.
|
4900
5183
|
#
|
4901
5184
|
# @note When making an API call, you may pass S3Object
|
4902
5185
|
# data as a hash:
|
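
The new `S3Destination` shape slots into the stream processor output alongside the existing Kinesis data stream option. A minimal sketch with placeholder bucket and prefix names; per the documentation above, the caller must own the bucket.

  # Route detailed label detection results to an S3 location the account owns.
  output = {
    s3_destination: {
      bucket: "my-rekognition-results",
      key_prefix: "connected-home/"
    }
  }
  # A face search processor would instead use
  # { kinesis_data_stream: { arn: "..." } }, as documented earlier in this diff.
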
@@ -5014,12 +5297,8 @@ module Aws::Rekognition
|
|
5014
5297
|
# @return [Array<Types::FaceMatch>]
|
5015
5298
|
#
|
5016
5299
|
# @!attribute [rw] face_model_version
|
5017
|
-
#
|
5018
|
-
#
|
5019
|
-
#
|
5020
|
-
#
|
5021
|
-
#
|
5022
|
-
# [1]: https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html
|
5300
|
+
# Version number of the face detection model associated with the input
|
5301
|
+
# collection (`CollectionId`).
|
5023
5302
|
# @return [String]
|
5024
5303
|
#
|
5025
5304
|
class SearchFacesByImageResponse < Struct.new(
|
@@ -5079,12 +5358,8 @@ module Aws::Rekognition
|
|
5079
5358
|
# @return [Array<Types::FaceMatch>]
|
5080
5359
|
#
|
5081
5360
|
# @!attribute [rw] face_model_version
|
5082
|
-
#
|
5083
|
-
#
|
5084
|
-
#
|
5085
|
-
#
|
5086
|
-
#
|
5087
|
-
# [1]: https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html
|
5361
|
+
# Version number of the face detection model associated with the input
|
5362
|
+
# collection (`CollectionId`).
|
5088
5363
|
# @return [String]
|
5089
5364
|
#
|
5090
5365
|
class SearchFacesResponse < Struct.new(
|
@@ -5198,8 +5473,8 @@ module Aws::Rekognition
|
|
5198
5473
|
end
|
5199
5474
|
|
5200
5475
|
# The size of the collection exceeds the allowed limit. For more
|
5201
|
-
# information, see
|
5202
|
-
# Rekognition Developer Guide.
|
5476
|
+
# information, see Guidelines and quotas in Amazon Rekognition in the
|
5477
|
+
# Amazon Rekognition Developer Guide.
|
5203
5478
|
#
|
5204
5479
|
class ServiceQuotaExceededException < Aws::EmptyStructure; end
|
5205
5480
|
|
@@ -5888,19 +6163,59 @@ module Aws::Rekognition
|
|
5888
6163
|
#
|
5889
6164
|
# {
|
5890
6165
|
# name: "StreamProcessorName", # required
|
6166
|
+
# start_selector: {
|
6167
|
+
# kvs_stream_start_selector: {
|
6168
|
+
# producer_timestamp: 1,
|
6169
|
+
# fragment_number: "KinesisVideoStreamFragmentNumber",
|
6170
|
+
# },
|
6171
|
+
# },
|
6172
|
+
# stop_selector: {
|
6173
|
+
# max_duration_in_seconds: 1,
|
6174
|
+
# },
|
5891
6175
|
# }
|
5892
6176
|
#
|
5893
6177
|
# @!attribute [rw] name
|
5894
6178
|
# The name of the stream processor to start processing.
|
5895
6179
|
# @return [String]
|
5896
6180
|
#
|
6181
|
+
# @!attribute [rw] start_selector
|
6182
|
+
# Specifies the starting point in the Kinesis stream to start
|
6183
|
+
# processing. You can use the producer timestamp or the fragment
|
6184
|
+
# number. For more information, see [Fragment][1].
|
6185
|
+
#
|
6186
|
+
# This is a required parameter for label detection stream processors
|
6187
|
+
# and should not be used to start a face search stream processor.
|
6188
|
+
#
|
6189
|
+
#
|
6190
|
+
#
|
6191
|
+
# [1]: https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/API_reader_Fragment.html
|
6192
|
+
# @return [Types::StreamProcessingStartSelector]
|
6193
|
+
#
|
6194
|
+
# @!attribute [rw] stop_selector
|
6195
|
+
# Specifies when to stop processing the stream. You can specify a
|
6196
|
+
# maximum amount of time to process the video.
|
6197
|
+
#
|
6198
|
+
# This is a required parameter for label detection stream processors
|
6199
|
+
# and should not be used to start a face search stream processor.
|
6200
|
+
# @return [Types::StreamProcessingStopSelector]
|
6201
|
+
#
|
5897
6202
|
class StartStreamProcessorRequest < Struct.new(
|
5898
|
-
:name
|
6203
|
+
:name,
|
6204
|
+
:start_selector,
|
6205
|
+
:stop_selector)
|
5899
6206
|
SENSITIVE = []
|
5900
6207
|
include Aws::Structure
|
5901
6208
|
end
|
5902
6209
|
|
5903
|
-
|
6210
|
+
# @!attribute [rw] session_id
|
6211
|
+
# A unique identifier for the stream processing session.
|
6212
|
+
# @return [String]
|
6213
|
+
#
|
6214
|
+
class StartStreamProcessorResponse < Struct.new(
|
6215
|
+
:session_id)
|
6216
|
+
SENSITIVE = []
|
6217
|
+
include Aws::Structure
|
6218
|
+
end
|
5904
6219
|
|
5905
6220
|
# Filters for the technical segments returned by GetSegmentDetection.
|
5906
6221
|
# For more information, see StartSegmentDetectionFilters.
|
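
The new start and stop selectors surface directly on StartStreamProcessor. A minimal sketch of starting a label detection processor for at most one minute, assuming the processor from the earlier example exists; the name is a placeholder and the timestamp is assumed to be epoch milliseconds.

  require "aws-sdk-rekognition"

  client = Aws::Rekognition::Client.new(region: "us-east-1")

  resp = client.start_stream_processor(
    name: "my-connected-home-processor",
    start_selector: {
      kvs_stream_start_selector: { producer_timestamp: 1_655_930_623_000 }
    },
    stop_selector: { max_duration_in_seconds: 60 }  # documented maximum is 2 minutes
  )

  puts resp.session_id  # identifier for this stream processing session
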
@@ -5966,6 +6281,12 @@ module Aws::Rekognition
|
|
5966
6281
|
# left: 1.0,
|
5967
6282
|
# top: 1.0,
|
5968
6283
|
# },
|
6284
|
+
# polygon: [
|
6285
|
+
# {
|
6286
|
+
# x: 1.0,
|
6287
|
+
# y: 1.0,
|
6288
|
+
# },
|
6289
|
+
# ],
|
5969
6290
|
# },
|
5970
6291
|
# ],
|
5971
6292
|
# }
|
@@ -6018,6 +6339,12 @@ module Aws::Rekognition
|
|
6018
6339
|
# left: 1.0,
|
6019
6340
|
# top: 1.0,
|
6020
6341
|
# },
|
6342
|
+
# polygon: [
|
6343
|
+
# {
|
6344
|
+
# x: 1.0,
|
6345
|
+
# y: 1.0,
|
6346
|
+
# },
|
6347
|
+
# ],
|
6021
6348
|
# },
|
6022
6349
|
# ],
|
6023
6350
|
# },
|
@@ -6040,16 +6367,17 @@ module Aws::Rekognition
|
|
6040
6367
|
# @!attribute [rw] notification_channel
|
6041
6368
|
# The Amazon Simple Notification Service topic to which Amazon
|
6042
6369
|
# Rekognition publishes the completion status of a video analysis
|
6043
|
-
# operation. For more information, see
|
6044
|
-
# SNS topic must have a
|
6045
|
-
# *AmazonRekognition* if you are using the
|
6370
|
+
# operation. For more information, see [Calling Amazon Rekognition
|
6371
|
+
# Video operations][1]. Note that the Amazon SNS topic must have a
|
6372
|
+
# topic name that begins with *AmazonRekognition* if you are using the
|
6046
6373
|
# AmazonRekognitionServiceRole permissions policy to access the topic.
|
6047
6374
|
# For more information, see [Giving access to multiple Amazon SNS
|
6048
|
-
# topics][
|
6375
|
+
# topics][2].
|
6049
6376
|
#
|
6050
6377
|
#
|
6051
6378
|
#
|
6052
|
-
# [1]: https://docs.aws.amazon.com/rekognition/latest/dg/api-video
|
6379
|
+
# [1]: https://docs.aws.amazon.com/rekognition/latest/dg/api-video.html
|
6380
|
+
# [2]: https://docs.aws.amazon.com/rekognition/latest/dg/api-video-roles.html#api-video-roles-all-topics
|
6053
6381
|
# @return [Types::NotificationChannel]
|
6054
6382
|
#
|
6055
6383
|
# @!attribute [rw] job_tag
|
@@ -6135,8 +6463,52 @@ module Aws::Rekognition
|
|
6135
6463
|
|
6136
6464
|
class StopStreamProcessorResponse < Aws::EmptyStructure; end
|
6137
6465
|
|
6138
|
-
#
|
6139
|
-
#
|
6466
|
+
# @note When making an API call, you may pass StreamProcessingStartSelector
|
6467
|
+
# data as a hash:
|
6468
|
+
#
|
6469
|
+
# {
|
6470
|
+
# kvs_stream_start_selector: {
|
6471
|
+
# producer_timestamp: 1,
|
6472
|
+
# fragment_number: "KinesisVideoStreamFragmentNumber",
|
6473
|
+
# },
|
6474
|
+
# }
|
6475
|
+
#
|
6476
|
+
# @!attribute [rw] kvs_stream_start_selector
|
6477
|
+
# Specifies the starting point in the stream to start processing. This
|
6478
|
+
# can be done with a timestamp or a fragment number in a Kinesis
|
6479
|
+
# stream.
|
6480
|
+
# @return [Types::KinesisVideoStreamStartSelector]
|
6481
|
+
#
|
6482
|
+
class StreamProcessingStartSelector < Struct.new(
|
6483
|
+
:kvs_stream_start_selector)
|
6484
|
+
SENSITIVE = []
|
6485
|
+
include Aws::Structure
|
6486
|
+
end
|
6487
|
+
|
6488
|
+
# Specifies when to stop processing the stream. You can specify a
|
6489
|
+
# maximum amount of time to process the video.
|
6490
|
+
#
|
6491
|
+
# @note When making an API call, you may pass StreamProcessingStopSelector
|
6492
|
+
# data as a hash:
|
6493
|
+
#
|
6494
|
+
# {
|
6495
|
+
# max_duration_in_seconds: 1,
|
6496
|
+
# }
|
6497
|
+
#
|
6498
|
+
# @!attribute [rw] max_duration_in_seconds
|
6499
|
+
# Specifies the maximum amount of time in seconds that you want the
|
6500
|
+
# stream to be processed. The largest amount of time is 2 minutes. The
|
6501
|
+
# default is 10 seconds.
|
6502
|
+
# @return [Integer]
|
6503
|
+
#
|
6504
|
+
class StreamProcessingStopSelector < Struct.new(
|
6505
|
+
:max_duration_in_seconds)
|
6506
|
+
SENSITIVE = []
|
6507
|
+
include Aws::Structure
|
6508
|
+
end
|
6509
|
+
|
6510
|
+
# An object that recognizes faces or labels in a streaming video. An
|
6511
|
+
# Amazon Rekognition stream processor is created by a call to
|
6140
6512
|
# CreateStreamProcessor. The request parameters for
|
6141
6513
|
# `CreateStreamProcessor` describe the Kinesis video stream source for
|
6142
6514
|
# the streaming video, face recognition parameters, and where to stream
|
@@ -6157,6 +6529,29 @@ module Aws::Rekognition
|
|
6157
6529
|
include Aws::Structure
|
6158
6530
|
end
|
6159
6531
|
|
6532
|
+
# Allows you to opt in or opt out to share data with Rekognition to
|
6533
|
+
# improve model performance. You can choose this option at the account
|
6534
|
+
# level or on a per-stream basis. Note that if you opt out at the
|
6535
|
+
# account level this setting is ignored on individual streams.
|
6536
|
+
#
|
6537
|
+
# @note When making an API call, you may pass StreamProcessorDataSharingPreference
|
6538
|
+
# data as a hash:
|
6539
|
+
#
|
6540
|
+
# {
|
6541
|
+
# opt_in: false, # required
|
6542
|
+
# }
|
6543
|
+
#
|
6544
|
+
# @!attribute [rw] opt_in
|
6545
|
+
# If this option is set to true, you choose to share data with
|
6546
|
+
# Rekognition to improve model performance.
|
6547
|
+
# @return [Boolean]
|
6548
|
+
#
|
6549
|
+
class StreamProcessorDataSharingPreference < Struct.new(
|
6550
|
+
:opt_in)
|
6551
|
+
SENSITIVE = []
|
6552
|
+
include Aws::Structure
|
6553
|
+
end
|
6554
|
+
|
6160
6555
|
# Information about the source streaming video.
|
6161
6556
|
#
|
6162
6557
|
# @note When making an API call, you may pass StreamProcessorInput
|
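
The data sharing preference is a single boolean wrapped in a structure. A two-line sketch of opting a stream out, keeping in mind the note above that an account-level opt-out overrides per-stream settings:

  # Passed as data_sharing_preference: on CreateStreamProcessor, or as
  # data_sharing_preference_for_update: on UpdateStreamProcessor.
  data_sharing_preference = { opt_in: false }
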
@@ -6179,6 +6574,39 @@ module Aws::Rekognition
|
|
6179
6574
|
include Aws::Structure
|
6180
6575
|
end
|
6181
6576
|
|
6577
|
+
# The Amazon Simple Notification Service topic to which Amazon
|
6578
|
+
# Rekognition publishes the object detection results and completion
|
6579
|
+
# status of a video analysis operation.
|
6580
|
+
#
|
6581
|
+
# Amazon Rekognition publishes a notification the first time an object
|
6582
|
+
# of interest or a person is detected in the video stream. For example,
|
6583
|
+
# if Amazon Rekognition detects a person at second 2, a pet at second 4,
|
6584
|
+
# and a person again at second 5, Amazon Rekognition sends 2 object
|
6585
|
+
# class detected notifications, one for a person at second 2 and one for
|
6586
|
+
# a pet at second 4.
|
6587
|
+
#
|
6588
|
+
# Amazon Rekognition also publishes an end-of-session notification
|
6589
|
+
# with a summary when the stream processing session is complete.
|
6590
|
+
#
|
6591
|
+
# @note When making an API call, you may pass StreamProcessorNotificationChannel
|
6592
|
+
# data as a hash:
|
6593
|
+
#
|
6594
|
+
# {
|
6595
|
+
# sns_topic_arn: "SNSTopicArn", # required
|
6596
|
+
# }
|
6597
|
+
#
|
6598
|
+
# @!attribute [rw] sns_topic_arn
|
6599
|
+
# The Amazon Resource Name (ARN) of the Amazon Simple
|
6600
|
+
# Notification Service topic to which Amazon Rekognition posts the
|
6601
|
+
# completion status.
|
6602
|
+
# @return [String]
|
6603
|
+
#
|
6604
|
+
class StreamProcessorNotificationChannel < Struct.new(
|
6605
|
+
:sns_topic_arn)
|
6606
|
+
SENSITIVE = []
|
6607
|
+
include Aws::Structure
|
6608
|
+
end
|
6609
|
+
|
6182
6610
|
# Information about the Amazon Kinesis Data Streams stream to which an
|
6183
6611
|
# Amazon Rekognition Video stream processor streams the results of a
|
6184
6612
|
# video analysis. For more information, see CreateStreamProcessor in the
|
@@ -6191,6 +6619,10 @@ module Aws::Rekognition
|
|
6191
6619
|
# kinesis_data_stream: {
|
6192
6620
|
# arn: "KinesisDataArn",
|
6193
6621
|
# },
|
6622
|
+
# s3_destination: {
|
6623
|
+
# bucket: "S3Bucket",
|
6624
|
+
# key_prefix: "S3KeyPrefix",
|
6625
|
+
# },
|
6194
6626
|
# }
|
6195
6627
|
#
|
6196
6628
|
# @!attribute [rw] kinesis_data_stream
|
@@ -6198,14 +6630,22 @@ module Aws::Rekognition
|
|
6198
6630
|
# Rekognition stream processor streams the analysis results.
|
6199
6631
|
# @return [Types::KinesisDataStream]
|
6200
6632
|
#
|
6633
|
+
# @!attribute [rw] s3_destination
|
6634
|
+
# The Amazon S3 bucket location to which Amazon Rekognition publishes
|
6635
|
+
# the detailed inference results of a video analysis operation.
|
6636
|
+
# @return [Types::S3Destination]
|
6637
|
+
#
|
6201
6638
|
class StreamProcessorOutput < Struct.new(
|
6202
|
-
:kinesis_data_stream
|
6639
|
+
:kinesis_data_stream,
|
6640
|
+
:s3_destination)
|
6203
6641
|
SENSITIVE = []
|
6204
6642
|
include Aws::Structure
|
6205
6643
|
end
|
6206
6644
|
|
6207
|
-
# Input parameters used
|
6208
|
-
#
|
6645
|
+
# Input parameters used in a streaming video analyzed by an Amazon
|
6646
|
+
# Rekognition stream processor. You can use `FaceSearch` to recognize
|
6647
|
+
# faces in a streaming video, or you can use `ConnectedHome` to detect
|
6648
|
+
# labels.
|
6209
6649
|
#
|
6210
6650
|
# @note When making an API call, you may pass StreamProcessorSettings
|
6211
6651
|
# data as a hash:
|
@@ -6215,14 +6655,58 @@ module Aws::Rekognition
|
|
6215
6655
|
# collection_id: "CollectionId",
|
6216
6656
|
# face_match_threshold: 1.0,
|
6217
6657
|
# },
|
6658
|
+
# connected_home: {
|
6659
|
+
# labels: ["ConnectedHomeLabel"], # required
|
6660
|
+
# min_confidence: 1.0,
|
6661
|
+
# },
|
6218
6662
|
# }
|
6219
6663
|
#
|
6220
6664
|
# @!attribute [rw] face_search
|
6221
6665
|
# Face search settings to use on a streaming video.
|
6222
6666
|
# @return [Types::FaceSearchSettings]
|
6223
6667
|
#
|
6668
|
+
# @!attribute [rw] connected_home
|
6669
|
+
# Label detection settings to use on a streaming video. Defining the
|
6670
|
+
# settings is required in the request parameter for
|
6671
|
+
# CreateStreamProcessor. Including this setting in the
|
6672
|
+
# `CreateStreamProcessor` request enables you to use the stream
|
6673
|
+
# processor for label detection. You can then select what you want the
|
6674
|
+
# stream processor to detect, such as people or pets. When the stream
|
6675
|
+
# processor has started, one notification is sent for each object
|
6676
|
+
# class specified. For example, if packages and pets are selected, one
|
6677
|
+
# SNS notification is published the first time a package is detected
|
6678
|
+
# and one SNS notification is published the first time a pet is
|
6679
|
+
# detected, as well as an end-of-session summary.
|
6680
|
+
# @return [Types::ConnectedHomeSettings]
|
6681
|
+
#
|
6224
6682
|
class StreamProcessorSettings < Struct.new(
|
6225
|
-
:face_search
|
6683
|
+
:face_search,
|
6684
|
+
:connected_home)
|
6685
|
+
SENSITIVE = []
|
6686
|
+
include Aws::Structure
|
6687
|
+
end
|
6688
|
+
|
6689
|
+
# The stream processor settings that you want to update. `ConnectedHome`
|
6690
|
+
# settings can be updated to detect different labels with a different
|
6691
|
+
# minimum confidence.
|
6692
|
+
#
|
6693
|
+
# @note When making an API call, you may pass StreamProcessorSettingsForUpdate
|
6694
|
+
# data as a hash:
|
6695
|
+
#
|
6696
|
+
# {
|
6697
|
+
# connected_home_for_update: {
|
6698
|
+
# labels: ["ConnectedHomeLabel"],
|
6699
|
+
# min_confidence: 1.0,
|
6700
|
+
# },
|
6701
|
+
# }
|
6702
|
+
#
|
6703
|
+
# @!attribute [rw] connected_home_for_update
|
6704
|
+
# The label detection settings you want to use for your stream
|
6705
|
+
# processor.
|
6706
|
+
# @return [Types::ConnectedHomeSettingsForUpdate]
|
6707
|
+
#
|
6708
|
+
class StreamProcessorSettingsForUpdate < Struct.new(
|
6709
|
+
:connected_home_for_update)
|
6226
6710
|
SENSITIVE = []
|
6227
6711
|
include Aws::Structure
|
6228
6712
|
end
|
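
With this change `StreamProcessorSettings` carries either face search or connected home label detection. A short sketch of the two alternatives a caller chooses between when building the `settings` parameter; the collection ID, labels, and thresholds are illustrative.

  face_search_settings = {
    face_search: { collection_id: "my-collection", face_match_threshold: 85.0 }
  }

  label_detection_settings = {
    connected_home: { labels: ["PERSON", "PACKAGE"], min_confidence: 80.0 }
  }
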
@@ -6241,8 +6725,9 @@ module Aws::Rekognition
|
|
6241
6725
|
# region you use for Amazon Rekognition operations.
|
6242
6726
|
#
|
6243
6727
|
# For Amazon Rekognition to process an S3 object, the user must have
|
6244
|
-
# permission to access the S3 object. For more information, see
|
6245
|
-
#
|
6728
|
+
# permission to access the S3 object. For more information, see How
|
6729
|
+
# Amazon Rekognition works with IAM in the Amazon Rekognition
|
6730
|
+
# Developer Guide.
|
6246
6731
|
# @return [Types::S3Object]
|
6247
6732
|
#
|
6248
6733
|
class Summary < Struct.new(
|
@@ -6393,7 +6878,7 @@ module Aws::Rekognition
|
|
6393
6878
|
# of text in which the word appears. The word `Id` is also an index for
|
6394
6879
|
# the word within a line of words.
|
6395
6880
|
#
|
6396
|
-
# For more information, see Detecting
|
6881
|
+
# For more information, see Detecting text in the Amazon Rekognition
|
6397
6882
|
# Developer Guide.
|
6398
6883
|
#
|
6399
6884
|
# @!attribute [rw] detected_text
|
@@ -6607,6 +7092,78 @@ module Aws::Rekognition
|
|
6607
7092
|
|
6608
7093
|
class UpdateDatasetEntriesResponse < Aws::EmptyStructure; end
|
6609
7094
|
|
7095
|
+
# @note When making an API call, you may pass UpdateStreamProcessorRequest
|
7096
|
+
# data as a hash:
|
7097
|
+
#
|
7098
|
+
# {
|
7099
|
+
# name: "StreamProcessorName", # required
|
7100
|
+
# settings_for_update: {
|
7101
|
+
# connected_home_for_update: {
|
7102
|
+
# labels: ["ConnectedHomeLabel"],
|
7103
|
+
# min_confidence: 1.0,
|
7104
|
+
# },
|
7105
|
+
# },
|
7106
|
+
# regions_of_interest_for_update: [
|
7107
|
+
# {
|
7108
|
+
# bounding_box: {
|
7109
|
+
# width: 1.0,
|
7110
|
+
# height: 1.0,
|
7111
|
+
# left: 1.0,
|
7112
|
+
# top: 1.0,
|
7113
|
+
# },
|
7114
|
+
# polygon: [
|
7115
|
+
# {
|
7116
|
+
# x: 1.0,
|
7117
|
+
# y: 1.0,
|
7118
|
+
# },
|
7119
|
+
# ],
|
7120
|
+
# },
|
7121
|
+
# ],
|
7122
|
+
# data_sharing_preference_for_update: {
|
7123
|
+
# opt_in: false, # required
|
7124
|
+
# },
|
7125
|
+
# parameters_to_delete: ["ConnectedHomeMinConfidence"], # accepts ConnectedHomeMinConfidence, RegionsOfInterest
|
7126
|
+
# }
|
7127
|
+
#
|
7128
|
+
# @!attribute [rw] name
|
7129
|
+
# Name of the stream processor that you want to update.
|
7130
|
+
# @return [String]
|
7131
|
+
#
|
7132
|
+
# @!attribute [rw] settings_for_update
|
7133
|
+
# The stream processor settings that you want to update. Label
|
7134
|
+
# detection settings can be updated to detect different labels with a
|
7135
|
+
# different minimum confidence.
|
7136
|
+
# @return [Types::StreamProcessorSettingsForUpdate]
|
7137
|
+
#
|
7138
|
+
# @!attribute [rw] regions_of_interest_for_update
|
7139
|
+
# Specifies locations in the frames where Amazon Rekognition checks
|
7140
|
+
# for objects or people. This is an optional parameter for label
|
7141
|
+
# detection stream processors.
|
7142
|
+
# @return [Array<Types::RegionOfInterest>]
|
7143
|
+
#
|
7144
|
+
# @!attribute [rw] data_sharing_preference_for_update
|
7145
|
+
# Shows whether you are sharing data with Rekognition to improve model
|
7146
|
+
# performance. You can choose this option at the account level or on a
|
7147
|
+
# per-stream basis. Note that if you opt out at the account level this
|
7148
|
+
# setting is ignored on individual streams.
|
7149
|
+
# @return [Types::StreamProcessorDataSharingPreference]
|
7150
|
+
#
|
7151
|
+
# @!attribute [rw] parameters_to_delete
|
7152
|
+
# A list of parameters you want to delete from the stream processor.
|
7153
|
+
# @return [Array<String>]
|
7154
|
+
#
|
7155
|
+
class UpdateStreamProcessorRequest < Struct.new(
|
7156
|
+
:name,
|
7157
|
+
:settings_for_update,
|
7158
|
+
:regions_of_interest_for_update,
|
7159
|
+
:data_sharing_preference_for_update,
|
7160
|
+
:parameters_to_delete)
|
7161
|
+
SENSITIVE = []
|
7162
|
+
include Aws::Structure
|
7163
|
+
end
|
7164
|
+
|
7165
|
+
class UpdateStreamProcessorResponse < Aws::EmptyStructure; end
|
7166
|
+
|
6610
7167
|
# Contains the Amazon S3 bucket location of the validation data for a
|
6611
7168
|
# model training job.
|
6612
7169
|
#
|
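
To round out the new stream processor lifecycle, here is a minimal sketch of the UpdateStreamProcessor call documented above, assuming an existing label detection processor; the processor name and label list are placeholders.

  require "aws-sdk-rekognition"

  client = Aws::Rekognition::Client.new(region: "us-east-1")

  # Narrow the detected labels, raise the confidence floor, opt in to data
  # sharing, and clear any previously configured regions of interest.
  client.update_stream_processor(
    name: "my-connected-home-processor",
    settings_for_update: {
      connected_home_for_update: { labels: ["PERSON"], min_confidence: 90.0 }
    },
    data_sharing_preference_for_update: { opt_in: true },
    parameters_to_delete: ["RegionsOfInterest"]
  )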