aws-sdk-rekognition 1.32.0 → 1.33.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: f88df947d010d4e4f6f23ddb3275063422b232c1
- data.tar.gz: 4fb28d5dc55cbc5c057706a248047e3543c8ff31
+ metadata.gz: '0073779d5db2875ab000e60fa739f8731b4fccb8'
+ data.tar.gz: 7427f1652ab2e61ae91127e38a47b03bbb18eec2
  SHA512:
- metadata.gz: e1abdd86e56212e4c23ab773d10e21d83b25c952bd75d0540a695c39b9d486aa0ca322c19dc5172ffbb2889583799c8853be3df10c75d09f5b8134b3c4d78a93
- data.tar.gz: e750ce1b80b67746276fb4ff373eca36b6b6b56164e3881c5e4db00f8f3417cf1f2d1a1b9c15a0344417d1a7966a3f9a450be396c51a9ba55f931e0432ea6af5
+ metadata.gz: 386fcd38b965c535dd8d2286633f826b77b8e30f9fb4e1b72e784b892aa62c817283aa4066778d133e0542baa295652833328a32b19283ac3cc2147ec85a132d
+ data.tar.gz: 2f3b0a9813affe582a4129d7de1ccaaa91b2f6c95fa49445664a3d3fa289c7c65c1c79ce15e9012106cec4f5e3b53f883a3f6cbf408f0656eef39973abbf130d
@@ -12,6 +12,7 @@ require_relative 'aws-sdk-rekognition/types'
  require_relative 'aws-sdk-rekognition/client_api'
  require_relative 'aws-sdk-rekognition/client'
  require_relative 'aws-sdk-rekognition/errors'
+ require_relative 'aws-sdk-rekognition/waiters'
  require_relative 'aws-sdk-rekognition/resource'
  require_relative 'aws-sdk-rekognition/customizations'

@@ -42,6 +43,6 @@ require_relative 'aws-sdk-rekognition/customizations'
  # @service
  module Aws::Rekognition

- GEM_VERSION = '1.32.0'
+ GEM_VERSION = '1.33.0'

  end
@@ -300,11 +300,10 @@ module Aws::Rekognition
  #
  # The `QualityFilter` input parameter allows you to filter out detected
  # faces that don’t meet a required quality bar. The quality bar is based
- # on a variety of common use cases. By default, `CompareFaces` chooses
- # the quality bar that's used to filter faces. You can also explicitly
- # choose the quality bar. Use `QualityFilter`, to set the quality bar by
- # specifying `LOW`, `MEDIUM`, or `HIGH`. If you do not want to filter
- # detected faces, specify `NONE`.
+ # on a variety of common use cases. Use `QualityFilter` to set the
+ # quality bar by specifying `LOW`, `MEDIUM`, or `HIGH`. If you do not
+ # want to filter detected faces, specify `NONE`. The default value is
+ # `NONE`.
  #
  # <note markdown="1"> To use quality filtering, you need a collection associated with
  # version 3 of the face model or higher. To get the version of the face
@@ -359,12 +358,12 @@ module Aws::Rekognition
  # to identify faces. Filtered faces aren't compared. If you specify
  # `AUTO`, Amazon Rekognition chooses the quality bar. If you specify
  # `LOW`, `MEDIUM`, or `HIGH`, filtering removes all faces that don’t
- # meet the chosen quality bar. The default value is `AUTO`. The quality
- # bar is based on a variety of common use cases. Low-quality detections
- # can occur for a number of reasons. Some examples are an object that's
- # misidentified as a face, a face that's too blurry, or a face with a
- # pose that's too extreme to use. If you specify `NONE`, no filtering
- # is performed.
+ # meet the chosen quality bar. The quality bar is based on a variety of
+ # common use cases. Low-quality detections can occur for a number of
+ # reasons. Some examples are an object that's misidentified as a face,
+ # a face that's too blurry, or a face with a pose that's too extreme
+ # to use. If you specify `NONE`, no filtering is performed. The default
+ # value is `NONE`.
  #
  # To use quality filtering, the collection you are using must be
  # associated with version 3 of the face model or higher.
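The two hunks above change the `QualityFilter` documentation for `CompareFaces`. A minimal sketch of where that parameter is passed on a call; the region, bucket, and object names are placeholders, not values from this diff:

    client = Aws::Rekognition::Client.new(region: 'us-east-1')
    resp = client.compare_faces(
      source_image: { s3_object: { bucket: 'my-bucket', name: 'source.jpg' } },
      target_image: { s3_object: { bucket: 'my-bucket', name: 'target.jpg' } },
      similarity_threshold: 80.0,
      quality_filter: 'NONE' # documented default; LOW/MEDIUM/HIGH enable filtering
    )
    resp.face_matches.each { |match| puts match.similarity }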
@@ -557,6 +556,124 @@ module Aws::Rekognition
  req.send_request(options)
  end

+ # Creates a new Amazon Rekognition Custom Labels project. A project is a
+ # logical grouping of resources (images, Labels, models) and operations
+ # (training, evaluation and detection).
+ #
+ # This operation requires permissions to perform the
+ # `rekognition:CreateProject` action.
+ #
+ # @option params [required, String] :project_name
+ # The name of the project to create.
+ #
+ # @return [Types::CreateProjectResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::CreateProjectResponse#project_arn #project_arn} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.create_project({
+ # project_name: "ProjectName", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.project_arn #=> String
+ #
+ # @overload create_project(params = {})
+ # @param [Hash] params ({})
+ def create_project(params = {}, options = {})
+ req = build_request(:create_project, params)
+ req.send_request(options)
+ end
+
+ # Creates a new version of a model and begins training. Models are
+ # managed as part of an Amazon Rekognition Custom Labels project. You
+ # can specify one training dataset and one testing dataset. The response
+ # from `CreateProjectVersion` is an Amazon Resource Name (ARN) for the
+ # version of the model.
+ #
+ # Training takes a while to complete. You can get the current status by
+ # calling DescribeProjectVersions.
+ #
+ # Once training has successfully completed, call DescribeProjectVersions
+ # to get the training results and evaluate the model.
+ #
+ # After evaluating the model, you start the model by calling
+ # StartProjectVersion.
+ #
+ # This operation requires permissions to perform the
+ # `rekognition:CreateProjectVersion` action.
+ #
+ # @option params [required, String] :project_arn
+ # The ARN of the Amazon Rekognition Custom Labels project that manages
+ # the model that you want to train.
+ #
+ # @option params [required, String] :version_name
+ # A name for the version of the model. This value must be unique.
+ #
+ # @option params [required, Types::OutputConfig] :output_config
+ # The Amazon S3 location to store the results of training.
+ #
+ # @option params [required, Types::TrainingData] :training_data
+ # The dataset to use for training.
+ #
+ # @option params [required, Types::TestingData] :testing_data
+ # The dataset to use for testing.
+ #
+ # @return [Types::CreateProjectVersionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::CreateProjectVersionResponse#project_version_arn #project_version_arn} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.create_project_version({
+ # project_arn: "ProjectArn", # required
+ # version_name: "VersionName", # required
+ # output_config: { # required
+ # s3_bucket: "S3Bucket",
+ # s3_key_prefix: "S3KeyPrefix",
+ # },
+ # training_data: { # required
+ # assets: [
+ # {
+ # ground_truth_manifest: {
+ # s3_object: {
+ # bucket: "S3Bucket",
+ # name: "S3ObjectName",
+ # version: "S3ObjectVersion",
+ # },
+ # },
+ # },
+ # ],
+ # },
+ # testing_data: { # required
+ # assets: [
+ # {
+ # ground_truth_manifest: {
+ # s3_object: {
+ # bucket: "S3Bucket",
+ # name: "S3ObjectName",
+ # version: "S3ObjectVersion",
+ # },
+ # },
+ # },
+ # ],
+ # auto_create: false,
+ # },
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.project_version_arn #=> String
+ #
+ # @overload create_project_version(params = {})
+ # @param [Hash] params ({})
+ def create_project_version(params = {}, options = {})
+ req = build_request(:create_project_version, params)
+ req.send_request(options)
+ end
+
  # Creates an Amazon Rekognition stream processor that you can use to
  # detect and recognize faces in a streaming video.
  #
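Taken together, the two new operations above map to a two-step setup. A minimal usage sketch, assuming the gem is installed and credentials are configured; the project name, bucket names, and manifest key are placeholders:

    require 'aws-sdk-rekognition'

    client = Aws::Rekognition::Client.new(region: 'us-east-1')

    # 1. Create the Custom Labels project that will own the model versions.
    project = client.create_project(project_name: 'my-custom-labels-project')

    # 2. Start training a model version from a Ground Truth manifest in S3.
    version = client.create_project_version(
      project_arn: project.project_arn,
      version_name: 'v1',
      output_config: { s3_bucket: 'my-output-bucket', s3_key_prefix: 'training-output/' },
      training_data: {
        assets: [
          { ground_truth_manifest: { s3_object: { bucket: 'my-training-bucket', name: 'train.manifest' } } }
        ]
      },
      # Split the training dataset automatically instead of supplying test assets.
      testing_data: { auto_create: true }
    )
    puts version.project_version_arn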
@@ -799,6 +916,137 @@ module Aws::Rekognition
  req.send_request(options)
  end

+ # Lists and describes the models in an Amazon Rekognition Custom Labels
+ # project. You can specify up to 10 model versions in
+ # `ProjectVersionArns`. If you don't specify a value, descriptions for
+ # all models are returned.
+ #
+ # This operation requires permissions to perform the
+ # `rekognition:DescribeProjectVersions` action.
+ #
+ # @option params [required, String] :project_arn
+ # The Amazon Resource Name (ARN) of the project that contains the models
+ # you want to describe.
+ #
+ # @option params [Array<String>] :version_names
+ # A list of model version names that you want to describe. You can add
+ # up to 10 model version names to the list. If you don't specify a
+ # value, all model descriptions are returned.
+ #
+ # @option params [String] :next_token
+ # If the previous response was incomplete (because there is more results
+ # to retrieve), Amazon Rekognition Custom Labels returns a pagination
+ # token in the response. You can use this pagination token to retrieve
+ # the next set of results.
+ #
+ # @option params [Integer] :max_results
+ # The maximum number of results to return per paginated call. The
+ # largest value you can specify is 100. If you specify a value greater
+ # than 100, a ValidationException error occurs. The default value is
+ # 100.
+ #
+ # @return [Types::DescribeProjectVersionsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DescribeProjectVersionsResponse#project_version_descriptions #project_version_descriptions} => Array&lt;Types::ProjectVersionDescription&gt;
+ # * {Types::DescribeProjectVersionsResponse#next_token #next_token} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.describe_project_versions({
+ # project_arn: "ProjectArn", # required
+ # version_names: ["VersionName"],
+ # next_token: "ExtendedPaginationToken",
+ # max_results: 1,
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.project_version_descriptions #=> Array
+ # resp.project_version_descriptions[0].project_version_arn #=> String
+ # resp.project_version_descriptions[0].creation_timestamp #=> Time
+ # resp.project_version_descriptions[0].min_inference_units #=> Integer
+ # resp.project_version_descriptions[0].status #=> String, one of "TRAINING_IN_PROGRESS", "TRAINING_COMPLETED", "TRAINING_FAILED", "STARTING", "RUNNING", "FAILED", "STOPPING", "STOPPED", "DELETING"
+ # resp.project_version_descriptions[0].status_message #=> String
+ # resp.project_version_descriptions[0].billable_training_time_in_seconds #=> Integer
+ # resp.project_version_descriptions[0].training_end_timestamp #=> Time
+ # resp.project_version_descriptions[0].output_config.s3_bucket #=> String
+ # resp.project_version_descriptions[0].output_config.s3_key_prefix #=> String
+ # resp.project_version_descriptions[0].training_data_result.input.assets #=> Array
+ # resp.project_version_descriptions[0].training_data_result.input.assets[0].ground_truth_manifest.s3_object.bucket #=> String
+ # resp.project_version_descriptions[0].training_data_result.input.assets[0].ground_truth_manifest.s3_object.name #=> String
+ # resp.project_version_descriptions[0].training_data_result.input.assets[0].ground_truth_manifest.s3_object.version #=> String
+ # resp.project_version_descriptions[0].training_data_result.output.assets #=> Array
+ # resp.project_version_descriptions[0].training_data_result.output.assets[0].ground_truth_manifest.s3_object.bucket #=> String
+ # resp.project_version_descriptions[0].training_data_result.output.assets[0].ground_truth_manifest.s3_object.name #=> String
+ # resp.project_version_descriptions[0].training_data_result.output.assets[0].ground_truth_manifest.s3_object.version #=> String
+ # resp.project_version_descriptions[0].testing_data_result.input.assets #=> Array
+ # resp.project_version_descriptions[0].testing_data_result.input.assets[0].ground_truth_manifest.s3_object.bucket #=> String
+ # resp.project_version_descriptions[0].testing_data_result.input.assets[0].ground_truth_manifest.s3_object.name #=> String
+ # resp.project_version_descriptions[0].testing_data_result.input.assets[0].ground_truth_manifest.s3_object.version #=> String
+ # resp.project_version_descriptions[0].testing_data_result.input.auto_create #=> Boolean
+ # resp.project_version_descriptions[0].testing_data_result.output.assets #=> Array
+ # resp.project_version_descriptions[0].testing_data_result.output.assets[0].ground_truth_manifest.s3_object.bucket #=> String
+ # resp.project_version_descriptions[0].testing_data_result.output.assets[0].ground_truth_manifest.s3_object.name #=> String
+ # resp.project_version_descriptions[0].testing_data_result.output.assets[0].ground_truth_manifest.s3_object.version #=> String
+ # resp.project_version_descriptions[0].testing_data_result.output.auto_create #=> Boolean
+ # resp.project_version_descriptions[0].evaluation_result.f1_score #=> Float
+ # resp.project_version_descriptions[0].evaluation_result.summary.s3_object.bucket #=> String
+ # resp.project_version_descriptions[0].evaluation_result.summary.s3_object.name #=> String
+ # resp.project_version_descriptions[0].evaluation_result.summary.s3_object.version #=> String
+ # resp.next_token #=> String
+ #
+ # @overload describe_project_versions(params = {})
+ # @param [Hash] params ({})
+ def describe_project_versions(params = {}, options = {})
+ req = build_request(:describe_project_versions, params)
+ req.send_request(options)
+ end
+
+ # Lists and gets information about your Amazon Rekognition Custom Labels
+ # projects.
+ #
+ # This operation requires permissions to perform the
+ # `rekognition:DescribeProjects` action.
+ #
+ # @option params [String] :next_token
+ # If the previous response was incomplete (because there is more results
+ # to retrieve), Amazon Rekognition Custom Labels returns a pagination
+ # token in the response. You can use this pagination token to retrieve
+ # the next set of results.
+ #
+ # @option params [Integer] :max_results
+ # The maximum number of results to return per paginated call. The
+ # largest value you can specify is 100. If you specify a value greater
+ # than 100, a ValidationException error occurs. The default value is
+ # 100.
+ #
+ # @return [Types::DescribeProjectsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DescribeProjectsResponse#project_descriptions #project_descriptions} => Array&lt;Types::ProjectDescription&gt;
+ # * {Types::DescribeProjectsResponse#next_token #next_token} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.describe_projects({
+ # next_token: "ExtendedPaginationToken",
+ # max_results: 1,
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.project_descriptions #=> Array
+ # resp.project_descriptions[0].project_arn #=> String
+ # resp.project_descriptions[0].creation_timestamp #=> Time
+ # resp.project_descriptions[0].status #=> String, one of "CREATING", "CREATED", "DELETING"
+ # resp.next_token #=> String
+ #
+ # @overload describe_projects(params = {})
+ # @param [Hash] params ({})
+ def describe_projects(params = {}, options = {})
+ req = build_request(:describe_projects, params)
+ req.send_request(options)
+ end
+
  # Provides information about a stream processor created by
  # CreateStreamProcessor. You can get information about the input and
  # output streams, the input parameters for the face recognition being
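A short sketch of how the two new describe operations compose, including `next_token` pagination; nothing here goes beyond the request and response shapes documented above:

    client = Aws::Rekognition::Client.new

    next_token = nil
    loop do
      resp = client.describe_projects(max_results: 100, next_token: next_token)
      resp.project_descriptions.each do |project|
        puts "#{project.project_arn} (#{project.status})"
        client.describe_project_versions(project_arn: project.project_arn)
              .project_version_descriptions.each do |version|
          puts "  #{version.project_version_arn}: #{version.status}"
        end
      end
      next_token = resp.next_token
      break unless next_token
    end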
@@ -847,6 +1095,127 @@ module Aws::Rekognition
  req.send_request(options)
  end

+ # Detects custom labels in a supplied image by using an Amazon
+ # Rekognition Custom Labels model.
+ #
+ # You specify which version of a model version to use by using the
+ # `ProjectVersionArn` input parameter.
+ #
+ # You pass the input image as base64-encoded image bytes or as a
+ # reference to an image in an Amazon S3 bucket. If you use the AWS CLI
+ # to call Amazon Rekognition operations, passing image bytes is not
+ # supported. The image must be either a PNG or JPEG formatted file.
+ #
+ # For each object that the model version detects on an image, the API
+ # returns a (`CustomLabel`) object in an array (`CustomLabels`). Each
+ # `CustomLabel` object provides the label name (`Name`), the level of
+ # confidence that the image contains the object (`Confidence`), and
+ # object location information, if it exists, for the label on the image
+ # (`Geometry`).
+ #
+ # During training model calculates a threshold value that determines if
+ # a prediction for a label is true. By default, `DetectCustomLabels`
+ # doesn't return labels whose confidence value is below the model's
+ # calculated threshold value. To filter labels that are returned,
+ # specify a value for `MinConfidence` that is higher than the model's
+ # calculated threshold. You can get the model's calculated threshold
+ # from the model's training results shown in the Amazon Rekognition
+ # Custom Labels console. To get all labels, regardless of confidence,
+ # specify a `MinConfidence` value of 0.
+ #
+ # You can also add the `MaxResults` parameter to limit the number of
+ # labels returned.
+ #
+ # This is a stateless API operation. That is, the operation does not
+ # persist any data.
+ #
+ # This operation requires permissions to perform the
+ # `rekognition:DetectCustomLabels` action.
+ #
+ # @option params [required, String] :project_version_arn
+ # The ARN of the model version that you want to use.
+ #
+ # @option params [required, Types::Image] :image
+ # Provides the input image either as bytes or an S3 object.
+ #
+ # You pass image bytes to an Amazon Rekognition API operation by using
+ # the `Bytes` property. For example, you would use the `Bytes` property
+ # to pass an image loaded from a local file system. Image bytes passed
+ # by using the `Bytes` property must be base64-encoded. Your code may
+ # not need to encode image bytes if you are using an AWS SDK to call
+ # Amazon Rekognition API operations.
+ #
+ # For more information, see Analyzing an Image Loaded from a Local File
+ # System in the Amazon Rekognition Developer Guide.
+ #
+ # You pass images stored in an S3 bucket to an Amazon Rekognition API
+ # operation by using the `S3Object` property. Images stored in an S3
+ # bucket do not need to be base64-encoded.
+ #
+ # The region for the S3 bucket containing the S3 object must match the
+ # region you use for Amazon Rekognition operations.
+ #
+ # If you use the AWS CLI to call Amazon Rekognition operations, passing
+ # image bytes using the Bytes property is not supported. You must first
+ # upload the image to an Amazon S3 bucket and then call the operation
+ # using the S3Object property.
+ #
+ # For Amazon Rekognition to process an S3 object, the user must have
+ # permission to access the S3 object. For more information, see Resource
+ # Based Policies in the Amazon Rekognition Developer Guide.
+ #
+ # @option params [Integer] :max_results
+ # Maximum number of results you want the service to return in the
+ # response. The service returns the specified number of highest
+ # confidence labels ranked from highest confidence to lowest.
+ #
+ # @option params [Float] :min_confidence
+ # Specifies the minimum confidence level for the labels to return.
+ # Amazon Rekognition doesn't return any labels with a confidence lower
+ # than this specified value. If you specify a value of 0, all labels are
+ # return, regardless of the default thresholds that the model version
+ # applies.
+ #
+ # @return [Types::DetectCustomLabelsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DetectCustomLabelsResponse#custom_labels #custom_labels} => Array&lt;Types::CustomLabel&gt;
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.detect_custom_labels({
+ # project_version_arn: "ProjectVersionArn", # required
+ # image: { # required
+ # bytes: "data",
+ # s3_object: {
+ # bucket: "S3Bucket",
+ # name: "S3ObjectName",
+ # version: "S3ObjectVersion",
+ # },
+ # },
+ # max_results: 1,
+ # min_confidence: 1.0,
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.custom_labels #=> Array
+ # resp.custom_labels[0].name #=> String
+ # resp.custom_labels[0].confidence #=> Float
+ # resp.custom_labels[0].geometry.bounding_box.width #=> Float
+ # resp.custom_labels[0].geometry.bounding_box.height #=> Float
+ # resp.custom_labels[0].geometry.bounding_box.left #=> Float
+ # resp.custom_labels[0].geometry.bounding_box.top #=> Float
+ # resp.custom_labels[0].geometry.polygon #=> Array
+ # resp.custom_labels[0].geometry.polygon[0].x #=> Float
+ # resp.custom_labels[0].geometry.polygon[0].y #=> Float
+ #
+ # @overload detect_custom_labels(params = {})
+ # @param [Hash] params ({})
+ def detect_custom_labels(params = {}, options = {})
+ req = build_request(:detect_custom_labels, params)
+ req.send_request(options)
+ end
+
  # Detects faces within an image that is provided as input.
  #
  # `DetectFaces` detects the 100 largest faces in the image. For each
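A minimal sketch of calling the new `DetectCustomLabels` operation against a running model; the model ARN, bucket, and key are placeholders:

    client = Aws::Rekognition::Client.new

    resp = client.detect_custom_labels(
      project_version_arn: 'arn:aws:rekognition:us-east-1:123456789012:project/my-project/version/v1/1234567890123', # placeholder
      image: { s3_object: { bucket: 'my-image-bucket', name: 'photos/widget.jpg' } },
      min_confidence: 80.0, # raise above the model's calculated threshold to filter
      max_results: 10
    )

    resp.custom_labels.each do |label|
      box = label.geometry&.bounding_box
      location = box ? " at (#{box.left}, #{box.top})" : ''
      puts "#{label.name}: #{label.confidence.round(1)}%#{location}"
    end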
@@ -1237,10 +1606,15 @@ module Aws::Rekognition
  # If you don't specify `MinConfidence`, the operation returns labels
  # with confidence values greater than or equal to 50 percent.
  #
+ # @option params [Types::HumanLoopConfig] :human_loop_config
+ # Sets up the configuration for human evaluation, including the
+ # FlowDefinition the image will be sent to.
+ #
  # @return [Types::DetectModerationLabelsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
  # * {Types::DetectModerationLabelsResponse#moderation_labels #moderation_labels} => Array&lt;Types::ModerationLabel&gt;
  # * {Types::DetectModerationLabelsResponse#moderation_model_version #moderation_model_version} => String
+ # * {Types::DetectModerationLabelsResponse#human_loop_activation_output #human_loop_activation_output} => Types::HumanLoopActivationOutput
  #
  # @example Request syntax with placeholder values
  #
@@ -1254,6 +1628,13 @@ module Aws::Rekognition
  # },
  # },
  # min_confidence: 1.0,
+ # human_loop_config: {
+ # human_loop_name: "HumanLoopName", # required
+ # flow_definition_arn: "FlowDefinitionArn", # required
+ # data_attributes: {
+ # content_classifiers: ["FreeOfPersonallyIdentifiableInformation"], # accepts FreeOfPersonallyIdentifiableInformation, FreeOfAdultContent
+ # },
+ # },
  # })
  #
  # @example Response structure
@@ -1263,6 +1644,10 @@ module Aws::Rekognition
  # resp.moderation_labels[0].name #=> String
  # resp.moderation_labels[0].parent_name #=> String
  # resp.moderation_model_version #=> String
+ # resp.human_loop_activation_output.human_loop_arn #=> String
+ # resp.human_loop_activation_output.human_loop_activation_reasons #=> Array
+ # resp.human_loop_activation_output.human_loop_activation_reasons[0] #=> String
+ # resp.human_loop_activation_output.human_loop_activation_conditions_evaluation_results #=> String
  #
  # @overload detect_moderation_labels(params = {})
  # @param [Hash] params ({})
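A sketch of the new `HumanLoopConfig` support on `DetectModerationLabels`; the flow definition ARN must come from an Amazon A2I flow you have already created, and the bucket, key, and names here are placeholders:

    resp = client.detect_moderation_labels(
      image: { s3_object: { bucket: 'my-image-bucket', name: 'uploads/photo.jpg' } },
      min_confidence: 60.0,
      human_loop_config: {
        human_loop_name: 'moderation-review',
        flow_definition_arn: 'arn:aws:sagemaker:us-east-1:123456789012:flow-definition/my-flow', # placeholder
        data_attributes: { content_classifiers: ['FreeOfPersonallyIdentifiableInformation'] }
      }
    )

    # Populated only when the configured conditions routed the image to a human loop.
    puts resp.human_loop_activation_output&.human_loop_arn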
@@ -3161,11 +3546,10 @@ module Aws::Rekognition
  #
  # The `QualityFilter` input parameter allows you to filter out detected
  # faces that don’t meet a required quality bar. The quality bar is based
- # on a variety of common use cases. By default, Amazon Rekognition
- # chooses the quality bar that's used to filter faces. You can also
- # explicitly choose the quality bar. Use `QualityFilter`, to set the
+ # on a variety of common use cases. Use `QualityFilter` to set the
  # quality bar for filtering by specifying `LOW`, `MEDIUM`, or `HIGH`. If
- # you do not want to filter detected faces, specify `NONE`.
+ # you do not want to filter detected faces, specify `NONE`. The default
+ # value is `NONE`.
  #
  # <note markdown="1"> To use quality filtering, you need a collection associated with
  # version 3 of the face model or higher. To get the version of the face
@@ -3203,12 +3587,12 @@ module Aws::Rekognition
  # to identify faces. Filtered faces aren't searched for in the
  # collection. If you specify `AUTO`, Amazon Rekognition chooses the
  # quality bar. If you specify `LOW`, `MEDIUM`, or `HIGH`, filtering
- # removes all faces that don’t meet the chosen quality bar. The default
- # value is `AUTO`. The quality bar is based on a variety of common use
- # cases. Low-quality detections can occur for a number of reasons. Some
- # examples are an object that's misidentified as a face, a face that's
- # too blurry, or a face with a pose that's too extreme to use. If you
- # specify `NONE`, no filtering is performed.
+ # removes all faces that don’t meet the chosen quality bar. The quality
+ # bar is based on a variety of common use cases. Low-quality detections
+ # can occur for a number of reasons. Some examples are an object that's
+ # misidentified as a face, a face that's too blurry, or a face with a
+ # pose that's too extreme to use. If you specify `NONE`, no filtering
+ # is performed. The default value is `NONE`.
  #
  # To use quality filtering, the collection you are using must be
  # associated with version 3 of the face model or higher.
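For reference, a hypothetical `SearchFacesByImage` call showing where this `QualityFilter` parameter is supplied; the collection ID and S3 location are placeholders, not taken from the diff:

    resp = client.search_faces_by_image(
      collection_id: 'my-collection',
      image: { s3_object: { bucket: 'my-image-bucket', name: 'query.jpg' } },
      quality_filter: 'MEDIUM', # LOW/MEDIUM/HIGH filter; NONE (the default) disables filtering
      face_match_threshold: 90.0
    )
    resp.face_matches.each { |m| puts "#{m.face.face_id}: #{m.similarity}" }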
@@ -3773,6 +4157,54 @@ module Aws::Rekognition
  req.send_request(options)
  end

+ # Starts the running of the version of a model. Starting a model takes a
+ # while to complete. To check the current state of the model, use
+ # DescribeProjectVersions.
+ #
+ # Once the model is running, you can detect custom labels in new images
+ # by calling DetectCustomLabels.
+ #
+ # <note markdown="1"> You are charged for the amount of time that the model is running. To
+ # stop a running model, call StopProjectVersion.
+ #
+ # </note>
+ #
+ # This operation requires permissions to perform the
+ # `rekognition:StartProjectVersion` action.
+ #
+ # @option params [required, String] :project_version_arn
+ # The Amazon Resource Name(ARN) of the model version that you want to
+ # start.
+ #
+ # @option params [required, Integer] :min_inference_units
+ # The minimum number of inference units to use. A single inference unit
+ # represents 1 hour of processing and can support up to 5 Transaction
+ # Pers Second (TPS). Use a higher number to increase the TPS throughput
+ # of your model. You are charged for the number of inference units that
+ # you use.
+ #
+ # @return [Types::StartProjectVersionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::StartProjectVersionResponse#status #status} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.start_project_version({
+ # project_version_arn: "ProjectVersionArn", # required
+ # min_inference_units: 1, # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.status #=> String, one of "TRAINING_IN_PROGRESS", "TRAINING_COMPLETED", "TRAINING_FAILED", "STARTING", "RUNNING", "FAILED", "STOPPING", "STOPPED", "DELETING"
+ #
+ # @overload start_project_version(params = {})
+ # @param [Hash] params ({})
+ def start_project_version(params = {}, options = {})
+ req = build_request(:start_project_version, params)
+ req.send_request(options)
+ end
+
  # Starts processing a stream processor. You create a stream processor by
  # calling CreateStreamProcessor. To tell `StartStreamProcessor` which
  # stream processor to start, use the value of the `Name` field specified
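A sketch combining the new `StartProjectVersion` operation with the `project_version_running` waiter added in this release; both ARNs are placeholders:

    client = Aws::Rekognition::Client.new
    model_arn = 'arn:aws:rekognition:us-east-1:123456789012:project/my-project/version/v1/1234567890123' # placeholder

    resp = client.start_project_version(
      project_version_arn: model_arn,
      min_inference_units: 1 # billed while the model is running
    )
    puts resp.status # e.g. "STARTING"

    # Block until DescribeProjectVersions reports the model version as RUNNING.
    client.wait_until(:project_version_running,
                      project_arn: 'arn:aws:rekognition:us-east-1:123456789012:project/my-project/1234567890123', # placeholder
                      version_names: ['v1'])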
@@ -3796,6 +4228,37 @@ module Aws::Rekognition
  req.send_request(options)
  end

+ # Stops a running model. The operation might take a while to complete.
+ # To check the current status, call DescribeProjectVersions.
+ #
+ # @option params [required, String] :project_version_arn
+ # The Amazon Resource Name (ARN) of the model version that you want to
+ # delete.
+ #
+ # This operation requires permissions to perform the
+ # `rekognition:StopProjectVersion` action.
+ #
+ # @return [Types::StopProjectVersionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::StopProjectVersionResponse#status #status} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.stop_project_version({
+ # project_version_arn: "ProjectVersionArn", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.status #=> String, one of "TRAINING_IN_PROGRESS", "TRAINING_COMPLETED", "TRAINING_FAILED", "STARTING", "RUNNING", "FAILED", "STOPPING", "STOPPED", "DELETING"
+ #
+ # @overload stop_project_version(params = {})
+ # @param [Hash] params ({})
+ def stop_project_version(params = {}, options = {})
+ req = build_request(:stop_project_version, params)
+ req.send_request(options)
+ end
+
  # Stops a running stream processor that was created by
  # CreateStreamProcessor.
  #
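And the matching teardown call for `StopProjectVersion`; the ARN is again a placeholder:

    client = Aws::Rekognition::Client.new
    model_arn = 'arn:aws:rekognition:us-east-1:123456789012:project/my-project/version/v1/1234567890123' # placeholder
    resp = client.stop_project_version(project_version_arn: model_arn)
    puts resp.status # e.g. "STOPPING"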
@@ -3830,14 +4293,129 @@ module Aws::Rekognition
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-rekognition'
- context[:gem_version] = '1.32.0'
+ context[:gem_version] = '1.33.0'
  Seahorse::Client::Request.new(handlers, context)
  end

+ # Polls an API operation until a resource enters a desired state.
+ #
+ # ## Basic Usage
+ #
+ # A waiter will call an API operation until:
+ #
+ # * It is successful
+ # * It enters a terminal state
+ # * It makes the maximum number of attempts
+ #
+ # In between attempts, the waiter will sleep.
+ #
+ # # polls in a loop, sleeping between attempts
+ # client.wait_until(waiter_name, params)
+ #
+ # ## Configuration
+ #
+ # You can configure the maximum number of polling attempts, and the
+ # delay (in seconds) between each polling attempt. You can pass
+ # configuration as the final arguments hash.
+ #
+ # # poll for ~25 seconds
+ # client.wait_until(waiter_name, params, {
+ # max_attempts: 5,
+ # delay: 5,
+ # })
+ #
+ # ## Callbacks
+ #
+ # You can be notified before each polling attempt and before each
+ # delay. If you throw `:success` or `:failure` from these callbacks,
+ # it will terminate the waiter.
+ #
+ # started_at = Time.now
+ # client.wait_until(waiter_name, params, {
+ #
+ # # disable max attempts
+ # max_attempts: nil,
+ #
+ # # poll for 1 hour, instead of a number of attempts
+ # before_wait: -> (attempts, response) do
+ # throw :failure if Time.now - started_at > 3600
+ # end
+ # })
+ #
+ # ## Handling Errors
+ #
+ # When a waiter is unsuccessful, it will raise an error.
+ # All of the failure errors extend from
+ # {Aws::Waiters::Errors::WaiterFailed}.
+ #
+ # begin
+ # client.wait_until(...)
+ # rescue Aws::Waiters::Errors::WaiterFailed
+ # # resource did not enter the desired state in time
+ # end
+ #
+ # ## Valid Waiters
+ #
+ # The following table lists the valid waiter names, the operations they call,
+ # and the default `:delay` and `:max_attempts` values.
+ #
+ # | waiter_name | params | :delay | :max_attempts |
+ # | ---------------------------------- | ---------------------------- | -------- | ------------- |
+ # | project_version_running | {#describe_project_versions} | 30 | 40 |
+ # | project_version_training_completed | {#describe_project_versions} | 120 | 360 |
+ #
+ # @raise [Errors::FailureStateError] Raised when the waiter terminates
+ # because the waiter has entered a state that it will not transition
+ # out of, preventing success.
+ #
+ # @raise [Errors::TooManyAttemptsError] Raised when the configured
+ # maximum number of attempts have been made, and the waiter is not
+ # yet successful.
+ #
+ # @raise [Errors::UnexpectedError] Raised when an error is encounted
+ # while polling for a resource that is not expected.
+ #
+ # @raise [Errors::NoSuchWaiterError] Raised when you request to wait
+ # for an unknown state.
+ #
+ # @return [Boolean] Returns `true` if the waiter was successful.
+ # @param [Symbol] waiter_name
+ # @param [Hash] params ({})
+ # @param [Hash] options ({})
+ # @option options [Integer] :max_attempts
+ # @option options [Integer] :delay
+ # @option options [Proc] :before_attempt
+ # @option options [Proc] :before_wait
+ def wait_until(waiter_name, params = {}, options = {})
+ w = waiter(waiter_name, options)
+ yield(w.waiter) if block_given? # deprecated
+ w.wait(params)
+ end
+
  # @api private
  # @deprecated
  def waiter_names
- []
+ waiters.keys
+ end
+
+ private
+
+ # @param [Symbol] waiter_name
+ # @param [Hash] options ({})
+ def waiter(waiter_name, options = {})
+ waiter_class = waiters[waiter_name]
+ if waiter_class
+ waiter_class.new(options.merge(client: self))
+ else
+ raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys)
+ end
+ end
+
+ def waiters
+ {
+ project_version_running: Waiters::ProjectVersionRunning,
+ project_version_training_completed: Waiters::ProjectVersionTrainingCompleted
+ }
  end

  class << self
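The new waiters can also be driven directly through `wait_until`, mirroring the documentation added above; a sketch that waits for training to finish and handles the failure case (the project ARN and version name are placeholders):

    client = Aws::Rekognition::Client.new

    begin
      client.wait_until(
        :project_version_training_completed,
        { project_arn: 'arn:aws:rekognition:us-east-1:123456789012:project/my-project/1234567890123', # placeholder
          version_names: ['v1'] },
        { delay: 120, max_attempts: 360 } # the documented defaults for this waiter
      )
      puts 'training finished'
    rescue Aws::Waiters::Errors::WaiterFailed => e
      warn "training did not complete: #{e.message}"
    end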