aws-sdk-rekognition 1.45.0 → 1.46.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 23d4f78117d6aaa01586bb2aa17b9329f988cfbc9f627b374d1669519444b60d
- data.tar.gz: d136754eb59c007e50c04a0a9181b1d607caf8e9ed5c531892630a1e13275392
+ metadata.gz: 66b78988008c0edbda9ee92460d994deb4686bbd9410f9e2f5c870c2dda1c230
+ data.tar.gz: f5d654701e2512093b5e93822612e5b9b27eb178309874f84a074de53c802208
  SHA512:
- metadata.gz: 2e307ceff11624bc4cf4db747a152d53c0e378046d61d2d9a827df75c07355de5ef93ffe57f7b317f77dda0f6508992b089adfff62209377d8da40da570ae693
- data.tar.gz: 4fdf838ac74ad34e830533e43c6340c5f6db7ae21bd28f2c411f5cc908706814f0a858a19df1dd46ead358831a69bfd332b6a1765a1db3b91147eb50822bc4fa
+ metadata.gz: e24de16aa4123cec0c8a627d163bc3a6213fed11c770c1725178bb04d1b6beeaf71de45304c8e40866d9556807e8550c7a4ef6726146d0859edb6c38cf66b77d
+ data.tar.gz: 3c531c5f29c7e8f53d9fe808ed78788b8b6da9c2b4321149ee504a3233dc4603faa42186ae6ed80d20de8e5ead831322b5d7f35380efcc7c5165558795eff8f9
@@ -49,6 +49,6 @@ require_relative 'aws-sdk-rekognition/customizations'
  # @!group service
  module Aws::Rekognition
 
- GEM_VERSION = '1.45.0'
+ GEM_VERSION = '1.46.0'
 
  end
@@ -378,12 +378,6 @@ module Aws::Rekognition
  # want to filter detected faces, specify `NONE`. The default value is
  # `NONE`.
  #
- # <note markdown="1"> To use quality filtering, you need a collection associated with
- # version 3 of the face model or higher. To get the version of the face
- # model associated with a collection, call DescribeCollection.
- #
- # </note>
- #
  # If the image doesn't contain Exif metadata, `CompareFaces` returns
  # orientation information for the source and target images. Use these
  # values to display the images with the correct image orientation.
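For context, a minimal usage sketch of `CompareFaces` with the quality filter disabled; the bucket and object names are placeholders, and credentials are assumed to come from the default provider chain:

require 'aws-sdk-rekognition'

client = Aws::Rekognition::Client.new

# Placeholder S3 locations -- substitute your own bucket and keys.
resp = client.compare_faces(
  source_image: { s3_object: { bucket: 'my-bucket', name: 'source.jpg' } },
  target_image: { s3_object: { bucket: 'my-bucket', name: 'target.jpg' } },
  similarity_threshold: 80,
  quality_filter: 'NONE' # do not filter detected faces on quality
)

resp.face_matches.each do |match|
  puts "Similarity: #{match.similarity}"
end

# Orientation correction is only populated when an image lacks Exif metadata.
puts resp.source_image_orientation_correction if resp.source_image_orientation_correction
puts resp.target_image_orientation_correction if resp.target_image_orientation_correction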
@@ -1124,6 +1118,10 @@ module Aws::Rekognition
  # resp.project_version_descriptions[0].training_data_result.output.assets[0].ground_truth_manifest.s3_object.bucket #=> String
  # resp.project_version_descriptions[0].training_data_result.output.assets[0].ground_truth_manifest.s3_object.name #=> String
  # resp.project_version_descriptions[0].training_data_result.output.assets[0].ground_truth_manifest.s3_object.version #=> String
+ # resp.project_version_descriptions[0].training_data_result.validation.assets #=> Array
+ # resp.project_version_descriptions[0].training_data_result.validation.assets[0].ground_truth_manifest.s3_object.bucket #=> String
+ # resp.project_version_descriptions[0].training_data_result.validation.assets[0].ground_truth_manifest.s3_object.name #=> String
+ # resp.project_version_descriptions[0].training_data_result.validation.assets[0].ground_truth_manifest.s3_object.version #=> String
  # resp.project_version_descriptions[0].testing_data_result.input.assets #=> Array
  # resp.project_version_descriptions[0].testing_data_result.input.assets[0].ground_truth_manifest.s3_object.bucket #=> String
  # resp.project_version_descriptions[0].testing_data_result.input.assets[0].ground_truth_manifest.s3_object.name #=> String
@@ -1134,10 +1132,17 @@ module Aws::Rekognition
  # resp.project_version_descriptions[0].testing_data_result.output.assets[0].ground_truth_manifest.s3_object.name #=> String
  # resp.project_version_descriptions[0].testing_data_result.output.assets[0].ground_truth_manifest.s3_object.version #=> String
  # resp.project_version_descriptions[0].testing_data_result.output.auto_create #=> Boolean
+ # resp.project_version_descriptions[0].testing_data_result.validation.assets #=> Array
+ # resp.project_version_descriptions[0].testing_data_result.validation.assets[0].ground_truth_manifest.s3_object.bucket #=> String
+ # resp.project_version_descriptions[0].testing_data_result.validation.assets[0].ground_truth_manifest.s3_object.name #=> String
+ # resp.project_version_descriptions[0].testing_data_result.validation.assets[0].ground_truth_manifest.s3_object.version #=> String
  # resp.project_version_descriptions[0].evaluation_result.f1_score #=> Float
  # resp.project_version_descriptions[0].evaluation_result.summary.s3_object.bucket #=> String
  # resp.project_version_descriptions[0].evaluation_result.summary.s3_object.name #=> String
  # resp.project_version_descriptions[0].evaluation_result.summary.s3_object.version #=> String
+ # resp.project_version_descriptions[0].manifest_summary.s3_object.bucket #=> String
+ # resp.project_version_descriptions[0].manifest_summary.s3_object.name #=> String
+ # resp.project_version_descriptions[0].manifest_summary.s3_object.version #=> String
  # resp.next_token #=> String
  #
  #
@@ -3672,11 +3677,11 @@ module Aws::Rekognition
  # more information, see Recognizing Celebrities in the Amazon
  # Rekognition Developer Guide.
  #
- # `RecognizeCelebrities` returns the 100 largest faces in the image. It
+ # `RecognizeCelebrities` returns the 64 largest faces in the image. It
  # lists recognized celebrities in the `CelebrityFaces` array and
  # unrecognized faces in the `UnrecognizedFaces` array.
  # `RecognizeCelebrities` doesn't return celebrities whose faces aren't
- # among the largest 100 faces in the image.
+ # among the largest 64 faces in the image.
  #
  # For each celebrity recognized, `RecognizeCelebrities` returns a
  # `Celebrity` object. The `Celebrity` object contains the celebrity
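A minimal sketch of the call described above; the S3 bucket and object name are placeholders:

require 'aws-sdk-rekognition'

client = Aws::Rekognition::Client.new
resp = client.recognize_celebrities(
  image: { s3_object: { bucket: 'my-bucket', name: 'group-photo.jpg' } }
)

# As of this release, at most the 64 largest faces are considered.
resp.celebrity_faces.each do |celebrity|
  puts "#{celebrity.name} (match confidence: #{celebrity.match_confidence})"
end
puts "Unrecognized faces: #{resp.unrecognized_faces.size}"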
@@ -4885,7 +4890,7 @@ module Aws::Rekognition
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-rekognition'
- context[:gem_version] = '1.45.0'
+ context[:gem_version] = '1.46.0'
  Seahorse::Client::Request.new(handlers, context)
  end
 
@@ -313,6 +313,7 @@ module Aws::Rekognition
  UnindexedFaces = Shapes::ListShape.new(name: 'UnindexedFaces')
  Url = Shapes::StringShape.new(name: 'Url')
  Urls = Shapes::ListShape.new(name: 'Urls')
+ ValidationData = Shapes::StructureShape.new(name: 'ValidationData')
  VersionName = Shapes::StringShape.new(name: 'VersionName')
  VersionNames = Shapes::ListShape.new(name: 'VersionNames')
  Video = Shapes::StructureShape.new(name: 'Video')
@@ -975,6 +976,7 @@ module Aws::Rekognition
  ProjectVersionDescription.add_member(:training_data_result, Shapes::ShapeRef.new(shape: TrainingDataResult, location_name: "TrainingDataResult"))
  ProjectVersionDescription.add_member(:testing_data_result, Shapes::ShapeRef.new(shape: TestingDataResult, location_name: "TestingDataResult"))
  ProjectVersionDescription.add_member(:evaluation_result, Shapes::ShapeRef.new(shape: EvaluationResult, location_name: "EvaluationResult"))
+ ProjectVersionDescription.add_member(:manifest_summary, Shapes::ShapeRef.new(shape: GroundTruthManifest, location_name: "ManifestSummary"))
  ProjectVersionDescription.struct_class = Types::ProjectVersionDescription
 
  ProjectVersionDescriptions.member = Shapes::ShapeRef.new(shape: ProjectVersionDescription)
@@ -1211,6 +1213,7 @@ module Aws::Rekognition
 
  TestingDataResult.add_member(:input, Shapes::ShapeRef.new(shape: TestingData, location_name: "Input"))
  TestingDataResult.add_member(:output, Shapes::ShapeRef.new(shape: TestingData, location_name: "Output"))
+ TestingDataResult.add_member(:validation, Shapes::ShapeRef.new(shape: ValidationData, location_name: "Validation"))
  TestingDataResult.struct_class = Types::TestingDataResult
 
  TextDetection.add_member(:detected_text, Shapes::ShapeRef.new(shape: String, location_name: "DetectedText"))
@@ -1236,6 +1239,7 @@ module Aws::Rekognition
 
  TrainingDataResult.add_member(:input, Shapes::ShapeRef.new(shape: TrainingData, location_name: "Input"))
  TrainingDataResult.add_member(:output, Shapes::ShapeRef.new(shape: TrainingData, location_name: "Output"))
+ TrainingDataResult.add_member(:validation, Shapes::ShapeRef.new(shape: ValidationData, location_name: "Validation"))
  TrainingDataResult.struct_class = Types::TrainingDataResult
 
  UnindexedFace.add_member(:reasons, Shapes::ShapeRef.new(shape: Reasons, location_name: "Reasons"))
@@ -1246,6 +1250,9 @@ module Aws::Rekognition
 
  Urls.member = Shapes::ShapeRef.new(shape: Url)
 
+ ValidationData.add_member(:assets, Shapes::ShapeRef.new(shape: Assets, location_name: "Assets"))
+ ValidationData.struct_class = Types::ValidationData
+
  VersionNames.member = Shapes::ShapeRef.new(shape: VersionName)
 
  Video.add_member(:s3_object, Shapes::ShapeRef.new(shape: S3Object, location_name: "S3Object"))
@@ -37,8 +37,8 @@ module Aws::Rekognition
  end
 
  # Assets are the images that you use to train and evaluate a model
- # version. Assets are referenced by Sagemaker GroundTruth manifest
- # files.
+ # version. Assets can also contain validation information that you use
+ # to debug a failed model training.
  #
  # @note When making an API call, you may pass Asset
  # data as a hash:
@@ -54,7 +54,8 @@ module Aws::Rekognition
  # }
  #
  # @!attribute [rw] ground_truth_manifest
- # The S3 bucket that contains the Ground Truth manifest file.
+ # The S3 bucket that contains an Amazon Sagemaker Ground Truth format
+ # manifest file.
  # @return [Types::GroundTruthManifest]
  #
  class Asset < Struct.new(
@@ -80,7 +81,7 @@ module Aws::Rekognition
  # @return [Integer]
  #
  # @!attribute [rw] number_of_channels
- # The number of audio channels in the segement.
+ # The number of audio channels in the segment.
  # @return [Integer]
  #
  class AudioMetadata < Struct.new(
@@ -2578,7 +2579,10 @@ module Aws::Rekognition
  # @return [String]
  #
  # @!attribute [rw] segments
- # An array of segments detected in a video.
+ # An array of segments detected in a video. The array is sorted by the
+ # segment types (TECHNICAL\_CUE or SHOT) specified in the
+ # `SegmentTypes` input parameter of `StartSegmentDetection`. Within
+ # each segment type the array is sorted by timestamp values.
  # @return [Array<Types::SegmentDetection>]
  #
  # @!attribute [rw] selected_segment_types
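A sketch of how the sorted `Segments` array might be consumed; the job ID is a placeholder assumed to come from an earlier `StartSegmentDetection` call:

require 'aws-sdk-rekognition'

client = Aws::Rekognition::Client.new
job_id = 'job-id-from-start_segment_detection' # placeholder
resp = client.get_segment_detection(job_id: job_id)

# Segments are sorted by segment type (as requested in SegmentTypes)
# and, within each type, by timestamp.
resp.segments.each do |segment|
  puts format('%-13s %8d ms -> %8d ms',
              segment.type,
              segment.start_timestamp_millis,
              segment.end_timestamp_millis)
end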
@@ -2676,7 +2680,8 @@ module Aws::Rekognition
  include Aws::Structure
  end
 
- # The S3 bucket that contains the Ground Truth manifest file.
+ # The S3 bucket that contains an Amazon Sagemaker Ground Truth format
+ # manifest file.
  #
  # @note When making an API call, you may pass GroundTruthManifest
  # data as a hash:
@@ -3205,17 +3210,17 @@ module Aws::Rekognition
  # @return [String]
  #
  # @!attribute [rw] x
- # The x-coordinate from the top left of the landmark expressed as the
- # ratio of the width of the image. For example, if the image is 700 x
- # 200 and the x-coordinate of the landmark is at 350 pixels, this
- # value is 0.5.
+ # The x-coordinate of the landmark expressed as a ratio of the width
+ # of the image. The x-coordinate is measured from the left-side of the
+ # image. For example, if the image is 700 pixels wide and the
+ # x-coordinate of the landmark is at 350 pixels, this value is 0.5.
  # @return [Float]
  #
  # @!attribute [rw] y
- # The y-coordinate from the top left of the landmark expressed as the
- # ratio of the height of the image. For example, if the image is 700 x
- # 200 and the y-coordinate of the landmark is at 100 pixels, this
- # value is 0.5.
+ # The y-coordinate of the landmark expressed as a ratio of the height
+ # of the image. The y-coordinate is measured from the top of the
+ # image. For example, if the image height is 200 pixels and the
+ # y-coordinate of the landmark is at 50 pixels, this value is 0.25.
  # @return [Float]
  #
  class Landmark < Struct.new(
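Since landmark coordinates are ratios, converting them back to pixel positions is just a multiplication by the image dimensions. A sketch using `DetectFaces` (one of the operations that returns `Landmark` objects), with placeholder S3 names and assumed pixel dimensions for the source image:

require 'aws-sdk-rekognition'

client = Aws::Rekognition::Client.new
resp = client.detect_faces(
  image: { s3_object: { bucket: 'my-bucket', name: 'face.jpg' } },
  attributes: ['ALL']
)

image_width  = 700 # assumed pixel dimensions of face.jpg
image_height = 200

resp.face_details.each do |face|
  face.landmarks.each do |landmark|
    # x is measured from the left edge, y from the top edge of the image.
    px = (landmark.x * image_width).round
    py = (landmark.y * image_height).round
    puts "#{landmark.type}: (#{px}, #{py})"
  end
end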
@@ -3702,11 +3707,11 @@ module Aws::Rekognition
  # @return [Types::OutputConfig]
  #
  # @!attribute [rw] training_data_result
- # The manifest file that represents the training results.
+ # Contains information about the training results.
  # @return [Types::TrainingDataResult]
  #
  # @!attribute [rw] testing_data_result
- # The manifest file that represents the testing results.
+ # Contains information about the testing results.
  # @return [Types::TestingDataResult]
  #
  # @!attribute [rw] evaluation_result
@@ -3714,6 +3719,12 @@ module Aws::Rekognition
  # training is successful.
  # @return [Types::EvaluationResult]
  #
+ # @!attribute [rw] manifest_summary
+ # The location of the summary manifest. The summary manifest provides
+ # aggregate data validation results for the training and test
+ # datasets.
+ # @return [Types::GroundTruthManifest]
+ #
  class ProjectVersionDescription < Struct.new(
  :project_version_arn,
  :creation_timestamp,
@@ -3725,7 +3736,8 @@ module Aws::Rekognition
  :output_config,
  :training_data_result,
  :testing_data_result,
- :evaluation_result)
+ :evaluation_result,
+ :manifest_summary)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -3768,7 +3780,7 @@ module Aws::Rekognition
 
  # @!attribute [rw] celebrity_faces
  # Details about each celebrity found in the image. Amazon Rekognition
- # can detect a maximum of 15 celebrities in an image.
+ # can detect a maximum of 64 celebrities in an image.
  # @return [Array<Types::Celebrity>]
  #
  # @!attribute [rw] unrecognized_faces
@@ -4059,12 +4071,14 @@ module Aws::Rekognition
  #
  # @!attribute [rw] start_timestamp_millis
  # The start time of the detected segment in milliseconds from the
- # start of the video.
+ # start of the video. This value is rounded down. For example, if the
+ # actual timestamp is 100.6667 milliseconds, Amazon Rekognition Video
+ # returns a value of 100 millis.
  # @return [Integer]
  #
  # @!attribute [rw] end_timestamp_millis
  # The end time of the detected segment, in milliseconds, from the
- # start of the video.
+ # start of the video. This value is rounded down.
  # @return [Integer]
  #
  # @!attribute [rw] duration_millis
@@ -4135,7 +4149,7 @@ module Aws::Rekognition
  # more information, see SegmentDetection.
  #
  # @!attribute [rw] index
- # An Identifier for a shot detection segment detected in a video
+ # An Identifier for a shot detection segment detected in a video.
  # @return [Integer]
  #
  # @!attribute [rw] confidence
@@ -5207,8 +5221,8 @@ module Aws::Rekognition
  include Aws::Structure
  end
 
- # A Sagemaker Groundtruth format manifest file representing the dataset
- # used for testing.
+ # Sagemaker Groundtruth format manifest files for the input, output and
+ # validation datasets that are used and created during testing.
  #
  # @!attribute [rw] input
  # The testing dataset that was supplied for training.
@@ -5220,9 +5234,15 @@ module Aws::Rekognition
  # issues.
  # @return [Types::TestingData]
  #
+ # @!attribute [rw] validation
+ # The location of the data validation manifest. The data validation
+ # manifest is created for the test dataset during model training.
+ # @return [Types::ValidationData]
+ #
  class TestingDataResult < Struct.new(
  :input,
- :output)
+ :output,
+ :validation)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -5337,8 +5357,8 @@ module Aws::Rekognition
  include Aws::Structure
  end
 
- # A Sagemaker Groundtruth format manifest file that represents the
- # dataset used for training.
+ # Sagemaker Groundtruth format manifest files for the input, output and
+ # validation datasets that are used and created during testing.
  #
  # @!attribute [rw] input
  # The training assets that you supplied for training.
@@ -5349,9 +5369,15 @@ module Aws::Rekognition
  # Custom Labels.
  # @return [Types::TrainingData]
  #
+ # @!attribute [rw] validation
+ # The location of the data validation manifest. The data validation
+ # manifest is created for the training dataset during model training.
+ # @return [Types::ValidationData]
+ #
  class TrainingDataResult < Struct.new(
  :input,
- :output)
+ :output,
+ :validation)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -5391,6 +5417,32 @@ module Aws::Rekognition
  include Aws::Structure
  end
 
+ # Contains the Amazon S3 bucket location of the validation data for a
+ # model training job.
+ #
+ # The validation data includes error information for individual JSON
+ # lines in the dataset. For more information, see Debugging a Failed
+ # Model Training in the Amazon Rekognition Custom Labels Developer
+ # Guide.
+ #
+ # You get the `ValidationData` object for the training dataset
+ # (TrainingDataResult) and the test dataset (TestingDataResult) by
+ # calling DescribeProjectVersions.
+ #
+ # The assets array contains a single Asset object. The
+ # GroundTruthManifest field of the Asset object contains the S3 bucket
+ # location of the validation data.
+ #
+ # @!attribute [rw] assets
+ # The assets that comprise the validation data.
+ # @return [Array<Types::Asset>]
+ #
+ class ValidationData < Struct.new(
+ :assets)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
  # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
  # start operations such as StartLabelDetection use `Video` to specify a
  # video for analysis. The supported file formats are .mp4, .mov and
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: aws-sdk-rekognition
  version: !ruby/object:Gem::Version
- version: 1.45.0
+ version: 1.46.0
  platform: ruby
  authors:
  - Amazon Web Services
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2020-09-30 00:00:00.000000000 Z
+ date: 2020-10-08 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: aws-sdk-core