aws-sdk-rekognition 1.10.0 → 1.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/aws-sdk-rekognition.rb +1 -1
- data/lib/aws-sdk-rekognition/client.rb +115 -100
- data/lib/aws-sdk-rekognition/types.rb +89 -80
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d6dfe87f1bcc7424a61682b5f56ea02b0cba2fb8
+  data.tar.gz: dc3fc7bf664cbf8966646907869131d2f93eb018
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1875e75835c3aed614d5b4f625352a3a7555bb3104ccc0aefa2fcf41e577db06b1ff5e73220a08be988d37df35dbb7dc3bd5c671e222836dfbe644a1468ea25f
+  data.tar.gz: 53e3821805a71d2801596badb958b76c890622b917abfc1e884f37eee4f552f53f5662896c5848a1ab6edffd2921b586010c3a318dcacd5bba0041cb0afa04d4
data/lib/aws-sdk-rekognition/client.rb
CHANGED
@@ -196,10 +196,9 @@ module Aws::Rekognition
 # </note>
 #
 # You pass the input and target images either as base64-encoded image
-# bytes or as
-# the
-#
-# formatted file.
+# bytes or as references to images in an Amazon S3 bucket. If you use
+# the AWS CLI to call Amazon Rekognition operations, passing image bytes
+# isn't supported. The image must be formatted as a PNG or JPEG file.
 #
 # In response, the operation returns an array of face matches ordered by
 # similarity score in descending order. For each face match, the
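The hunk above documents how the compare-faces operation accepts its input and target images. As a point of reference only, here is a minimal Ruby sketch of such a call with this gem; the region, bucket, and object keys are placeholders, not values taken from the package.

  require 'aws-sdk-rekognition'

  client = Aws::Rekognition::Client.new(region: 'us-east-1') # assumed region
  resp = client.compare_faces(
    source_image: { s3_object: { bucket: 'my-bucket', name: 'source.jpg' } },
    target_image: { s3_object: { bucket: 'my-bucket', name: 'target.jpg' } },
    similarity_threshold: 80.0
  )
  # Matches come back ordered by similarity score, highest first.
  resp.face_matches.each { |m| puts "similarity: #{m.similarity}" }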
@@ -727,19 +726,19 @@ module Aws::Rekognition
 # Detects faces within an image that is provided as input.
 #
 # `DetectFaces` detects the 100 largest faces in the image. For each
-# face detected, the operation returns face details
-# box of the face, a confidence value (that the
-# face), and a fixed set of attributes such as
-# example, coordinates of eye and mouth), gender,
-# sunglasses,
+# face detected, the operation returns face details. These details
+# include a bounding box of the face, a confidence value (that the
+# bounding box contains a face), and a fixed set of attributes such as
+# facial landmarks (for example, coordinates of eye and mouth), gender,
+# presence of beard, sunglasses, and so on.
 #
 # The face-detection algorithm is most effective on frontal faces. For
-# non-frontal or obscured faces, the algorithm
-# or might detect faces with lower confidence.
+# non-frontal or obscured faces, the algorithm might not detect the
+# faces or might detect faces with lower confidence.
 #
 # You pass the input image either as base64-encoded image bytes or as a
-# reference to an image in an Amazon S3 bucket. If you use the
-#
+# reference to an image in an Amazon S3 bucket. If you use the AWS CLI
+# to call Amazon Rekognition operations, passing image bytes is not
 # supported. The image must be either a PNG or JPEG formatted file.
 #
 # <note markdown="1"> This is a stateless API operation. That is, the operation does not
@@ -760,9 +759,9 @@ module Aws::Rekognition
 # default list of attributes or all attributes. If you don't specify a
 # value for `Attributes` or if you specify `["DEFAULT"]`, the API
 # returns the following subset of facial attributes: `BoundingBox`,
-# `Confidence`, `Pose`, `Quality
-# `["ALL"]`, all facial attributes are returned but the operation
-#
+# `Confidence`, `Pose`, `Quality`, and `Landmarks`. If you provide
+# `["ALL"]`, all facial attributes are returned, but the operation takes
+# longer to complete.
 #
 # If you provide both, `["ALL", "DEFAULT"]`, the service uses a logical
 # AND operator to determine which attributes to return (in this case,
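The two hunks above cover DetectFaces and its `Attributes` parameter (`["DEFAULT"]` versus `["ALL"]`). A minimal Ruby sketch of that call is below; region, bucket, and key are placeholder values.

  require 'aws-sdk-rekognition'

  client = Aws::Rekognition::Client.new(region: 'us-east-1') # assumed region
  resp = client.detect_faces(
    image: { s3_object: { bucket: 'my-bucket', name: 'photo.jpg' } },
    attributes: ['ALL'] # ['DEFAULT'] returns the smaller, faster subset
  )
  resp.face_details.each do |face|
    puts "confidence: #{face.confidence}, landmarks: #{face.landmarks.size}"
  end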
@@ -916,15 +915,15 @@ module Aws::Rekognition
 # </note>
 #
 # You pass the input image as base64-encoded image bytes or as a
-# reference to an image in an Amazon S3 bucket. If you use the
-#
+# reference to an image in an Amazon S3 bucket. If you use the AWS CLI
+# to call Amazon Rekognition operations, passing image bytes is not
 # supported. The image must be either a PNG or JPEG formatted file.
 #
 # For each object, scene, and concept the API returns one or more
 # labels. Each label provides the object name, and the level of
 # confidence that the image contains the object. For example, suppose
 # the input image has a lighthouse, the sea, and a rock. The response
-#
+# includes all three labels, one for each object.
 #
 # `\{Name: lighthouse, Confidence: 98.4629\}`
 #
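For the DetectLabels behavior described above (one label per object, scene, or concept), a minimal Ruby sketch follows; the bucket, key, and thresholds are placeholders.

  require 'aws-sdk-rekognition'

  client = Aws::Rekognition::Client.new(region: 'us-east-1') # assumed region
  resp = client.detect_labels(
    image: { s3_object: { bucket: 'my-bucket', name: 'lighthouse.jpg' } },
    max_labels: 10,
    min_confidence: 75.0
  )
  # One label per detected object, scene, or concept.
  resp.labels.each { |label| puts "#{label.name}: #{label.confidence}" }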
@@ -1059,8 +1058,8 @@ module Aws::Rekognition
 # in the Amazon Rekognition Developer Guide.
 #
 # You pass the input image either as base64-encoded image bytes or as a
-# reference to an image in an Amazon S3 bucket. If you use the
-#
+# reference to an image in an Amazon S3 bucket. If you use the AWS CLI
+# to call Amazon Rekognition operations, passing image bytes is not
 # supported. The image must be either a PNG or JPEG formatted file.
 #
 # @option params [required, Types::Image] :image
@@ -1191,9 +1190,9 @@ module Aws::Rekognition
 end

 # Gets the name and additional information about a celebrity based on
-# his or her Rekognition ID. The additional information is
-# an array of URLs. If there is no additional information
-# celebrity, this list is empty.
+# his or her Amazon Rekognition ID. The additional information is
+# returned as an array of URLs. If there is no additional information
+# about the celebrity, this list is empty.
 #
 # For more information, see Recognizing Celebrities in an Image in the
 # Amazon Rekognition Developer Guide.
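The GetCelebrityInfo behavior documented above can be exercised with a sketch like the following; the celebrity ID shown is a placeholder, since real IDs come back from a RecognizeCelebrities call.

  require 'aws-sdk-rekognition'

  client = Aws::Rekognition::Client.new(region: 'us-east-1') # assumed region
  resp = client.get_celebrity_info(id: 'nnnnnnnn') # placeholder ID from RecognizeCelebrities
  puts resp.name
  puts resp.urls # empty array when no additional information is available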
@@ -1971,21 +1970,24 @@ module Aws::Rekognition
 # Detects faces in the input image and adds them to the specified
 # collection.
 #
-# Amazon Rekognition
-# the underlying detection algorithm first detects the faces in
-# input image
-# vector, and stores it in the
-# uses feature vectors when
-# using the and operations.
+# Amazon Rekognition doesn't save the actual faces that are detected.
+# Instead, the underlying detection algorithm first detects the faces in
+# the input image. For each face, the algorithm extracts facial features
+# into a feature vector, and stores it in the backend database. Amazon
+# Rekognition uses feature vectors when it performs face match and
+# search operations using the and operations.
+#
+# For more information, see Adding Faces to a Collection in the Amazon
+# Rekognition Developer Guide.
 #
 # To get the number of faces in a collection, call .
 #
-# If you
+# If you're using version 1.0 of the face detection model, `IndexFaces`
 # indexes the 15 largest faces in the input image. Later versions of the
 # face detection model index the 100 largest faces in the input image.
-# To determine which version of the model you
-# the collection ID. You also get the model version from the value
-# `FaceModelVersion` in the response from `IndexFaces`.
+# To determine which version of the model you're using, call and supply
+# the collection ID. You can also get the model version from the value
+# of `FaceModelVersion` in the response from `IndexFaces`.
 #
 # For more information, see Model Versioning in the Amazon Rekognition
 # Developer Guide.
@@ -1999,58 +2001,65 @@ module Aws::Rekognition
 #
 # You can specify the maximum number of faces to index with the
 # `MaxFaces` input parameter. This is useful when you want to index the
-# largest faces in an image
-#
+# largest faces in an image and don't want to index smaller faces, such
+# as those belonging to people standing in the background.
 #
 # The `QualityFilter` input parameter allows you to filter out detected
 # faces that don’t meet the required quality bar chosen by Amazon
 # Rekognition. The quality bar is based on a variety of common use
-# cases.
-#
-#
-#
+# cases. By default, `IndexFaces` filters detected faces. You can also
+# explicitly filter detected faces by specifying `AUTO` for the value of
+# `QualityFilter`. If you do not want to filter detected faces, specify
+# `NONE`.
 #
-#
-#
-#
-# bounding box contains a face.
-#
-# * A face ID, `faceId`, assigned by the service for each face that is
-# detected and stored.
+# <note markdown="1"> To use quality filtering, you need a collection associated with
+# version 3 of the face model. To get the version of the face model
+# associated with a collection, call .
 #
-#
-#
-# If you request all facial attributes (using the `detectionAttributes`
-# parameter), Amazon Rekognition returns detailed facial attributes such
-# as facial landmarks (for example, location of eye and mouth) and other
-# facial attributes such gender. If you provide the same image, specify
-# the same collection, and use the same external ID in the `IndexFaces`
-# operation, Amazon Rekognition doesn't save duplicate face metadata.
+# </note>
 #
 # Information about faces detected in an image, but not indexed, is
-# returned in an array of objects, `UnindexedFaces`. Faces
+# returned in an array of objects, `UnindexedFaces`. Faces aren't
 # indexed for reasons such as:
 #
+# * The number of faces detected exceeds the value of the `MaxFaces`
+# request parameter.
+#
+# * The face is too small compared to the image dimensions.
+#
 # * The face is too blurry.
 #
 # * The image is too dark.
 #
 # * The face has an extreme pose.
 #
-#
+# In response, the `IndexFaces` operation returns an array of metadata
+# for all detected faces, `FaceRecords`. This includes:
 #
-# * The
-# request parameter.
+# * The bounding box, `BoundingBox`, of the detected face.
 #
+# * A confidence value, `Confidence`, which indicates the confidence
+# that the bounding box contains a face.
 #
+# * A face ID, `faceId`, assigned by the service for each face that's
+# detected and stored.
 #
-#
-# Rekognition Developer Guide.
+# * An image ID, `ImageId`, assigned by the service for the input image.
 #
-#
-#
-#
-#
+# If you request all facial attributes (by using the
+# `detectionAttributes` parameter), Amazon Rekognition returns detailed
+# facial attributes, such as facial landmarks (for example, location of
+# eye and mouth) and other facial attributes like gender. If you provide
+# the same image, specify the same collection, and use the same external
+# ID in the `IndexFaces` operation, Amazon Rekognition doesn't save
+# duplicate face metadata.
+#
+#
+#
+# The input image is passed either as base64-encoded image bytes, or as
+# a reference to an image in an Amazon S3 bucket. If you use the AWS CLI
+# to call Amazon Rekognition operations, passing image bytes isn't
+# supported. The image must be formatted as a PNG or JPEG file.
 #
 # This operation requires permissions to perform the
 # `rekognition:IndexFaces` action.
@@ -2062,19 +2071,19 @@ module Aws::Rekognition
 # @option params [required, Types::Image] :image
 # The input image as base64-encoded bytes or an S3 object. If you use
 # the AWS CLI to call Amazon Rekognition operations, passing
-# base64-encoded image bytes
+# base64-encoded image bytes isn't supported.
 #
 # @option params [String] :external_image_id
-# ID you want to assign to all the faces detected in the image.
+# The ID you want to assign to all the faces detected in the image.
 #
 # @option params [Array<String>] :detection_attributes
 # An array of facial attributes that you want to be returned. This can
 # be the default list of attributes or all attributes. If you don't
 # specify a value for `Attributes` or if you specify `["DEFAULT"]`, the
 # API returns the following subset of facial attributes: `BoundingBox`,
-# `Confidence`, `Pose`, `Quality
-# `["ALL"]`, all facial attributes are returned but the operation
-#
+# `Confidence`, `Pose`, `Quality`, and `Landmarks`. If you provide
+# `["ALL"]`, all facial attributes are returned, but the operation takes
+# longer to complete.
 #
 # If you provide both, `["ALL", "DEFAULT"]`, the service uses a logical
 # AND operator to determine which attributes to return (in this case,
@@ -2082,30 +2091,36 @@ module Aws::Rekognition
 #
 # @option params [Integer] :max_faces
 # The maximum number of faces to index. The value of `MaxFaces` must be
-# greater than or equal to 1. `IndexFaces` returns no more
+# greater than or equal to 1. `IndexFaces` returns no more than 100
 # detected faces in an image, even if you specify a larger value for
 # `MaxFaces`.
 #
 # If `IndexFaces` detects more faces than the value of `MaxFaces`, the
 # faces with the lowest quality are filtered out first. If there are
 # still more faces than the value of `MaxFaces`, the faces with the
-# smallest bounding boxes are filtered out (up to the number
-# satisfy the value of `MaxFaces`). Information about the
-# faces is available in the `UnindexedFaces` array.
+# smallest bounding boxes are filtered out (up to the number that's
+# needed to satisfy the value of `MaxFaces`). Information about the
+# unindexed faces is available in the `UnindexedFaces` array.
+#
+# The faces that are returned by `IndexFaces` are sorted by the largest
+# face bounding box size to the smallest size, in descending order.
 #
-#
-# the
+# `MaxFaces` can be used with a collection associated with any version
+# of the face model.
 #
 # @option params [String] :quality_filter
-#
-# low quality. Filtered faces
-# filtering prioritizes the identification of faces
-# required quality bar chosen by Amazon Rekognition.
-# based on a variety of common use cases. Low
-#
-# a face, a face that
-# extreme to use. If you specify `NONE`,
-# default value is
+# A filter that specifies how much filtering is done to identify faces
+# that are detected with low quality. Filtered faces aren't indexed. If
+# you specify `AUTO`, filtering prioritizes the identification of faces
+# that don’t meet the required quality bar chosen by Amazon Rekognition.
+# The quality bar is based on a variety of common use cases. Low-quality
+# detections can occur for a number of reasons. Some examples are an
+# object that's misidentified as a face, a face that's too blurry, or
+# a face with a pose that's too extreme to use. If you specify `NONE`,
+# no filtering is performed. The default value is AUTO.
+#
+# To use quality filtering, the collection you are using must be
+# associated with version 3 of the face model.
 #
 # @return [Types::IndexFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
 #
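The IndexFaces hunks above document the `MaxFaces` and `QualityFilter` parameters added in this release. A minimal Ruby sketch of an indexing call follows; the collection name, bucket, key, and limits are placeholders.

  require 'aws-sdk-rekognition'

  client = Aws::Rekognition::Client.new(region: 'us-east-1') # assumed region
  client.create_collection(collection_id: 'my-collection') # one-time setup; raises if it already exists
  resp = client.index_faces(
    collection_id: 'my-collection',
    image: { s3_object: { bucket: 'my-bucket', name: 'group-photo.jpg' } },
    external_image_id: 'group-photo.jpg',
    max_faces: 5,            # keep only the 5 largest faces
    quality_filter: 'AUTO',  # or 'NONE' to skip quality filtering
    detection_attributes: ['DEFAULT']
  )
  resp.face_records.each { |r| puts "indexed: #{r.face.face_id}" }
  resp.unindexed_faces.each { |u| puts "skipped: #{u.reasons.join(', ')}" }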
@@ -2664,25 +2679,25 @@ module Aws::Rekognition
 # `RecognizeCelebrities` returns the 100 largest faces in the image. It
 # lists recognized celebrities in the `CelebrityFaces` array and
 # unrecognized faces in the `UnrecognizedFaces` array.
-# `RecognizeCelebrities` doesn't return celebrities whose faces
-#
+# `RecognizeCelebrities` doesn't return celebrities whose faces aren't
+# among the largest 100 faces in the image.
 #
-# For each celebrity recognized,
+# For each celebrity recognized, `RecognizeCelebrities` returns a
 # `Celebrity` object. The `Celebrity` object contains the celebrity
 # name, ID, URL links to additional information, match confidence, and a
 # `ComparedFace` object that you can use to locate the celebrity's face
 # on the image.
 #
-# Rekognition
-# has been recognized in. Your application must store this
-# and use the `Celebrity` ID property as a unique identifier
-# celebrity. If you don't store the celebrity name or
-# information URLs returned by `RecognizeCelebrities`, you
-# ID to identify the celebrity in a call to the operation.
+# Amazon Rekognition doesn't retain information about which images a
+# celebrity has been recognized in. Your application must store this
+# information and use the `Celebrity` ID property as a unique identifier
+# for the celebrity. If you don't store the celebrity name or
+# additional information URLs returned by `RecognizeCelebrities`, you
+# will need the ID to identify the celebrity in a call to the operation.
 #
-# You pass the
-# reference to an image in an Amazon S3 bucket. If you use the
-#
+# You pass the input image either as base64-encoded image bytes or as a
+# reference to an image in an Amazon S3 bucket. If you use the AWS CLI
+# to call Amazon Rekognition operations, passing image bytes is not
 # supported. The image must be either a PNG or JPEG formatted file.
 #
 # For an example, see Recognizing Celebrities in an Image in the Amazon
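For the RecognizeCelebrities behavior above, a minimal Ruby sketch follows; the bucket and key are placeholders, and storing the returned IDs is your application's responsibility, as the hunk notes.

  require 'aws-sdk-rekognition'

  client = Aws::Rekognition::Client.new(region: 'us-east-1') # assumed region
  resp = client.recognize_celebrities(
    image: { s3_object: { bucket: 'my-bucket', name: 'red-carpet.jpg' } }
  )
  resp.celebrity_faces.each do |c|
    # Store c.id yourself; the service doesn't retain which images a celebrity appeared in.
    puts "#{c.name} (id: #{c.id}, confidence: #{c.match_confidence})"
  end
  puts "unrecognized faces: #{resp.unrecognized_faces.size}"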
@@ -2914,8 +2929,8 @@ module Aws::Rekognition
 # </note>
 #
 # You pass the input image either as base64-encoded image bytes or as a
-# reference to an image in an Amazon S3 bucket. If you use the
-#
+# reference to an image in an Amazon S3 bucket. If you use the AWS CLI
+# to call Amazon Rekognition operations, passing image bytes is not
 # supported. The image must be either a PNG or JPEG formatted file.
 #
 # The response returns an array of faces that match, ordered by
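This hunk appears to belong to the search-by-image operation (an input image plus "an array of faces that match" in the response is characteristic of SearchFacesByImage). Assuming that, here is a minimal sketch with placeholder collection, bucket, key, and threshold values.

  require 'aws-sdk-rekognition'

  client = Aws::Rekognition::Client.new(region: 'us-east-1') # assumed region
  resp = client.search_faces_by_image(
    collection_id: 'my-collection', # must already contain indexed faces
    image: { s3_object: { bucket: 'my-bucket', name: 'query.jpg' } },
    face_match_threshold: 90.0,
    max_faces: 5
  )
  # Matches are ordered by similarity, highest first.
  resp.face_matches.each { |m| puts "#{m.face.face_id}: #{m.similarity}" }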
@@ -3549,7 +3564,7 @@ module Aws::Rekognition
 params: params,
 config: config)
 context[:gem_name] = 'aws-sdk-rekognition'
-context[:gem_version] = '1.
+context[:gem_version] = '1.11.0'
 Seahorse::Client::Request.new(handlers, context)
 end

data/lib/aws-sdk-rekognition/types.rb
CHANGED
@@ -10,10 +10,10 @@ module Aws::Rekognition

 # Structure containing the estimated age range, in years, for a face.
 #
-# Rekognition estimates an age
-# image. Estimated age ranges can overlap
-# have an estimated range of 4-6
-# have an estimated range of 4-8.
+# Amazon Rekognition estimates an age range for faces detected in the
+# input image. Estimated age ranges can overlap. A face of a 5-year-old
+# might have an estimated range of 4-6, while the face of a 6-year-old
+# might have an estimated range of 4-8.
 #
 # @!attribute [rw] low
 # The lowest estimated age.
@@ -119,7 +119,7 @@ module Aws::Rekognition
 # @return [Types::ComparedFace]
 #
 # @!attribute [rw] match_confidence
-# The confidence, in percentage, that Rekognition has that the
+# The confidence, in percentage, that Amazon Rekognition has that the
 # recognized face is the celebrity.
 # @return [Float]
 #
@@ -189,7 +189,7 @@ module Aws::Rekognition
 end

 # Provides information about a face in a target image that matches the
-# source image face
+# source image face analyzed by `CompareFaces`. The `Face` property
 # contains the bounding box of the face in the target image. The
 # `Similarity` property is the confidence that the source image face
 # matches the face in the bounding box.
@@ -283,10 +283,10 @@ module Aws::Rekognition
 # <note markdown="1"> If the source image is in .jpeg format, it might contain
 # exchangeable image (Exif) metadata that includes the image's
 # orientation. If the Exif metadata for the source image populates the
-# orientation field, the value of `OrientationCorrection` is null
-#
-#
-#
+# orientation field, the value of `OrientationCorrection` is null. The
+# `SourceImageFace` bounding box coordinates represent the location of
+# the face after Exif metadata is used to correct the orientation.
+# Images in .png format don't contain Exif metadata.
 #
 # </note>
 # @return [String]
@@ -301,7 +301,7 @@ module Aws::Rekognition
 # <note markdown="1"> If the target image is in .jpg format, it might contain Exif
 # metadata that includes the orientation of the image. If the Exif
 # metadata for the target image populates the orientation field, the
-# value of `OrientationCorrection` is null
+# value of `OrientationCorrection` is null. The bounding box
 # coordinates in `FaceMatches` and `UnmatchedFaces` represent the
 # location of the face after Exif metadata is used to correct the
 # orientation. Images in .png format don't contain Exif metadata.
@@ -318,7 +318,7 @@ module Aws::Rekognition
 include Aws::Structure
 end

-# Provides face metadata for target image faces that are
+# Provides face metadata for target image faces that are analyzed by
 # `CompareFaces` and `RecognizeCelebrities`.
 #
 # @!attribute [rw] bounding_box
@@ -717,9 +717,9 @@ module Aws::Rekognition
 # the default list of attributes or all attributes. If you don't
 # specify a value for `Attributes` or if you specify `["DEFAULT"]`,
 # the API returns the following subset of facial attributes:
-# `BoundingBox`, `Confidence`, `Pose`, `Quality
-# you provide `["ALL"]`, all facial attributes are returned but the
-# operation
+# `BoundingBox`, `Confidence`, `Pose`, `Quality`, and `Landmarks`. If
+# you provide `["ALL"]`, all facial attributes are returned, but the
+# operation takes longer to complete.
 #
 # If you provide both, `["ALL", "DEFAULT"]`, the service uses a
 # logical AND operator to determine which attributes to return (in
@@ -746,7 +746,7 @@ module Aws::Rekognition
 # <note markdown="1"> If the input image is in .jpeg format, it might contain exchangeable
 # image (Exif) metadata that includes the image's orientation. If so,
 # and the Exif metadata for the input image populates the orientation
-# field, the value of `OrientationCorrection` is null
+# field, the value of `OrientationCorrection` is null. The
 # `FaceDetails` bounding box coordinates represent face locations
 # after Exif metadata is used to correct the image orientation. Images
 # in .png format don't contain Exif metadata.
@@ -1146,8 +1146,8 @@ module Aws::Rekognition
 include Aws::Structure
 end

-# Object containing both the face metadata (stored in the
-# database) and facial attributes that are detected but aren't stored
+# Object containing both the face metadata (stored in the backend
+# database), and facial attributes that are detected but aren't stored
 # in the database.
 #
 # @!attribute [rw] face
|

 # Provides the input image either as bytes or an S3 object.
 #
-# You pass image bytes to
-# `Bytes` property. For example, you would use the `Bytes` property
-# pass an image loaded from a local file system. Image bytes passed
-# using the `Bytes` property must be base64-encoded. Your code may
-# need to encode image bytes if you are using an AWS SDK to call
-# Rekognition API operations.
+# You pass image bytes to an Amazon Rekognition API operation by using
+# the `Bytes` property. For example, you would use the `Bytes` property
+# to pass an image loaded from a local file system. Image bytes passed
+# by using the `Bytes` property must be base64-encoded. Your code may
+# not need to encode image bytes if you are using an AWS SDK to call
+# Amazon Rekognition API operations.
 #
 # For more information, see Analyzing an Image Loaded from a Local File
 # System in the Amazon Rekognition Developer Guide.
 #
-# You pass images stored in an S3 bucket to
-# by using the `S3Object` property. Images stored in an S3
-# need to be base64-encoded.
+# You pass images stored in an S3 bucket to an Amazon Rekognition API
+# operation by using the `S3Object` property. Images stored in an S3
+# bucket do not need to be base64-encoded.
 #
 # The region for the S3 bucket containing the S3 object must match the
 # region you use for Amazon Rekognition operations.
 #
-# If you use the
-#
-#
-#
+# If you use the AWS CLI to call Amazon Rekognition operations, passing
+# image bytes using the Bytes property is not supported. You must first
+# upload the image to an Amazon S3 bucket and then call the operation
+# using the S3Object property.
 #
 # For Amazon Rekognition to process an S3 object, the user must have
 # permission to access the S3 object. For more information, see Resource
|
 # @!attribute [rw] image
 # The input image as base64-encoded bytes or an S3 object. If you use
 # the AWS CLI to call Amazon Rekognition operations, passing
-# base64-encoded image bytes
+# base64-encoded image bytes isn't supported.
 # @return [Types::Image]
 #
 # @!attribute [rw] external_image_id
-# ID you want to assign to all the faces detected in the image.
+# The ID you want to assign to all the faces detected in the image.
 # @return [String]
 #
 # @!attribute [rw] detection_attributes
|
 # be the default list of attributes or all attributes. If you don't
 # specify a value for `Attributes` or if you specify `["DEFAULT"]`,
 # the API returns the following subset of facial attributes:
-# `BoundingBox`, `Confidence`, `Pose`, `Quality
-# you provide `["ALL"]`, all facial attributes are returned but the
-# operation
+# `BoundingBox`, `Confidence`, `Pose`, `Quality`, and `Landmarks`. If
+# you provide `["ALL"]`, all facial attributes are returned, but the
+# operation takes longer to complete.
 #
 # If you provide both, `["ALL", "DEFAULT"]`, the service uses a
 # logical AND operator to determine which attributes to return (in
|
 #
 # @!attribute [rw] max_faces
 # The maximum number of faces to index. The value of `MaxFaces` must
-# be greater than or equal to 1. `IndexFaces` returns no more
+# be greater than or equal to 1. `IndexFaces` returns no more than 100
 # detected faces in an image, even if you specify a larger value for
 # `MaxFaces`.
 #
 # If `IndexFaces` detects more faces than the value of `MaxFaces`, the
 # faces with the lowest quality are filtered out first. If there are
 # still more faces than the value of `MaxFaces`, the faces with the
-# smallest bounding boxes are filtered out (up to the number
-# satisfy the value of `MaxFaces`). Information about the
-# faces is available in the `UnindexedFaces` array.
+# smallest bounding boxes are filtered out (up to the number that's
+# needed to satisfy the value of `MaxFaces`). Information about the
+# unindexed faces is available in the `UnindexedFaces` array.
 #
-# The faces returned by `IndexFaces` are sorted
-#
+# The faces that are returned by `IndexFaces` are sorted by the
+# largest face bounding box size to the smallest size, in descending
+# order.
+#
+# `MaxFaces` can be used with a collection associated with any version
+# of the face model.
 # @return [Integer]
 #
 # @!attribute [rw] quality_filter
-#
-# low quality. Filtered faces
-# filtering prioritizes the identification of
-# the required quality bar chosen by Amazon
-# bar is based on a variety of common use
-# detections can
-#
-#
-# is performed. The default
+# A filter that specifies how much filtering is done to identify faces
+# that are detected with low quality. Filtered faces aren't indexed.
+# If you specify `AUTO`, filtering prioritizes the identification of
+# faces that don’t meet the required quality bar chosen by Amazon
+# Rekognition. The quality bar is based on a variety of common use
+# cases. Low-quality detections can occur for a number of reasons.
+# Some examples are an object that's misidentified as a face, a face
+# that's too blurry, or a face with a pose that's too extreme to
+# use. If you specify `NONE`, no filtering is performed. The default
+# value is AUTO.
+#
+# To use quality filtering, the collection you are using must be
+# associated with version 3 of the face model.
 # @return [String]
 #
 class IndexFacesRequest < Struct.new(
@@ -1910,24 +1918,25 @@ module Aws::Rekognition
 #
 # <note markdown="1"> If the input image is in jpeg format, it might contain exchangeable
 # image (Exif) metadata. If so, and the Exif metadata populates the
-# orientation field, the value of `OrientationCorrection` is null
-#
-#
-#
+# orientation field, the value of `OrientationCorrection` is null. The
+# bounding box coordinates in `FaceRecords` represent face locations
+# after Exif metadata is used to correct the image orientation. Images
+# in .png format don't contain Exif metadata.
 #
 # </note>
 # @return [String]
 #
 # @!attribute [rw] face_model_version
-#
-# collection (`CollectionId`).
+# The version number of the face detection model that's associated
+# with the input collection (`CollectionId`).
 # @return [String]
 #
 # @!attribute [rw] unindexed_faces
-# An array of faces that detected in the image but
-#
-# `MaxFaces` request parameter filtered
-# filter, you specify the `QualityFilter`
+# An array of faces that were detected in the image but weren't
+# indexed. They weren't indexed because the quality filter identified
+# them as low quality, or the `MaxFaces` request parameter filtered
+# them out. To use the quality filter, you specify the `QualityFilter`
+# request parameter.
 # @return [Array<Types::UnindexedFace>]
 #
 class IndexFacesResponse < Struct.new(
@@ -2018,20 +2027,20 @@ module Aws::Rekognition
 # Indicates the location of the landmark on the face.
 #
 # @!attribute [rw] type
-# Type of
+# Type of landmark.
 # @return [String]
 #
 # @!attribute [rw] x
-# x-coordinate from the top left of the landmark expressed as the
-# ratio of the width of the image. For example, if the
-#
+# The x-coordinate from the top left of the landmark expressed as the
+# ratio of the width of the image. For example, if the image is 700 x
+# 200 and the x-coordinate of the landmark is at 350 pixels, this
 # value is 0.5.
 # @return [Float]
 #
 # @!attribute [rw] y
-# y-coordinate from the top left of the landmark expressed as the
-# ratio of the height of the image. For example, if the
-#
+# The y-coordinate from the top left of the landmark expressed as the
+# ratio of the height of the image. For example, if the image is 700 x
+# 200 and the y-coordinate of the landmark is at 100 pixels, this
 # value is 0.5.
 # @return [Float]
 #
|
 # @return [String]
 #
 # @!attribute [rw] parent_name
-# The name for the parent label. Labels at the top
+# The name for the parent label. Labels at the top level of the
 # hierarchy have the parent label `""`.
 # @return [String]
 #
@@ -2322,11 +2331,11 @@ module Aws::Rekognition
 include Aws::Structure
 end

-# Information about a person whose face matches a face(s) in
+# Information about a person whose face matches a face(s) in an Amazon
 # Rekognition collection. Includes information about the faces in the
 # Amazon Rekognition collection (), information about the person
-# (PersonDetail) and the
-# video. An array of `PersonMatch` objects is returned by .
+# (PersonDetail), and the time stamp for when the person was detected in
+# a video. An array of `PersonMatch` objects is returned by .
 #
 # @!attribute [rw] timestamp
 # The time, in milliseconds from the beginning of the video, that the
|
 # <note markdown="1"> If the input image is in .jpeg format, it might contain exchangeable
 # image (Exif) metadata that includes the image's orientation. If so,
 # and the Exif metadata for the input image populates the orientation
-# field, the value of `OrientationCorrection` is null
+# field, the value of `OrientationCorrection` is null. The
 # `CelebrityFaces` and `UnrecognizedFaces` bounding box coordinates
 # represent face locations after Exif metadata is used to correct the
 # image orientation. Images in .png format don't contain Exif
|
 # region you use for Amazon Rekognition operations.
 #
 # For Amazon Rekognition to process an S3 object, the user must have
-# permission to access the S3 object. For more information, see
-# Based Policies in the Amazon Rekognition Developer Guide.
+# permission to access the S3 object. For more information, see
+# Resource-Based Policies in the Amazon Rekognition Developer Guide.
 #
 # @note When making an API call, you may pass S3Object
 # data as a hash:
@@ -3243,11 +3252,11 @@ module Aws::Rekognition
 include Aws::Structure
 end

-# A face detected
-# attribute to determine why a face
+# A face that detected, but didn't index. Use the `Reasons` response
+# attribute to determine why a face wasn't indexed.
 #
 # @!attribute [rw] reasons
-# An array of reasons
+# An array of reasons that specify why a face wasn't indexed.
 #
 # * EXTREME\_POSE - The face is at a pose that can't be detected. For
 # example, the head is turned too far away from the camera.
@@ -3267,8 +3276,8 @@ module Aws::Rekognition
 # @return [Array<String>]
 #
 # @!attribute [rw] face_detail
-#
-#
+# The structure that contains attributes of a face that
+# `IndexFaces`detected, but didn't index.
 # @return [Types::FaceDetail]
 #
 class UnindexedFace < Struct.new(
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: aws-sdk-rekognition
 version: !ruby/object:Gem::Version
-version: 1.
+version: 1.11.0
 platform: ruby
 authors:
 - Amazon Web Services
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-
+date: 2018-10-01 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
 name: aws-sdk-core