aws-sdk-rekognition 1.0.0.rc2 → 1.0.0.rc3
- checksums.yaml +4 -4
- data/lib/aws-sdk-rekognition.rb +1 -1
- data/lib/aws-sdk-rekognition/client.rb +958 -839
- data/lib/aws-sdk-rekognition/client_api.rb +514 -516
- data/lib/aws-sdk-rekognition/errors.rb +4 -13
- data/lib/aws-sdk-rekognition/resource.rb +12 -14
- data/lib/aws-sdk-rekognition/types.rb +1204 -1052
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f09db9c79146687943e0a31adc2fe709d40a8cde
+  data.tar.gz: e952dadf75d712a3612a0fec3e89e0c074a6d19e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e786c5f8cb95277f36c1886a6566968499dfd206247add6684683cfe690b5f48993e5124264e46b4fff2bafb40104ca11b9032970a4481a5b19461a985e1571a
+  data.tar.gz: 7f6f5a5515787b4399a5b3d1d07f2ba526d6d0578e8106f67746f8090c4bb3dbbeceb53dcf276d8e0ed47ef61c9ff4f8fcfa216b4dbfab7c1f84dcf5266c996f
data/lib/aws-sdk-rekognition.rb
CHANGED
@@ -1,6 +1,6 @@
 # WARNING ABOUT GENERATED CODE
 #
-# This file is generated. See the contributing for
+# This file is generated. See the contributing guide for more information:
 # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
 #
 # WARNING ABOUT GENERATED CODE
data/lib/aws-sdk-rekognition/client.rb
CHANGED
@@ -1,6 +1,6 @@
 # WARNING ABOUT GENERATED CODE
 #
-# This file is generated. See the contributing for
+# This file is generated. See the contributing guide for more information:
 # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
 #
 # WARNING ABOUT GENERATED CODE
@@ -23,868 +23,987 @@ require 'aws-sdk-core/plugins/protocols/json_rpc.rb'

 Aws::Plugins::GlobalConfiguration.add_identifier(:rekognition)

-module Aws
- [old line 27 removed; content not shown in this view]
-  class Client < Seahorse::Client::Base
+module Aws::Rekognition
+  class Client < Seahorse::Client::Base

- [old line 30 removed; content not shown in this view]
+    include Aws::ClientStubs

- [old line 32 removed; content not shown in this view]
+    @identifier = :rekognition

- [old line 34 removed; content not shown in this view]
+    set_api(ClientApi::API)

- [old lines 36-50 removed; content not shown in this view]
+    add_plugin(Seahorse::Client::Plugins::ContentLength)
+    add_plugin(Aws::Plugins::CredentialsConfiguration)
+    add_plugin(Aws::Plugins::Logging)
+    add_plugin(Aws::Plugins::ParamConverter)
+    add_plugin(Aws::Plugins::ParamValidator)
+    add_plugin(Aws::Plugins::UserAgent)
+    add_plugin(Aws::Plugins::HelpfulSocketErrors)
+    add_plugin(Aws::Plugins::RetryErrors)
+    add_plugin(Aws::Plugins::GlobalConfiguration)
+    add_plugin(Aws::Plugins::RegionalEndpoint)
+    add_plugin(Aws::Plugins::ResponsePaging)
+    add_plugin(Aws::Plugins::StubResponses)
+    add_plugin(Aws::Plugins::IdempotencyToken)
+    add_plugin(Aws::Plugins::SignatureV4)
+    add_plugin(Aws::Plugins::Protocols::JsonRpc)

- [old lines 52-138 removed; content not shown in this view]
+    # @option options [required, Aws::CredentialProvider] :credentials
+    #   Your AWS credentials. This can be an instance of any one of the
+    #   following classes:
+    #
+    #   * `Aws::Credentials` - Used for configuring static, non-refreshing
+    #     credentials.
+    #
+    #   * `Aws::InstanceProfileCredentials` - Used for loading credentials
+    #     from an EC2 IMDS on an EC2 instance.
+    #
+    #   * `Aws::SharedCredentials` - Used for loading credentials from a
+    #     shared file, such as `~/.aws/config`.
+    #
+    #   * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
+    #
+    #   When `:credentials` are not configured directly, the following
+    #   locations will be searched for credentials:
+    #
+    #   * `Aws.config[:credentials]`
+    #   * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
+    #   * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
+    #   * `~/.aws/credentials`
+    #   * `~/.aws/config`
+    #   * EC2 IMDS instance profile - When used by default, the timeouts are
+    #     very aggressive. Construct and pass an instance of
+    #     `Aws::InstanceProfileCredentails` to enable retries and extended
+    #     timeouts.
+    #
+    # @option options [required, String] :region
+    #   The AWS region to connect to. The configured `:region` is
+    #   used to determine the service `:endpoint`. When not passed,
+    #   a default `:region` is search for in the following locations:
+    #
+    #   * `Aws.config[:region]`
+    #   * `ENV['AWS_REGION']`
+    #   * `ENV['AMAZON_REGION']`
+    #   * `ENV['AWS_DEFAULT_REGION']`
+    #   * `~/.aws/credentials`
+    #   * `~/.aws/config`
+    #
+    # @option options [String] :access_key_id
+    #
+    # @option options [Boolean] :convert_params (true)
+    #   When `true`, an attempt is made to coerce request parameters into
+    #   the required types.
+    #
+    # @option options [String] :endpoint
+    #   The client endpoint is normally constructed from the `:region`
+    #   option. You should only configure an `:endpoint` when connecting
+    #   to test endpoints. This should be avalid HTTP(S) URI.
+    #
+    # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
+    #   The log formatter.
+    #
+    # @option options [Symbol] :log_level (:info)
+    #   The log level to send messages to the `:logger` at.
+    #
+    # @option options [Logger] :logger
+    #   The Logger instance to send log messages to. If this option
+    #   is not set, logging will be disabled.
+    #
+    # @option options [String] :profile ("default")
+    #   Used when loading credentials from the shared credentials file
+    #   at HOME/.aws/credentials. When not specified, 'default' is used.
+    #
+    # @option options [Integer] :retry_limit (3)
+    #   The maximum number of times to retry failed requests. Only
+    #   ~ 500 level server errors and certain ~ 400 level client errors
+    #   are retried. Generally, these are throttling errors, data
+    #   checksum errors, networking errors, timeout errors and auth
+    #   errors from expired credentials.
+    #
+    # @option options [String] :secret_access_key
+    #
+    # @option options [String] :session_token
+    #
+    # @option options [Boolean] :simple_json (false)
+    #   Disables request parameter conversion, validation, and formatting.
+    #   Also disable response data type conversions. This option is useful
+    #   when you want to ensure the highest level of performance by
+    #   avoiding overhead of walking request parameters and response data
+    #   structures.
+    #
+    #   When `:simple_json` is enabled, the request parameters hash must
+    #   be formatted exactly as the DynamoDB API expects.
+    #
+    # @option options [Boolean] :stub_responses (false)
+    #   Causes the client to return stubbed responses. By default
+    #   fake responses are generated and returned. You can specify
+    #   the response data to return or errors to raise by calling
+    #   {ClientStubs#stub_responses}. See {ClientStubs} for more information.
+    #
+    #   ** Please note ** When response stubbing is enabled, no HTTP
+    #   requests are made, and retries are disabled.
+    #
+    # @option options [Boolean] :validate_params (true)
+    #   When `true`, request parameters are validated before
+    #   sending the request.
+    #
+    def initialize(*args)
+      super
+    end

- [old line 140 removed; content not shown in this view]
+    # @!group API Operations

|
-
|
143
|
-
|
144
|
-
|
145
|
-
|
146
|
-
|
147
|
-
|
148
|
-
|
149
|
-
|
150
|
-
|
151
|
-
|
152
|
-
|
153
|
-
|
154
|
-
|
155
|
-
|
156
|
-
|
157
|
-
|
158
|
-
|
159
|
-
|
160
|
-
|
161
|
-
|
162
|
-
|
163
|
-
|
164
|
-
|
165
|
-
|
166
|
-
|
167
|
-
|
168
|
-
|
169
|
-
|
170
|
-
|
171
|
-
|
172
|
-
|
173
|
-
|
174
|
-
|
175
|
-
|
176
|
-
|
177
|
-
|
178
|
-
|
179
|
-
|
180
|
-
|
181
|
-
|
182
|
-
|
183
|
-
|
184
|
-
|
185
|
-
|
186
|
-
|
187
|
-
|
188
|
-
|
189
|
-
|
190
|
-
|
191
|
-
|
192
|
-
|
193
|
-
|
194
|
-
|
195
|
-
|
196
|
-
|
197
|
-
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
|
202
|
-
|
203
|
-
|
204
|
-
|
205
|
-
|
206
|
-
|
207
|
-
|
208
|
-
|
209
|
-
|
210
|
-
|
211
|
-
|
212
|
-
|
213
|
-
|
214
|
-
|
215
|
-
|
216
|
-
|
217
|
-
|
218
|
-
|
219
|
-
|
220
|
-
|
221
|
-
|
222
|
-
|
223
|
-
|
224
|
-
|
225
|
-
|
226
|
-
|
227
|
-
|
228
|
-
|
229
|
-
|
230
|
-
|
231
|
-
|
232
|
-
|
233
|
-
|
234
|
-
|
235
|
-
# can search the collection for faces in the user-specific container.
|
236
|
-
#
|
237
|
-
# For an example, see example1.
|
238
|
-
#
|
239
|
-
# This operation requires permissions to perform the
|
240
|
-
# `rekognition:CreateCollection` action.
|
241
|
-
# @option params [required, String] :collection_id
|
242
|
-
# ID for the collection that you are creating.
|
243
|
-
# @return [Types::CreateCollectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
|
244
|
-
#
|
245
|
-
# * {Types::CreateCollectionResponse#status_code #StatusCode} => Integer
|
246
|
-
# * {Types::CreateCollectionResponse#collection_arn #CollectionArn} => String
|
247
|
-
#
|
248
|
-
# @example Request syntax with placeholder values
|
249
|
-
# resp = client.create_collection({
|
250
|
-
# collection_id: "CollectionId", # required
|
251
|
-
# })
|
252
|
-
#
|
253
|
-
# @example Response structure
|
254
|
-
# resp.status_code #=> Integer
|
255
|
-
# resp.collection_arn #=> String
|
256
|
-
# @overload create_collection(params = {})
|
257
|
-
# @param [Hash] params ({})
|
258
|
-
def create_collection(params = {}, options = {})
|
259
|
-
req = build_request(:create_collection, params)
|
260
|
-
req.send_request(options)
|
261
|
-
end
|
156
|
+
# Compares a face in the *source* input image with each face detected in
|
157
|
+
# the *target* input image.
|
158
|
+
#
|
159
|
+
# <note markdown="1"> If the source image contains multiple faces, the service detects the
|
160
|
+
# largest face and uses it to compare with each face detected in the
|
161
|
+
# target image.
|
162
|
+
#
|
163
|
+
# </note>
|
164
|
+
#
|
165
|
+
# In response, the operation returns an array of face matches ordered by
|
166
|
+
# similarity score with the highest similarity scores first. For each
|
167
|
+
# face match, the response provides a bounding box of the face and
|
168
|
+
# `confidence` value (indicating the level of confidence that the
|
169
|
+
# bounding box contains a face). The response also provides a
|
170
|
+
# `similarity` score, which indicates how closely the faces match.
|
171
|
+
#
|
172
|
+
# <note markdown="1"> By default, only faces with the similarity score of greater than or
|
173
|
+
# equal to 80% are returned in the response. You can change this value.
|
174
|
+
#
|
175
|
+
# </note>
|
176
|
+
#
|
177
|
+
# In addition to the face matches, the response returns information
|
178
|
+
# about the face in the source image, including the bounding box of the
|
179
|
+
# face and confidence value.
|
180
|
+
#
|
181
|
+
# <note markdown="1"> This is a stateless API operation. That is, the operation does not
|
182
|
+
# persist any data.
|
183
|
+
#
|
184
|
+
# </note>
|
185
|
+
#
|
186
|
+
# For an example, see get-started-exercise-compare-faces
|
187
|
+
#
|
188
|
+
# This operation requires permissions to perform the
|
189
|
+
# `rekognition:CompareFaces` action.
|
190
|
+
#
|
191
|
+
# @option params [required, Types::Image] :source_image
|
192
|
+
# Source image either as bytes or an S3 object
|
193
|
+
#
|
194
|
+
# @option params [required, Types::Image] :target_image
|
195
|
+
# Target image either as bytes or an S3 object
|
196
|
+
#
|
197
|
+
# @option params [Float] :similarity_threshold
|
198
|
+
# The minimum level of confidence in the match you want included in the
|
199
|
+
# result.
|
200
|
+
#
|
201
|
+
# @return [Types::CompareFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
|
202
|
+
#
|
203
|
+
# * {Types::CompareFacesResponse#source_image_face #source_image_face} => Types::ComparedSourceImageFace
|
204
|
+
# * {Types::CompareFacesResponse#face_matches #face_matches} => Array<Types::CompareFacesMatch>
|
205
|
+
#
|
206
|
+
# @example Request syntax with placeholder values
|
207
|
+
#
|
208
|
+
# resp = client.compare_faces({
|
209
|
+
# source_image: { # required
|
210
|
+
# bytes: "data",
|
211
|
+
# s3_object: {
|
212
|
+
# bucket: "S3Bucket",
|
213
|
+
# name: "S3ObjectName",
|
214
|
+
# version: "S3ObjectVersion",
|
215
|
+
# },
|
216
|
+
# },
|
217
|
+
# target_image: { # required
|
218
|
+
# bytes: "data",
|
219
|
+
# s3_object: {
|
220
|
+
# bucket: "S3Bucket",
|
221
|
+
# name: "S3ObjectName",
|
222
|
+
# version: "S3ObjectVersion",
|
223
|
+
# },
|
224
|
+
# },
|
225
|
+
# similarity_threshold: 1.0,
|
226
|
+
# })
|
227
|
+
#
|
228
|
+
# @example Response structure
|
229
|
+
#
|
230
|
+
# resp.source_image_face.bounding_box.width #=> Float
|
231
|
+
# resp.source_image_face.bounding_box.height #=> Float
|
232
|
+
# resp.source_image_face.bounding_box.left #=> Float
|
233
|
+
# resp.source_image_face.bounding_box.top #=> Float
|
234
|
+
# resp.source_image_face.confidence #=> Float
|
235
|
+
# resp.face_matches #=> Array
|
236
|
+
# resp.face_matches[0].similarity #=> Float
|
237
|
+
# resp.face_matches[0].face.bounding_box.width #=> Float
|
238
|
+
# resp.face_matches[0].face.bounding_box.height #=> Float
|
239
|
+
# resp.face_matches[0].face.bounding_box.left #=> Float
|
240
|
+
# resp.face_matches[0].face.bounding_box.top #=> Float
|
241
|
+
# resp.face_matches[0].face.confidence #=> Float
|
242
|
+
#
|
243
|
+
# @overload compare_faces(params = {})
|
244
|
+
# @param [Hash] params ({})
|
245
|
+
def compare_faces(params = {}, options = {})
|
246
|
+
req = build_request(:compare_faces, params)
|
247
|
+
req.send_request(options)
|
248
|
+
end
|
262
249
|
|
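A minimal usage sketch for the compare_faces operation added above, assuming the rekognition client constructed earlier; the bucket and object names are hypothetical.

  resp = rekognition.compare_faces(
    source_image: { s3_object: { bucket: 'my-bucket', name: 'source.jpg' } },  # hypothetical bucket/key
    target_image: { s3_object: { bucket: 'my-bucket', name: 'target.jpg' } },
    similarity_threshold: 80.0   # mirrors the documented 80% default
  )
  resp.face_matches.each do |match|
    puts "similarity #{match.similarity}, face confidence #{match.face.confidence}"
  end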
- [old lines 263-286 removed; content not shown in this view]
+    # Creates a collection in an AWS Region. You can add faces to the
+    # collection using the operation.
+    #
+    # For example, you might create collections, one for each of your
+    # application users. A user can then index faces using the `IndexFaces`
+    # operation and persist results in a specific collection. Then, a user
+    # can search the collection for faces in the user-specific container.
+    #
+    # For an example, see example1.
+    #
+    # This operation requires permissions to perform the
+    # `rekognition:CreateCollection` action.
+    #
+    # @option params [required, String] :collection_id
+    #   ID for the collection that you are creating.
+    #
+    # @return [Types::CreateCollectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::CreateCollectionResponse#status_code #status_code} => Integer
+    #   * {Types::CreateCollectionResponse#collection_arn #collection_arn} => String
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.create_collection({
+    #     collection_id: "CollectionId", # required
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.status_code #=> Integer
+    #   resp.collection_arn #=> String
+    #
+    # @overload create_collection(params = {})
+    # @param [Hash] params ({})
+    def create_collection(params = {}, options = {})
+      req = build_request(:create_collection, params)
+      req.send_request(options)
+    end

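A sketch of the per-user collection pattern the create_collection documentation describes; the collection ID is hypothetical.

  resp = rekognition.create_collection(collection_id: 'my-app-user-123')  # hypothetical ID
  puts resp.status_code       # HTTP-style status code returned by the service
  puts resp.collection_arn    # ARN of the newly created collection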
- [old lines 288-315 removed; content not shown in this view]
+    # Deletes the specified collection. Note that this operation removes all
+    # faces in the collection. For an example, see example1.
+    #
+    # This operation requires permissions to perform the
+    # `rekognition:DeleteCollection` action.
+    #
+    # @option params [required, String] :collection_id
+    #   ID of the collection to delete.
+    #
+    # @return [Types::DeleteCollectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::DeleteCollectionResponse#status_code #status_code} => Integer
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.delete_collection({
+    #     collection_id: "CollectionId", # required
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.status_code #=> Integer
+    #
+    # @overload delete_collection(params = {})
+    # @param [Hash] params ({})
+    def delete_collection(params = {}, options = {})
+      req = build_request(:delete_collection, params)
+      req.send_request(options)
+    end

- [old lines 317-350 removed; content not shown in this view]
-    #
-    #   * {Types::DetectFacesResponse#face_details #FaceDetails} => Array&lt;Types::FaceDetail&gt;
-    #   * {Types::DetectFacesResponse#orientation_correction #OrientationCorrection} => String
-    #
-    # @example Request syntax with placeholder values
-    #   resp = client.detect_faces({
-    #     image: { # required
-    #       bytes: "data",
-    #       s3_object: {
-    #         bucket: "S3Bucket",
-    #         name: "S3ObjectName",
-    #         version: "S3ObjectVersion",
-    #       },
-    #     },
-    #     attributes: ["DEFAULT"], # accepts DEFAULT, ALL
-    #   })
-    #
-    # @example Response structure
-    #   resp.face_details #=> Array
-    #   resp.face_details[0].bounding_box.width #=> Float
-    #   resp.face_details[0].bounding_box.height #=> Float
-    #   resp.face_details[0].bounding_box.left #=> Float
-    #   resp.face_details[0].bounding_box.top #=> Float
-    #   resp.face_details[0].smile.value #=> Boolean
-    #   resp.face_details[0].smile.confidence #=> Float
-    #   resp.face_details[0].eyeglasses.value #=> Boolean
-    #   resp.face_details[0].eyeglasses.confidence #=> Float
-    #   resp.face_details[0].sunglasses.value #=> Boolean
-    #   resp.face_details[0].sunglasses.confidence #=> Float
-    #   resp.face_details[0].gender.value #=> String, one of "MALE", "FEMALE"
-    #   resp.face_details[0].gender.confidence #=> Float
-    #   resp.face_details[0].beard.value #=> Boolean
-    #   resp.face_details[0].beard.confidence #=> Float
-    #   resp.face_details[0].mustache.value #=> Boolean
-    #   resp.face_details[0].mustache.confidence #=> Float
-    #   resp.face_details[0].eyes_open.value #=> Boolean
-    #   resp.face_details[0].eyes_open.confidence #=> Float
-    #   resp.face_details[0].mouth_open.value #=> Boolean
-    #   resp.face_details[0].mouth_open.confidence #=> Float
-    #   resp.face_details[0].emotions #=> Array
-    #   resp.face_details[0].emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
-    #   resp.face_details[0].emotions[0].confidence #=> Float
-    #   resp.face_details[0].landmarks #=> Array
-    #   resp.face_details[0].landmarks[0].type #=> String, one of "EYE_LEFT", "EYE_RIGHT", "NOSE", "MOUTH_LEFT", "MOUTH_RIGHT", "LEFT_EYEBROW_LEFT", "LEFT_EYEBROW_RIGHT", "LEFT_EYEBROW_UP", "RIGHT_EYEBROW_LEFT", "RIGHT_EYEBROW_RIGHT", "RIGHT_EYEBROW_UP", "LEFT_EYE_LEFT", "LEFT_EYE_RIGHT", "LEFT_EYE_UP", "LEFT_EYE_DOWN", "RIGHT_EYE_LEFT", "RIGHT_EYE_RIGHT", "RIGHT_EYE_UP", "RIGHT_EYE_DOWN", "NOSE_LEFT", "NOSE_RIGHT", "MOUTH_UP", "MOUTH_DOWN", "LEFT_PUPIL", "RIGHT_PUPIL"
-    #   resp.face_details[0].landmarks[0].x #=> Float
-    #   resp.face_details[0].landmarks[0].y #=> Float
-    #   resp.face_details[0].pose.roll #=> Float
-    #   resp.face_details[0].pose.yaw #=> Float
-    #   resp.face_details[0].pose.pitch #=> Float
-    #   resp.face_details[0].quality.brightness #=> Float
-    #   resp.face_details[0].quality.sharpness #=> Float
-    #   resp.face_details[0].confidence #=> Float
-    #   resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
-    # @overload detect_faces(params = {})
-    # @param [Hash] params ({})
-    def detect_faces(params = {}, options = {})
-      req = build_request(:detect_faces, params)
-      req.send_request(options)
-    end
+    # Deletes faces from a collection. You specify a collection ID and an
+    # array of face IDs to remove from the collection.
+    #
+    # This operation requires permissions to perform the
+    # `rekognition:DeleteFaces` action.
+    #
+    # @option params [required, String] :collection_id
+    #   Collection from which to remove the specific faces.
+    #
+    # @option params [required, Array&lt;String&gt;] :face_ids
+    #   An array of face IDs to delete.
+    #
+    # @return [Types::DeleteFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::DeleteFacesResponse#deleted_faces #deleted_faces} => Array&lt;String&gt;
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.delete_faces({
+    #     collection_id: "CollectionId", # required
+    #     face_ids: ["FaceId"], # required
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.deleted_faces #=> Array
+    #   resp.deleted_faces[0] #=> String
+    #
+    # @overload delete_faces(params = {})
+    # @param [Hash] params ({})
+    def delete_faces(params = {}, options = {})
+      req = build_request(:delete_faces, params)
+      req.send_request(options)
+    end

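A cleanup sketch combining the delete_faces and delete_collection operations shown above; the collection ID and face ID are hypothetical.

  # Prune individual faces if you only want to trim the collection...
  rekognition.delete_faces(
    collection_id: 'my-app-user-123',                    # hypothetical
    face_ids: ['11111111-2222-3333-4444-555555555555']   # hypothetical face ID from index_faces
  )

  # ...or drop the whole collection, which removes every stored face.
  rekognition.delete_collection(collection_id: 'my-app-user-123')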
- [old lines 411-504 removed; content not shown in this view]
+    # Detects faces within an image (JPEG or PNG) that is provided as input.
+    #
+    # For each face detected, the operation returns face details including a
+    # bounding box of the face, a confidence value (that the bounding box
+    # contains a face), and a fixed set of attributes such as facial
+    # landmarks (for example, coordinates of eye and mouth), gender,
+    # presence of beard, sunglasses, etc.
+    #
+    # The face-detection algorithm is most effective on frontal faces. For
+    # non-frontal or obscured faces, the algorithm may not detect the faces
+    # or might detect faces with lower confidence.
+    #
+    # <note markdown="1"> This is a stateless API operation. That is, the operation does not
+    # persist any data.
+    #
+    # </note>
+    #
+    # For an example, see get-started-exercise-detect-faces.
+    #
+    # This operation requires permissions to perform the
+    # `rekognition:DetectFaces` action.
+    #
+    # @option params [required, Types::Image] :image
+    #   The image in which you want to detect faces. You can specify a blob or
+    #   an S3 object.
+    #
+    # @option params [Array&lt;String&gt;] :attributes
+    #   A list of facial attributes you would like to be returned. By default,
+    #   the API returns subset of facial attributes.
+    #
+    #   For example, you can specify the value as, \["ALL"\] or
+    #   \["DEFAULT"\]. If you provide both, \["ALL", "DEFAULT"\], the
+    #   service uses a logical AND operator to determine which attributes to
+    #   return (in this case, it is all attributes). If you specify all
+    #   attributes, Amazon Rekognition performs additional detection.
+    #
+    # @return [Types::DetectFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::DetectFacesResponse#face_details #face_details} => Array&lt;Types::FaceDetail&gt;
+    #   * {Types::DetectFacesResponse#orientation_correction #orientation_correction} => String
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.detect_faces({
+    #     image: { # required
+    #       bytes: "data",
+    #       s3_object: {
+    #         bucket: "S3Bucket",
+    #         name: "S3ObjectName",
+    #         version: "S3ObjectVersion",
+    #       },
+    #     },
+    #     attributes: ["DEFAULT"], # accepts DEFAULT, ALL
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.face_details #=> Array
+    #   resp.face_details[0].bounding_box.width #=> Float
+    #   resp.face_details[0].bounding_box.height #=> Float
+    #   resp.face_details[0].bounding_box.left #=> Float
+    #   resp.face_details[0].bounding_box.top #=> Float
+    #   resp.face_details[0].smile.value #=> Boolean
+    #   resp.face_details[0].smile.confidence #=> Float
+    #   resp.face_details[0].eyeglasses.value #=> Boolean
+    #   resp.face_details[0].eyeglasses.confidence #=> Float
+    #   resp.face_details[0].sunglasses.value #=> Boolean
+    #   resp.face_details[0].sunglasses.confidence #=> Float
+    #   resp.face_details[0].gender.value #=> String, one of "MALE", "FEMALE"
+    #   resp.face_details[0].gender.confidence #=> Float
+    #   resp.face_details[0].beard.value #=> Boolean
+    #   resp.face_details[0].beard.confidence #=> Float
+    #   resp.face_details[0].mustache.value #=> Boolean
+    #   resp.face_details[0].mustache.confidence #=> Float
+    #   resp.face_details[0].eyes_open.value #=> Boolean
+    #   resp.face_details[0].eyes_open.confidence #=> Float
+    #   resp.face_details[0].mouth_open.value #=> Boolean
+    #   resp.face_details[0].mouth_open.confidence #=> Float
+    #   resp.face_details[0].emotions #=> Array
+    #   resp.face_details[0].emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
+    #   resp.face_details[0].emotions[0].confidence #=> Float
+    #   resp.face_details[0].landmarks #=> Array
+    #   resp.face_details[0].landmarks[0].type #=> String, one of "EYE_LEFT", "EYE_RIGHT", "NOSE", "MOUTH_LEFT", "MOUTH_RIGHT", "LEFT_EYEBROW_LEFT", "LEFT_EYEBROW_RIGHT", "LEFT_EYEBROW_UP", "RIGHT_EYEBROW_LEFT", "RIGHT_EYEBROW_RIGHT", "RIGHT_EYEBROW_UP", "LEFT_EYE_LEFT", "LEFT_EYE_RIGHT", "LEFT_EYE_UP", "LEFT_EYE_DOWN", "RIGHT_EYE_LEFT", "RIGHT_EYE_RIGHT", "RIGHT_EYE_UP", "RIGHT_EYE_DOWN", "NOSE_LEFT", "NOSE_RIGHT", "MOUTH_UP", "MOUTH_DOWN", "LEFT_PUPIL", "RIGHT_PUPIL"
+    #   resp.face_details[0].landmarks[0].x #=> Float
+    #   resp.face_details[0].landmarks[0].y #=> Float
+    #   resp.face_details[0].pose.roll #=> Float
+    #   resp.face_details[0].pose.yaw #=> Float
+    #   resp.face_details[0].pose.pitch #=> Float
+    #   resp.face_details[0].quality.brightness #=> Float
+    #   resp.face_details[0].quality.sharpness #=> Float
+    #   resp.face_details[0].confidence #=> Float
+    #   resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
+    #
+    # @overload detect_faces(params = {})
+    # @param [Hash] params ({})
+    def detect_faces(params = {}, options = {})
+      req = build_request(:detect_faces, params)
+      req.send_request(options)
+    end

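A short sketch of the detect_faces operation documented above, requesting the full attribute set; the bucket and key are hypothetical.

  resp = rekognition.detect_faces(
    image: { s3_object: { bucket: 'my-bucket', name: 'group-photo.jpg' } },  # hypothetical
    attributes: ['ALL']   # request all facial attributes instead of the default subset
  )
  resp.face_details.each do |face|
    puts "confidence #{face.confidence}, smiling? #{face.smile.value}"
  end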
- [old lines 506-606 removed; content not shown in this view]
-    #   resp.face_records[0].face_detail.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
-    #   resp.face_records[0].face_detail.emotions[0].confidence #=> Float
-    #   resp.face_records[0].face_detail.landmarks #=> Array
-    #   resp.face_records[0].face_detail.landmarks[0].type #=> String, one of "EYE_LEFT", "EYE_RIGHT", "NOSE", "MOUTH_LEFT", "MOUTH_RIGHT", "LEFT_EYEBROW_LEFT", "LEFT_EYEBROW_RIGHT", "LEFT_EYEBROW_UP", "RIGHT_EYEBROW_LEFT", "RIGHT_EYEBROW_RIGHT", "RIGHT_EYEBROW_UP", "LEFT_EYE_LEFT", "LEFT_EYE_RIGHT", "LEFT_EYE_UP", "LEFT_EYE_DOWN", "RIGHT_EYE_LEFT", "RIGHT_EYE_RIGHT", "RIGHT_EYE_UP", "RIGHT_EYE_DOWN", "NOSE_LEFT", "NOSE_RIGHT", "MOUTH_UP", "MOUTH_DOWN", "LEFT_PUPIL", "RIGHT_PUPIL"
-    #   resp.face_records[0].face_detail.landmarks[0].x #=> Float
-    #   resp.face_records[0].face_detail.landmarks[0].y #=> Float
-    #   resp.face_records[0].face_detail.pose.roll #=> Float
-    #   resp.face_records[0].face_detail.pose.yaw #=> Float
-    #   resp.face_records[0].face_detail.pose.pitch #=> Float
-    #   resp.face_records[0].face_detail.quality.brightness #=> Float
-    #   resp.face_records[0].face_detail.quality.sharpness #=> Float
-    #   resp.face_records[0].face_detail.confidence #=> Float
-    #   resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
-    # @overload index_faces(params = {})
-    # @param [Hash] params ({})
-    def index_faces(params = {}, options = {})
-      req = build_request(:index_faces, params)
-      req.send_request(options)
-    end
+    # Detects instances of real-world labels within an image (JPEG or PNG)
+    # provided as input. This includes objects like flower, tree, and table;
+    # events like wedding, graduation, and birthday party; and concepts like
+    # landscape, evening, and nature. For an example, see
+    # get-started-exercise-detect-labels.
+    #
+    # For each object, scene, and concept the API returns one or more
+    # labels. Each label provides the object name, and the level of
+    # confidence that the image contains the object. For example, suppose
+    # the input image has a lighthouse, the sea, and a rock. The response
+    # will include all three labels, one for each object.
+    #
+    # `\{Name: lighthouse, Confidence: 98.4629\}`
+    #
+    # `\{Name: rock,Confidence: 79.2097\}`
+    #
+    # ` \{Name: sea,Confidence: 75.061\}`
+    #
+    # In the preceding example, the operation returns one label for each of
+    # the three objects. The operation can also return multiple labels for
+    # the same object in the image. For example, if the input image shows a
+    # flower (for example, a tulip), the operation might return the
+    # following three labels.
+    #
+    # `\{Name: flower,Confidence: 99.0562\}`
+    #
+    # `\{Name: plant,Confidence: 99.0562\}`
+    #
+    # `\{Name: tulip,Confidence: 99.0562\}`
+    #
+    # In this example, the detection algorithm more precisely identifies the
+    # flower as a tulip.
+    #
+    # You can provide the input image as an S3 object or as base64-encoded
+    # bytes. In response, the API returns an array of labels. In addition,
+    # the response also includes the orientation correction. Optionally, you
+    # can specify `MinConfidence` to control the confidence threshold for
+    # the labels returned. The default is 50%. You can also add the
+    # `MaxLabels` parameter to limit the number of labels returned.
+    #
+    # <note markdown="1"> If the object detected is a person, the operation doesn't provide the
+    # same facial details that the DetectFaces operation provides.
+    #
+    # </note>
+    #
+    # This is a stateless API operation. That is, the operation does not
+    # persist any data.
+    #
+    # This operation requires permissions to perform the
+    # `rekognition:DetectLabels` action.
+    #
+    # @option params [required, Types::Image] :image
+    #   The input image. You can provide a blob of image bytes or an S3
+    #   object.
+    #
+    # @option params [Integer] :max_labels
+    #   Maximum number of labels you want the service to return in the
+    #   response. The service returns the specified number of highest
+    #   confidence labels.
+    #
+    # @option params [Float] :min_confidence
+    #   Specifies the minimum confidence level for the labels to return.
+    #   Amazon Rekognition doesn't return any labels with confidence lower
+    #   than this specified value.
+    #
+    #   If `minConfidence` is not specified, the operation returns labels with
+    #   a confidence values greater than or equal to 50 percent.
+    #
+    # @return [Types::DetectLabelsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::DetectLabelsResponse#labels #labels} => Array&lt;Types::Label&gt;
+    #   * {Types::DetectLabelsResponse#orientation_correction #orientation_correction} => String
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.detect_labels({
+    #     image: { # required
+    #       bytes: "data",
+    #       s3_object: {
+    #         bucket: "S3Bucket",
+    #         name: "S3ObjectName",
+    #         version: "S3ObjectVersion",
+    #       },
+    #     },
+    #     max_labels: 1,
+    #     min_confidence: 1.0,
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.labels #=> Array
+    #   resp.labels[0].name #=> String
+    #   resp.labels[0].confidence #=> Float
+    #   resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
+    #
+    # @overload detect_labels(params = {})
+    # @param [Hash] params ({})
+    def detect_labels(params = {}, options = {})
+      req = build_request(:detect_labels, params)
+      req.send_request(options)
+    end

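A sketch of the detect_labels operation documented above, passing the image as raw bytes; the local file name is hypothetical.

  resp = rekognition.detect_labels(
    image: { bytes: File.binread('lighthouse.jpg') },  # hypothetical local file, sent as raw bytes
    max_labels: 10,
    min_confidence: 75.0     # raise the default 50% threshold
  )
  resp.labels.each { |label| puts "#{label.name}: #{label.confidence.round(2)}" }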
- [old lines 627-659 removed; content not shown in this view]
+    # Detects faces in the input image and adds them to the specified
+    # collection.
+    #
+    # Amazon Rekognition does not save the actual faces detected. Instead,
+    # the underlying detection algorithm first detects the faces in the
+    # input image, and for each face extracts facial features into a feature
+    # vector, and stores it in the back-end database. Amazon Rekognition
+    # uses feature vectors when performing face match and search operations
+    # using the and operations.
+    #
+    # If you provide the optional `externalImageID` for the input image you
+    # provided, Amazon Rekognition associates this ID with all faces that it
+    # detects. When you call the operation, the response returns the
+    # external ID. You can use this external image ID to create a
+    # client-side index to associate the faces with each image. You can then
+    # use the index to find all faces in an image.
+    #
+    # In response, the operation returns an array of metadata for all
+    # detected faces. This includes, the bounding box of the detected face,
+    # confidence value (indicating the bounding box contains a face), a face
+    # ID assigned by the service for each face that is detected and stored,
+    # and an image ID assigned by the service for the input image If you
+    # request all facial attributes (using the `detectionAttributes`
+    # parameter, Amazon Rekognition returns detailed facial attributes such
+    # as facial landmarks (for example, location of eye and mount) and other
+    # facial attributes such gender. If you provide the same image, specify
+    # the same collection, and use the same external ID in the `IndexFaces`
+    # operation, Amazon Rekognition doesn't save duplicate face metadata.
+    #
+    # For an example, see example2.
+    #
+    # This operation requires permissions to perform the
+    # `rekognition:IndexFaces` action.
+    #
+    # @option params [required, String] :collection_id
+    #   ID of an existing collection to which you want to add the faces that
+    #   are detected in the input images.
+    #
+    # @option params [required, Types::Image] :image
+    #   Provides the source image either as bytes or an S3 object.
+    #
+    #   The region for the S3 bucket containing the S3 object must match the
+    #   region you use for Amazon Rekognition operations.
+    #
+    #   You may need to Base64-encode the image bytes depending on the
+    #   language you are using and whether or not you are using the AWS SDK.
+    #   For more information, see example4.
+    #
+    #   If you use the Amazon CLI to call Amazon Rekognition operations,
+    #   passing image bytes using the Bytes property is not supported. You
+    #   must first upload the image to an Amazon S3 bucket and then call the
+    #   operation using the S3Object property.
+    #
+    #   For Amazon Rekognition to process an S3 object, the user must have
+    #   permission to access the S3 object. For more information, see
+    #   manage-access-resource-policies.
+    #
+    # @option params [String] :external_image_id
+    #   ID you want to assign to all the faces detected in the image.
+    #
+    # @option params [Array&lt;String&gt;] :detection_attributes
+    #   (Optional) Returns detailed attributes of indexed faces. By default,
+    #   the operation returns a subset of the facial attributes.
+    #
+    #   For example, you can specify the value as, \["ALL"\] or
+    #   \["DEFAULT"\]. If you provide both, \["ALL", "DEFAULT"\], Amazon
+    #   Rekognition uses the logical AND operator to determine which
+    #   attributes to return (in this case, it is all attributes). If you
+    #   specify all attributes, the service performs additional detection, in
+    #   addition to the default.
+    #
+    # @return [Types::IndexFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::IndexFacesResponse#face_records #face_records} => Array&lt;Types::FaceRecord&gt;
+    #   * {Types::IndexFacesResponse#orientation_correction #orientation_correction} => String
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.index_faces({
+    #     collection_id: "CollectionId", # required
+    #     image: { # required
+    #       bytes: "data",
+    #       s3_object: {
+    #         bucket: "S3Bucket",
+    #         name: "S3ObjectName",
+    #         version: "S3ObjectVersion",
+    #       },
+    #     },
+    #     external_image_id: "ExternalImageId",
+    #     detection_attributes: ["DEFAULT"], # accepts DEFAULT, ALL
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.face_records #=> Array
+    #   resp.face_records[0].face.face_id #=> String
+    #   resp.face_records[0].face.bounding_box.width #=> Float
+    #   resp.face_records[0].face.bounding_box.height #=> Float
+    #   resp.face_records[0].face.bounding_box.left #=> Float
+    #   resp.face_records[0].face.bounding_box.top #=> Float
+    #   resp.face_records[0].face.image_id #=> String
+    #   resp.face_records[0].face.external_image_id #=> String
+    #   resp.face_records[0].face.confidence #=> Float
+    #   resp.face_records[0].face_detail.bounding_box.width #=> Float
+    #   resp.face_records[0].face_detail.bounding_box.height #=> Float
+    #   resp.face_records[0].face_detail.bounding_box.left #=> Float
+    #   resp.face_records[0].face_detail.bounding_box.top #=> Float
+    #   resp.face_records[0].face_detail.smile.value #=> Boolean
+    #   resp.face_records[0].face_detail.smile.confidence #=> Float
+    #   resp.face_records[0].face_detail.eyeglasses.value #=> Boolean
+    #   resp.face_records[0].face_detail.eyeglasses.confidence #=> Float
+    #   resp.face_records[0].face_detail.sunglasses.value #=> Boolean
+    #   resp.face_records[0].face_detail.sunglasses.confidence #=> Float
+    #   resp.face_records[0].face_detail.gender.value #=> String, one of "MALE", "FEMALE"
+    #   resp.face_records[0].face_detail.gender.confidence #=> Float
+    #   resp.face_records[0].face_detail.beard.value #=> Boolean
+    #   resp.face_records[0].face_detail.beard.confidence #=> Float
+    #   resp.face_records[0].face_detail.mustache.value #=> Boolean
+    #   resp.face_records[0].face_detail.mustache.confidence #=> Float
+    #   resp.face_records[0].face_detail.eyes_open.value #=> Boolean
+    #   resp.face_records[0].face_detail.eyes_open.confidence #=> Float
+    #   resp.face_records[0].face_detail.mouth_open.value #=> Boolean
+    #   resp.face_records[0].face_detail.mouth_open.confidence #=> Float
+    #   resp.face_records[0].face_detail.emotions #=> Array
+    #   resp.face_records[0].face_detail.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
+    #   resp.face_records[0].face_detail.emotions[0].confidence #=> Float
+    #   resp.face_records[0].face_detail.landmarks #=> Array
+    #   resp.face_records[0].face_detail.landmarks[0].type #=> String, one of "EYE_LEFT", "EYE_RIGHT", "NOSE", "MOUTH_LEFT", "MOUTH_RIGHT", "LEFT_EYEBROW_LEFT", "LEFT_EYEBROW_RIGHT", "LEFT_EYEBROW_UP", "RIGHT_EYEBROW_LEFT", "RIGHT_EYEBROW_RIGHT", "RIGHT_EYEBROW_UP", "LEFT_EYE_LEFT", "LEFT_EYE_RIGHT", "LEFT_EYE_UP", "LEFT_EYE_DOWN", "RIGHT_EYE_LEFT", "RIGHT_EYE_RIGHT", "RIGHT_EYE_UP", "RIGHT_EYE_DOWN", "NOSE_LEFT", "NOSE_RIGHT", "MOUTH_UP", "MOUTH_DOWN", "LEFT_PUPIL", "RIGHT_PUPIL"
+    #   resp.face_records[0].face_detail.landmarks[0].x #=> Float
+    #   resp.face_records[0].face_detail.landmarks[0].y #=> Float
+    #   resp.face_records[0].face_detail.pose.roll #=> Float
+    #   resp.face_records[0].face_detail.pose.yaw #=> Float
+    #   resp.face_records[0].face_detail.pose.pitch #=> Float
+    #   resp.face_records[0].face_detail.quality.brightness #=> Float
+    #   resp.face_records[0].face_detail.quality.sharpness #=> Float
+    #   resp.face_records[0].face_detail.confidence #=> Float
+    #   resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
+    #
+    # @overload index_faces(params = {})
+    # @param [Hash] params ({})
+    def index_faces(params = {}, options = {})
+      req = build_request(:index_faces, params)
+      req.send_request(options)
+    end

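A sketch of the index_faces operation documented above, storing faces from an S3 object into the collection created earlier; collection ID, bucket, and key are hypothetical.

  resp = rekognition.index_faces(
    collection_id: 'my-app-user-123',                                    # hypothetical
    image: { s3_object: { bucket: 'my-bucket', name: 'profile.jpg' } },  # hypothetical
    external_image_id: 'profile.jpg',
    detection_attributes: ['DEFAULT']
  )
  resp.face_records.each do |record|
    puts "stored face #{record.face.face_id} (image #{record.face.image_id})"
  end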
- [old lines 661-699 removed; content not shown in this view]
-    # @overload list_faces(params = {})
-    # @param [Hash] params ({})
-    def list_faces(params = {}, options = {})
-      req = build_request(:list_faces, params)
-      req.send_request(options)
-    end
+    # Returns list of collection IDs in your account. If the result is
+    # truncated, the response also provides a `NextToken` that you can use
+    # in the subsequent request to fetch the next set of collection IDs.
+    #
+    # For an example, see example1.
+    #
+    # This operation requires permissions to perform the
+    # `rekognition:ListCollections` action.
+    #
+    # @option params [String] :next_token
+    #   Pagination token from the previous response.
+    #
+    # @option params [Integer] :max_results
+    #   Maximum number of collection IDs to return.
+    #
+    # @return [Types::ListCollectionsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::ListCollectionsResponse#collection_ids #collection_ids} => Array&lt;String&gt;
+    #   * {Types::ListCollectionsResponse#next_token #next_token} => String
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.list_collections({
+    #     next_token: "PaginationToken",
+    #     max_results: 1,
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.collection_ids #=> Array
+    #   resp.collection_ids[0] #=> String
+    #   resp.next_token #=> String
+    #
+    # @overload list_collections(params = {})
+    # @param [Hash] params ({})
+    def list_collections(params = {}, options = {})
+      req = build_request(:list_collections, params)
+      req.send_request(options)
+    end

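A pagination sketch for the list_collections operation documented above, following `NextToken` until the listing is exhausted; the page size is arbitrary.

  params = { max_results: 50 }
  loop do
    resp = rekognition.list_collections(params)
    resp.collection_ids.each { |id| puts id }
    break unless resp.next_token          # nil token means no more pages
    params[:next_token] = resp.next_token
  end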
- [old lines 707-758 removed; content not shown in this view]
-    #   resp.face_matches[0].face.bounding_box.left #=> Float
-    #   resp.face_matches[0].face.bounding_box.top #=> Float
-    #   resp.face_matches[0].face.image_id #=> String
-    #   resp.face_matches[0].face.external_image_id #=> String
-    #   resp.face_matches[0].face.confidence #=> Float
-    # @overload search_faces(params = {})
-    # @param [Hash] params ({})
-    def search_faces(params = {}, options = {})
-      req = build_request(:search_faces, params)
-      req.send_request(options)
-    end
+    # Returns metadata for faces in the specified collection. This metadata
+    # includes information such as the bounding box coordinates, the
+    # confidence (that the bounding box contains a face), and face ID. For
+    # an example, see example3.
+    #
+    # This operation requires permissions to perform the
+    # `rekognition:ListFaces` action.
+    #
+    # @option params [required, String] :collection_id
+    #   ID of the collection from which to list the faces.
+    #
+    # @option params [String] :next_token
+    #   If the previous response was incomplete (because there is more data to
+    #   retrieve), Amazon Rekognition returns a pagination token in the
+    #   response. You can use this pagination token to retrieve the next set
+    #   of faces.
+    #
+    # @option params [Integer] :max_results
+    #   Maximum number of faces to return.
+    #
+    # @return [Types::ListFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::ListFacesResponse#faces #faces} => Array&lt;Types::Face&gt;
+    #   * {Types::ListFacesResponse#next_token #next_token} => String
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.list_faces({
+    #     collection_id: "CollectionId", # required
+    #     next_token: "PaginationToken",
+    #     max_results: 1,
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.faces #=> Array
+    #   resp.faces[0].face_id #=> String
+    #   resp.faces[0].bounding_box.width #=> Float
+    #   resp.faces[0].bounding_box.height #=> Float
+    #   resp.faces[0].bounding_box.left #=> Float
+    #   resp.faces[0].bounding_box.top #=> Float
+    #   resp.faces[0].image_id #=> String
+    #   resp.faces[0].external_image_id #=> String
+    #   resp.faces[0].confidence #=> Float
+    #   resp.next_token #=> String
+    #
+    # @overload list_faces(params = {})
+    # @param [Hash] params ({})
+    def list_faces(params = {}, options = {})
+      req = build_request(:list_faces, params)
+      req.send_request(options)
+    end

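A sketch of the list_faces operation documented above; the collection ID is hypothetical.

  resp = rekognition.list_faces(collection_id: 'my-app-user-123', max_results: 100)  # hypothetical ID
  resp.faces.each do |face|
    puts "#{face.face_id} external=#{face.external_image_id} conf=#{face.confidence}"
  end
  # Pass resp.next_token back in as :next_token to fetch the remaining faces.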
771
|
-
|
772
|
-
|
773
|
-
|
774
|
-
|
775
|
-
|
776
|
-
|
777
|
-
|
778
|
-
|
779
|
-
|
780
|
-
|
781
|
-
|
782
|
-
|
783
|
-
|
784
|
-
|
785
|
-
|
786
|
-
|
787
|
-
|
788
|
-
|
789
|
-
|
790
|
-
|
791
|
-
|
792
|
-
|
793
|
-
|
794
|
-
|
[old lines 795-842: removed content not rendered in this view]
- #   resp.face_matches[0].face.bounding_box.top #=> Float
- #   resp.face_matches[0].face.image_id #=> String
- #   resp.face_matches[0].face.external_image_id #=> String
- #   resp.face_matches[0].face.confidence #=> Float
- # @overload search_faces_by_image(params = {})
- # @param [Hash] params ({})
- def search_faces_by_image(params = {}, options = {})
-   req = build_request(:search_faces_by_image, params)
-   req.send_request(options)
- end
+ # For a given input face ID, searches for matching faces in the
+ # collection the face belongs to. You get a face ID when you add a face
+ # to the collection using the IndexFaces operation. The operation
+ # compares the features of the input face with faces in the specified
+ # collection.
+ #
+ # <note markdown="1"> You can also search faces without indexing faces by using the
+ # `SearchFacesByImage` operation.
+ #
+ # </note>
+ #
+ # The operation response returns an array of faces that match, ordered
+ # by similarity score with the highest similarity first. More
+ # specifically, it is an array of metadata for each face match that is
+ # found. Along with the metadata, the response also includes a
+ # `confidence` value for each face match, indicating the confidence that
+ # the specific face matches the input face.
+ #
+ # For an example, see example3.
+ #
+ # This operation requires permissions to perform the
+ # `rekognition:SearchFaces` action.
+ #
+ # @option params [required, String] :collection_id
+ #   ID of the collection the face belongs to.
+ #
+ # @option params [required, String] :face_id
+ #   ID of a face to find matches for in the collection.
+ #
+ # @option params [Integer] :max_faces
+ #   Maximum number of faces to return. The operation returns the maximum
+ #   number of faces with the highest confidence in the match.
+ #
+ # @option params [Float] :face_match_threshold
+ #   Optional value specifying the minimum confidence in the face match to
+ #   return. For example, don't return any matches where confidence in
+ #   matches is less than 70%.
+ #
+ # @return [Types::SearchFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ #   * {Types::SearchFacesResponse#searched_face_id #searched_face_id} => String
+ #   * {Types::SearchFacesResponse#face_matches #face_matches} => Array&lt;Types::FaceMatch&gt;
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.search_faces({
+ #     collection_id: "CollectionId", # required
+ #     face_id: "FaceId", # required
+ #     max_faces: 1,
+ #     face_match_threshold: 1.0,
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.searched_face_id #=> String
+ #   resp.face_matches #=> Array
+ #   resp.face_matches[0].similarity #=> Float
+ #   resp.face_matches[0].face.face_id #=> String
+ #   resp.face_matches[0].face.bounding_box.width #=> Float
+ #   resp.face_matches[0].face.bounding_box.height #=> Float
+ #   resp.face_matches[0].face.bounding_box.left #=> Float
+ #   resp.face_matches[0].face.bounding_box.top #=> Float
+ #   resp.face_matches[0].face.image_id #=> String
+ #   resp.face_matches[0].face.external_image_id #=> String
+ #   resp.face_matches[0].face.confidence #=> Float
+ #
+ # @overload search_faces(params = {})
+ # @param [Hash] params ({})
+ def search_faces(params = {}, options = {})
+   req = build_request(:search_faces, params)
+   req.send_request(options)
+ end
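For orientation, here is a minimal usage sketch for the search_faces operation documented in the added lines above. It is not part of the gem's generated code; the region, collection ID, and face ID are illustrative assumptions.

require 'aws-sdk-rekognition'

# Assumed region and identifiers, for illustration only.
client = Aws::Rekognition::Client.new(region: 'us-east-1')

resp = client.search_faces(
  collection_id: 'my-collection',                      # assumed collection name
  face_id: '11111111-2222-3333-4444-555555555555',     # assumed face ID returned earlier by IndexFaces
  max_faces: 10,
  face_match_threshold: 90.0
)

resp.face_matches.each do |match|
  # Each match carries a similarity score plus the stored face metadata.
  puts "#{match.face.face_id} similarity=#{match.similarity}"
end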

-
+ # For a given input image, first detects the largest face in the image,
+ # and then searches the specified collection for matching faces. The
+ # operation compares the features of the input face with faces in the
+ # specified collection.
+ #
+ # <note markdown="1"> To search for all faces in an input image, you might first call the
+ # operation, and then use the face IDs returned in subsequent calls to
+ # the operation.
+ #
+ # You can also call the `DetectFaces` operation and use the bounding
+ # boxes in the response to make face crops, which then you can pass in
+ # to the `SearchFacesByImage` operation.
+ #
+ # </note>
+ #
+ # The response returns an array of faces that match, ordered by
+ # similarity score with the highest similarity first. More specifically,
+ # it is an array of metadata for each face match found. Along with the
+ # metadata, the response also includes a `similarity` indicating how
+ # similar the face is to the input face. In the response, the operation
+ # also returns the bounding box (and a confidence level that the
+ # bounding box contains a face) of the face that Amazon Rekognition used
+ # for the input image.
+ #
+ # For an example, see example3.
+ #
+ # This operation requires permissions to perform the
+ # `rekognition:SearchFacesByImage` action.
+ #
+ # @option params [required, String] :collection_id
+ #   ID of the collection to search.
+ #
+ # @option params [required, Types::Image] :image
+ #   Provides the source image either as bytes or an S3 object.
+ #
+ #   The region for the S3 bucket containing the S3 object must match the
+ #   region you use for Amazon Rekognition operations.
+ #
+ #   You may need to Base64-encode the image bytes depending on the
+ #   language you are using and whether or not you are using the AWS SDK.
+ #   For more information, see example4.
+ #
+ #   If you use the Amazon CLI to call Amazon Rekognition operations,
+ #   passing image bytes using the Bytes property is not supported. You
+ #   must first upload the image to an Amazon S3 bucket and then call the
+ #   operation using the S3Object property.
+ #
+ #   For Amazon Rekognition to process an S3 object, the user must have
+ #   permission to access the S3 object. For more information, see
+ #   manage-access-resource-policies.
+ #
+ # @option params [Integer] :max_faces
+ #   Maximum number of faces to return. The operation returns the maximum
+ #   number of faces with the highest confidence in the match.
+ #
+ # @option params [Float] :face_match_threshold
+ #   (Optional) Specifies the minimum confidence in the face match to
+ #   return. For example, don't return any matches where confidence in
+ #   matches is less than 70%.
+ #
+ # @return [Types::SearchFacesByImageResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ #   * {Types::SearchFacesByImageResponse#searched_face_bounding_box #searched_face_bounding_box} => Types::BoundingBox
+ #   * {Types::SearchFacesByImageResponse#searched_face_confidence #searched_face_confidence} => Float
+ #   * {Types::SearchFacesByImageResponse#face_matches #face_matches} => Array&lt;Types::FaceMatch&gt;
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.search_faces_by_image({
+ #     collection_id: "CollectionId", # required
+ #     image: { # required
+ #       bytes: "data",
+ #       s3_object: {
+ #         bucket: "S3Bucket",
+ #         name: "S3ObjectName",
+ #         version: "S3ObjectVersion",
+ #       },
+ #     },
+ #     max_faces: 1,
+ #     face_match_threshold: 1.0,
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.searched_face_bounding_box.width #=> Float
+ #   resp.searched_face_bounding_box.height #=> Float
+ #   resp.searched_face_bounding_box.left #=> Float
+ #   resp.searched_face_bounding_box.top #=> Float
+ #   resp.searched_face_confidence #=> Float
+ #   resp.face_matches #=> Array
+ #   resp.face_matches[0].similarity #=> Float
+ #   resp.face_matches[0].face.face_id #=> String
+ #   resp.face_matches[0].face.bounding_box.width #=> Float
+ #   resp.face_matches[0].face.bounding_box.height #=> Float
+ #   resp.face_matches[0].face.bounding_box.left #=> Float
+ #   resp.face_matches[0].face.bounding_box.top #=> Float
+ #   resp.face_matches[0].face.image_id #=> String
+ #   resp.face_matches[0].face.external_image_id #=> String
+ #   resp.face_matches[0].face.confidence #=> Float
+ #
+ # @overload search_faces_by_image(params = {})
+ # @param [Hash] params ({})
+ def search_faces_by_image(params = {}, options = {})
+   req = build_request(:search_faces_by_image, params)
+   req.send_request(options)
+ end
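A companion sketch for search_faces_by_image, again with hedged placeholders: the bucket and object key are assumptions, and the same call could instead pass raw bytes (for example, bytes: File.binread('face.jpg')) as the image source.

require 'aws-sdk-rekognition'

client = Aws::Rekognition::Client.new(region: 'us-east-1') # assumed region

resp = client.search_faces_by_image(
  collection_id: 'my-collection',        # assumed collection name
  image: {
    s3_object: {
      bucket: 'my-bucket',               # assumed bucket in the same region as the client
      name: 'photos/group-shot.jpg'      # assumed object key
    }
  },
  max_faces: 5,
  face_match_threshold: 80.0
)

# Bounding box of the (largest) face Rekognition used as the search input.
box = resp.searched_face_bounding_box
puts "searched face at left=#{box.left}, top=#{box.top}" if box

resp.face_matches.each do |match|
  puts "#{match.face.external_image_id || match.face.face_id}: #{match.similarity}"
end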

-
- # @api private
- def build_request(operation_name, params = {})
-   handlers = @handlers.for(operation_name)
-   context = Seahorse::Client::RequestContext.new(
-     operation_name: operation_name,
-     operation: config.api.operation(operation_name),
-     client: self,
-     params: params,
-     config: config)
-   context[:gem_name] = 'aws-sdk-rekognition'
-   context[:gem_version] = '1.0.0.rc2'
-   Seahorse::Client::Request.new(handlers, context)
- end
+ # @!endgroup

-
-
-
-
-
+ # @param params ({})
+ # @api private
+ def build_request(operation_name, params = {})
+   handlers = @handlers.for(operation_name)
+   context = Seahorse::Client::RequestContext.new(
+     operation_name: operation_name,
+     operation: config.api.operation(operation_name),
+     client: self,
+     params: params,
+     config: config)
+   context[:gem_name] = 'aws-sdk-rekognition'
+   context[:gem_version] = '1.0.0.rc2'
+   Seahorse::Client::Request.new(handlers, context)
+ end
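Because every operation method above funnels through build_request and send_request, responses can be faked at the client level in tests. A minimal sketch, assuming the standard aws-sdk-core stubbing support (stub_responses); the stub data below is invented for illustration and is not shipped with this gem.

require 'aws-sdk-rekognition'

# Construct a client that never talks to the network.
client = Aws::Rekognition::Client.new(region: 'us-east-1', stub_responses: true)

# Stub :search_faces with canned data shaped like the documented response.
client.stub_responses(:search_faces, {
  searched_face_id: 'stub-face-id',
  face_matches: [
    { similarity: 99.0, face: { face_id: 'stub-match-id', confidence: 99.9 } }
  ]
})

resp = client.search_faces(collection_id: 'any', face_id: 'any')
puts resp.face_matches.first.similarity #=> 99.0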

-
+ # @api private
+ # @deprecated
+ def waiter_names
+   []
+ end

-
- attr_reader :identifier
+ class << self

-
-
- Errors
- end
+   # @api private
+   attr_reader :identifier

+   # @api private
+   def errors_module
+     Errors
+   end
 end
+
 end
 end
 end
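Finally, a hedged sketch of error handling around these calls. The specific error subclasses exposed by this release are not shown in this diff, so the sketch rescues only the generic Aws::Errors::ServiceError base class from aws-sdk-core; the region and identifiers are again assumptions.

require 'aws-sdk-rekognition'

client = Aws::Rekognition::Client.new(region: 'us-east-1') # assumed region

begin
  client.search_faces(collection_id: 'missing-collection', face_id: 'no-such-face')
rescue Aws::Errors::ServiceError => e
  # Service-side failures (for example, a missing collection) surface as ServiceError subclasses.
  warn "Rekognition call failed: #{e.class}: #{e.message}"
end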