aws-sdk-rekognition 1.0.0.rc2

@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: f0a24ba68fd43b9fff8b4f52921aa82e7b2091f6
+   data.tar.gz: d80c8eaa10669aa47b3be60dc0f0d98ea570937f
+ SHA512:
+   metadata.gz: d9dd3e1e9c8b4416f3795da412f9002bd4225511b9482a3b7c1839a223eeaaca746eeb2a44cfde00adf83c4964e943c5602256a6eb536e06174c7a4160998264
+   data.tar.gz: af6c3dfc05028575035204d64f1203877e7e0df5c7bdf10e939f2ee517d6270d602a23bfd1d18fd4f9e0624d3cf48f4f21be8dc11c1c5c8f89f940919aaf9acb
@@ -0,0 +1,47 @@
+ # WARNING ABOUT GENERATED CODE
+ #
+ # This file is generated. See the contributing guide for info on making contributions:
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
+ #
+ # WARNING ABOUT GENERATED CODE
+
+ require 'aws-sdk-core'
+ require 'aws-sigv4'
+
+ require_relative 'aws-sdk-rekognition/types'
+ require_relative 'aws-sdk-rekognition/client_api'
+ require_relative 'aws-sdk-rekognition/client'
+ require_relative 'aws-sdk-rekognition/errors'
+ require_relative 'aws-sdk-rekognition/resource'
+ require_relative 'aws-sdk-rekognition/customizations'
+
+ # This module provides support for Amazon Rekognition. This module is available in the
+ # `aws-sdk-rekognition` gem.
+ #
+ # # Client
+ #
+ # The {Client} class provides one method for each API operation. Operation
+ # methods each accept a hash of request parameters and return a response
+ # structure.
+ #
+ # See {Client} for more information.
+ #
+ # # Errors
+ #
+ # Errors returned from Amazon Rekognition all
+ # extend {Errors::ServiceError}.
+ #
+ #     begin
+ #       # do stuff
+ #     rescue Aws::Rekognition::Errors::ServiceError
+ #       # rescues all service API errors
+ #     end
+ #
+ # See {Errors} for more information.
+ #
+ # @service
+ module Aws::Rekognition
+
+   GEM_VERSION = '1.0.0.rc2'
+
+ end
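Taken together, the module docs above amount to the following usage pattern. A minimal sketch, assuming credentials are resolvable through the SDK's default provider chain; the region value is only an example:

    require 'aws-sdk-rekognition'

    client = Aws::Rekognition::Client.new(region: 'us-east-1')

    begin
      resp = client.list_collections(max_results: 5)
      resp.collection_ids.each { |id| puts id }
    rescue Aws::Rekognition::Errors::ServiceError => e
      # every Rekognition API error extends Errors::ServiceError
      warn "Rekognition call failed: #{e.message}"
    end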
@@ -0,0 +1,890 @@
+ # WARNING ABOUT GENERATED CODE
+ #
+ # This file is generated. See the contributing guide for info on making contributions:
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
+ #
+ # WARNING ABOUT GENERATED CODE
+
+ require 'seahorse/client/plugins/content_length.rb'
+ require 'aws-sdk-core/plugins/credentials_configuration.rb'
+ require 'aws-sdk-core/plugins/logging.rb'
+ require 'aws-sdk-core/plugins/param_converter.rb'
+ require 'aws-sdk-core/plugins/param_validator.rb'
+ require 'aws-sdk-core/plugins/user_agent.rb'
+ require 'aws-sdk-core/plugins/helpful_socket_errors.rb'
+ require 'aws-sdk-core/plugins/retry_errors.rb'
+ require 'aws-sdk-core/plugins/global_configuration.rb'
+ require 'aws-sdk-core/plugins/regional_endpoint.rb'
+ require 'aws-sdk-core/plugins/response_paging.rb'
+ require 'aws-sdk-core/plugins/stub_responses.rb'
+ require 'aws-sdk-core/plugins/idempotency_token.rb'
+ require 'aws-sdk-core/plugins/signature_v4.rb'
+ require 'aws-sdk-core/plugins/protocols/json_rpc.rb'
+
+ Aws::Plugins::GlobalConfiguration.add_identifier(:rekognition)
+
+ module Aws
+   module Rekognition
+     class Client < Seahorse::Client::Base
+
+       include Aws::ClientStubs
+
+       @identifier = :rekognition
+
+       set_api(ClientApi::API)
+
+       add_plugin(Seahorse::Client::Plugins::ContentLength)
+       add_plugin(Aws::Plugins::CredentialsConfiguration)
+       add_plugin(Aws::Plugins::Logging)
+       add_plugin(Aws::Plugins::ParamConverter)
+       add_plugin(Aws::Plugins::ParamValidator)
+       add_plugin(Aws::Plugins::UserAgent)
+       add_plugin(Aws::Plugins::HelpfulSocketErrors)
+       add_plugin(Aws::Plugins::RetryErrors)
+       add_plugin(Aws::Plugins::GlobalConfiguration)
+       add_plugin(Aws::Plugins::RegionalEndpoint)
+       add_plugin(Aws::Plugins::ResponsePaging)
+       add_plugin(Aws::Plugins::StubResponses)
+       add_plugin(Aws::Plugins::IdempotencyToken)
+       add_plugin(Aws::Plugins::SignatureV4)
+       add_plugin(Aws::Plugins::Protocols::JsonRpc)
+
+       # @option options [required, Aws::CredentialProvider] :credentials
+       #   Your AWS credentials. This can be an instance of any one of the
+       #   following classes:
+       #
+       #   * `Aws::Credentials` - Used for configuring static, non-refreshing
+       #     credentials.
+       #
+       #   * `Aws::InstanceProfileCredentials` - Used for loading credentials
+       #     from an EC2 IMDS on an EC2 instance.
+       #
+       #   * `Aws::SharedCredentials` - Used for loading credentials from a
+       #     shared file, such as `~/.aws/config`.
+       #
+       #   * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
+       #
+       #   When `:credentials` are not configured directly, the following
+       #   locations will be searched for credentials:
+       #
+       #   * `Aws.config[:credentials]`
+       #   * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
+       #   * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
+       #   * `~/.aws/credentials`
+       #   * `~/.aws/config`
+       #   * EC2 IMDS instance profile - When used by default, the timeouts are
+       #     very aggressive. Construct and pass an instance of
+       #     `Aws::InstanceProfileCredentials` to enable retries and extended
+       #     timeouts.
+       # @option options [required, String] :region
+       #   The AWS region to connect to. The configured `:region` is
+       #   used to determine the service `:endpoint`. When not passed,
+       #   a default `:region` is searched for in the following locations:
+       #
+       #   * `Aws.config[:region]`
+       #   * `ENV['AWS_REGION']`
+       #   * `ENV['AMAZON_REGION']`
+       #   * `ENV['AWS_DEFAULT_REGION']`
+       #   * `~/.aws/credentials`
+       #   * `~/.aws/config`
+       # @option options [String] :access_key_id
+       # @option options [Boolean] :convert_params (true)
+       #   When `true`, an attempt is made to coerce request parameters into
+       #   the required types.
+       # @option options [String] :endpoint
+       #   The client endpoint is normally constructed from the `:region`
+       #   option. You should only configure an `:endpoint` when connecting
+       #   to test endpoints. This should be a valid HTTP(S) URI.
+       # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
+       #   The log formatter.
+       # @option options [Symbol] :log_level (:info)
+       #   The log level to send messages to the `:logger` at.
+       # @option options [Logger] :logger
+       #   The Logger instance to send log messages to. If this option
+       #   is not set, logging will be disabled.
+       # @option options [String] :profile ("default")
+       #   Used when loading credentials from the shared credentials file
+       #   at HOME/.aws/credentials. When not specified, 'default' is used.
+       # @option options [Integer] :retry_limit (3)
+       #   The maximum number of times to retry failed requests. Only
+       #   ~ 500 level server errors and certain ~ 400 level client errors
+       #   are retried. Generally, these are throttling errors, data
+       #   checksum errors, networking errors, timeout errors and auth
+       #   errors from expired credentials.
+       # @option options [String] :secret_access_key
+       # @option options [String] :session_token
+       # @option options [Boolean] :simple_json (false)
+       #   Disables request parameter conversion, validation, and formatting.
+       #   Also disables response data type conversions. This option is useful
+       #   when you want to ensure the highest level of performance by
+       #   avoiding overhead of walking request parameters and response data
+       #   structures.
+       #
+       #   When `:simple_json` is enabled, the request parameters hash must
+       #   be formatted exactly as the API expects.
+       # @option options [Boolean] :stub_responses (false)
+       #   Causes the client to return stubbed responses. By default, fake
+       #   responses are generated and returned. You can specify
+       #   the response data to return or errors to raise by calling
+       #   {ClientStubs#stub_responses}. See {ClientStubs} for more information.
+       #
+       #   ** Please note ** When response stubbing is enabled, no HTTP
+       #   requests are made, and retries are disabled.
+       # @option options [Boolean] :validate_params (true)
+       #   When `true`, request parameters are validated before
+       #   sending the request.
+       def initialize(*args)
+         super
+       end
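To illustrate the constructor options documented above, a short sketch; the credential values are placeholders, and the second client shows the `:stub_responses` flow used for testing:

    # Static credentials and an explicit region
    client = Aws::Rekognition::Client.new(
      region: 'us-west-2',
      credentials: Aws::Credentials.new('AKIA_PLACEHOLDER', 'SECRET_PLACEHOLDER')
    )

    # A stubbed client for tests: no HTTP requests are made
    stubbed = Aws::Rekognition::Client.new(stub_responses: true)
    stubbed.stub_responses(:list_collections, collection_ids: ['test-collection'])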
+
+       # @!group API Operations
+
+       # Compares a face in the *source* input image with each face detected in
+       # the *target* input image.
+       #
+       # <note markdown="1"> If the source image contains multiple faces, the service detects the
+       # largest face and uses it to compare with each face detected in the
+       # target image.
+       #
+       # </note>
+       #
+       # In response, the operation returns an array of face matches ordered by
+       # similarity score with the highest similarity scores first. For each
+       # face match, the response provides a bounding box of the face and
+       # `confidence` value (indicating the level of confidence that the
+       # bounding box contains a face). The response also provides a
+       # `similarity` score, which indicates how closely the faces match.
+       #
+       # <note markdown="1"> By default, only faces with a similarity score of greater than or
+       # equal to 80% are returned in the response. You can change this value.
+       #
+       # </note>
+       #
+       # In addition to the face matches, the response returns information
+       # about the face in the source image, including the bounding box of the
+       # face and confidence value.
+       #
+       # <note markdown="1"> This is a stateless API operation. That is, the operation does not
+       # persist any data.
+       #
+       # </note>
+       #
+       # For an example, see get-started-exercise-compare-faces.
+       #
+       # This operation requires permissions to perform the
+       # `rekognition:CompareFaces` action.
+       # @option params [required, Types::Image] :source_image
+       #   Source image either as bytes or an Amazon S3 object
+       # @option params [required, Types::Image] :target_image
+       #   Target image either as bytes or an Amazon S3 object
+       # @option params [Float] :similarity_threshold
+       #   The minimum level of confidence in the match you want included in the
+       #   result.
+       # @return [Types::CompareFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::CompareFacesResponse#source_image_face #SourceImageFace} => Types::ComparedSourceImageFace
+       #   * {Types::CompareFacesResponse#face_matches #FaceMatches} => Array<Types::CompareFacesMatch>
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.compare_faces({
+       #     source_image: { # required
+       #       bytes: "data",
+       #       s3_object: {
+       #         bucket: "S3Bucket",
+       #         name: "S3ObjectName",
+       #         version: "S3ObjectVersion",
+       #       },
+       #     },
+       #     target_image: { # required
+       #       bytes: "data",
+       #       s3_object: {
+       #         bucket: "S3Bucket",
+       #         name: "S3ObjectName",
+       #         version: "S3ObjectVersion",
+       #       },
+       #     },
+       #     similarity_threshold: 1.0,
+       #   })
+       #
+       # @example Response structure
+       #   resp.source_image_face.bounding_box.width #=> Float
+       #   resp.source_image_face.bounding_box.height #=> Float
+       #   resp.source_image_face.bounding_box.left #=> Float
+       #   resp.source_image_face.bounding_box.top #=> Float
+       #   resp.source_image_face.confidence #=> Float
+       #   resp.face_matches #=> Array
+       #   resp.face_matches[0].similarity #=> Float
+       #   resp.face_matches[0].face.bounding_box.width #=> Float
+       #   resp.face_matches[0].face.bounding_box.height #=> Float
+       #   resp.face_matches[0].face.bounding_box.left #=> Float
+       #   resp.face_matches[0].face.bounding_box.top #=> Float
+       #   resp.face_matches[0].face.confidence #=> Float
+       # @overload compare_faces(params = {})
+       # @param [Hash] params ({})
+       def compare_faces(params = {}, options = {})
+         req = build_request(:compare_faces, params)
+         req.send_request(options)
+       end
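A hedged sketch of the compare_faces flow described above, keeping only strong matches; the S3 bucket and keys are hypothetical, and `client` is assumed to be a configured Aws::Rekognition::Client:

    resp = client.compare_faces(
      source_image: { s3_object: { bucket: 'my-bucket', name: 'source.jpg' } },
      target_image: { s3_object: { bucket: 'my-bucket', name: 'target.jpg' } },
      similarity_threshold: 80.0 # mirrors the documented default cutoff
    )
    resp.face_matches.each do |match|
      box = match.face.bounding_box
      puts format('similarity %.1f%% at (left: %f, top: %f)', match.similarity, box.left, box.top)
    end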
+
+       # Creates a collection in an AWS region. You can add faces to the
+       # collection using the IndexFaces operation.
+       #
+       # For example, you might create collections, one for each of your
+       # application users. A user can then index faces using the `IndexFaces`
+       # operation and persist results in a specific collection. Then, a user
+       # can search the collection for faces in the user-specific container.
+       #
+       # For an example, see example1.
+       #
+       # This operation requires permissions to perform the
+       # `rekognition:CreateCollection` action.
+       # @option params [required, String] :collection_id
+       #   ID for the collection that you are creating.
+       # @return [Types::CreateCollectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::CreateCollectionResponse#status_code #StatusCode} => Integer
+       #   * {Types::CreateCollectionResponse#collection_arn #CollectionArn} => String
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.create_collection({
+       #     collection_id: "CollectionId", # required
+       #   })
+       #
+       # @example Response structure
+       #   resp.status_code #=> Integer
+       #   resp.collection_arn #=> String
+       # @overload create_collection(params = {})
+       # @param [Hash] params ({})
+       def create_collection(params = {}, options = {})
+         req = build_request(:create_collection, params)
+         req.send_request(options)
+       end
+
+       # Deletes the specified collection. Note that this operation removes all
+       # faces in the collection. For an example, see example1.
+       #
+       # This operation requires permissions to perform the
+       # `rekognition:DeleteCollection` action.
+       # @option params [required, String] :collection_id
+       #   ID of the collection to delete.
+       # @return [Types::DeleteCollectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::DeleteCollectionResponse#status_code #StatusCode} => Integer
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.delete_collection({
+       #     collection_id: "CollectionId", # required
+       #   })
+       #
+       # @example Response structure
+       #   resp.status_code #=> Integer
+       # @overload delete_collection(params = {})
+       # @param [Hash] params ({})
+       def delete_collection(params = {}, options = {})
+         req = build_request(:delete_collection, params)
+         req.send_request(options)
+       end
+
+       # Deletes faces from a collection. You specify a collection ID and an
+       # array of face IDs to remove from the collection.
+       #
+       # This operation requires permissions to perform the
+       # `rekognition:DeleteFaces` action.
+       # @option params [required, String] :collection_id
+       #   Collection from which to remove the specific faces.
+       # @option params [required, Array<String>] :face_ids
+       #   An array of face IDs to delete.
+       # @return [Types::DeleteFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::DeleteFacesResponse#deleted_faces #DeletedFaces} => Array<String>
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.delete_faces({
+       #     collection_id: "CollectionId", # required
+       #     face_ids: ["FaceId"], # required
+       #   })
+       #
+       # @example Response structure
+       #   resp.deleted_faces #=> Array
+       #   resp.deleted_faces[0] #=> String
+       # @overload delete_faces(params = {})
+       # @param [Hash] params ({})
+       def delete_faces(params = {}, options = {})
+         req = build_request(:delete_faces, params)
+         req.send_request(options)
+       end
+
+       # Detects faces within an image (JPEG or PNG) that is provided as input.
+       #
+       # For each face detected, the operation returns face details including a
+       # bounding box of the face, a confidence value (that the bounding box
+       # contains a face), and a fixed set of attributes such as facial
+       # landmarks (for example, coordinates of eye and mouth), gender,
+       # presence of beard, sunglasses, etc.
+       #
+       # The face-detection algorithm is most effective on frontal faces. For
+       # non-frontal or obscured faces, the algorithm may not detect the faces
+       # or might detect faces with lower confidence.
+       #
+       # <note markdown="1"> This is a stateless API operation. That is, the operation does not
+       # persist any data.
+       #
+       # </note>
+       #
+       # For an example, see get-started-exercise-detect-faces.
+       #
+       # This operation requires permissions to perform the
+       # `rekognition:DetectFaces` action.
+       # @option params [required, Types::Image] :image
+       #   The image in which you want to detect faces. You can specify a blob or
+       #   an S3 object.
+       # @option params [Array<String>] :attributes
+       #   A list of facial attributes you would like to be returned. By default,
+       #   the API returns a subset of facial attributes.
+       #
+       #   For example, you can specify the value as, \["ALL"\] or
+       #   \["DEFAULT"\]. If you provide both, \["ALL", "DEFAULT"\], the
+       #   service uses a logical AND operator to determine which attributes to
+       #   return (in this case, it is all attributes). If you specify all
+       #   attributes, Rekognition performs additional detection.
+       # @return [Types::DetectFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::DetectFacesResponse#face_details #FaceDetails} => Array<Types::FaceDetail>
+       #   * {Types::DetectFacesResponse#orientation_correction #OrientationCorrection} => String
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.detect_faces({
+       #     image: { # required
+       #       bytes: "data",
+       #       s3_object: {
+       #         bucket: "S3Bucket",
+       #         name: "S3ObjectName",
+       #         version: "S3ObjectVersion",
+       #       },
+       #     },
+       #     attributes: ["DEFAULT"], # accepts DEFAULT, ALL
+       #   })
+       #
+       # @example Response structure
+       #   resp.face_details #=> Array
+       #   resp.face_details[0].bounding_box.width #=> Float
+       #   resp.face_details[0].bounding_box.height #=> Float
+       #   resp.face_details[0].bounding_box.left #=> Float
+       #   resp.face_details[0].bounding_box.top #=> Float
+       #   resp.face_details[0].smile.value #=> Boolean
+       #   resp.face_details[0].smile.confidence #=> Float
+       #   resp.face_details[0].eyeglasses.value #=> Boolean
+       #   resp.face_details[0].eyeglasses.confidence #=> Float
+       #   resp.face_details[0].sunglasses.value #=> Boolean
+       #   resp.face_details[0].sunglasses.confidence #=> Float
+       #   resp.face_details[0].gender.value #=> String, one of "MALE", "FEMALE"
+       #   resp.face_details[0].gender.confidence #=> Float
+       #   resp.face_details[0].beard.value #=> Boolean
+       #   resp.face_details[0].beard.confidence #=> Float
+       #   resp.face_details[0].mustache.value #=> Boolean
+       #   resp.face_details[0].mustache.confidence #=> Float
+       #   resp.face_details[0].eyes_open.value #=> Boolean
+       #   resp.face_details[0].eyes_open.confidence #=> Float
+       #   resp.face_details[0].mouth_open.value #=> Boolean
+       #   resp.face_details[0].mouth_open.confidence #=> Float
+       #   resp.face_details[0].emotions #=> Array
+       #   resp.face_details[0].emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
+       #   resp.face_details[0].emotions[0].confidence #=> Float
+       #   resp.face_details[0].landmarks #=> Array
+       #   resp.face_details[0].landmarks[0].type #=> String, one of "EYE_LEFT", "EYE_RIGHT", "NOSE", "MOUTH_LEFT", "MOUTH_RIGHT", "LEFT_EYEBROW_LEFT", "LEFT_EYEBROW_RIGHT", "LEFT_EYEBROW_UP", "RIGHT_EYEBROW_LEFT", "RIGHT_EYEBROW_RIGHT", "RIGHT_EYEBROW_UP", "LEFT_EYE_LEFT", "LEFT_EYE_RIGHT", "LEFT_EYE_UP", "LEFT_EYE_DOWN", "RIGHT_EYE_LEFT", "RIGHT_EYE_RIGHT", "RIGHT_EYE_UP", "RIGHT_EYE_DOWN", "NOSE_LEFT", "NOSE_RIGHT", "MOUTH_UP", "MOUTH_DOWN", "LEFT_PUPIL", "RIGHT_PUPIL"
+       #   resp.face_details[0].landmarks[0].x #=> Float
+       #   resp.face_details[0].landmarks[0].y #=> Float
+       #   resp.face_details[0].pose.roll #=> Float
+       #   resp.face_details[0].pose.yaw #=> Float
+       #   resp.face_details[0].pose.pitch #=> Float
+       #   resp.face_details[0].quality.brightness #=> Float
+       #   resp.face_details[0].quality.sharpness #=> Float
+       #   resp.face_details[0].confidence #=> Float
+       #   resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
+       # @overload detect_faces(params = {})
+       # @param [Hash] params ({})
+       def detect_faces(params = {}, options = {})
+         req = build_request(:detect_faces, params)
+         req.send_request(options)
+       end
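For instance, a small sketch of requesting the full attribute set rather than the default subset; the bucket and object name are hypothetical:

    resp = client.detect_faces(
      image: { s3_object: { bucket: 'my-bucket', name: 'group-photo.jpg' } },
      attributes: ['ALL'] # 'ALL' triggers the additional detection noted above
    )
    resp.face_details.each do |face|
      puts "confidence=#{face.confidence} smiling=#{face.smile.value} pose.yaw=#{face.pose.yaw}"
    end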
+
+       # Detects instances of real-world labels within an image (JPEG or PNG)
+       # provided as input. This includes objects like flower, tree, and table;
+       # events like wedding, graduation, and birthday party; and concepts like
+       # landscape, evening, and nature. For an example, see
+       # get-started-exercise-detect-labels.
+       #
+       # For each object, scene, and concept the API returns one or more
+       # labels. Each label provides the object name, and the level of
+       # confidence that the image contains the object. For example, suppose
+       # the input image has a lighthouse, the sea, and a rock. The response
+       # will include all three labels, one for each object.
+       #
+       # `\{Name: lighthouse, Confidence: 98.4629\}`
+       #
+       # `\{Name: rock, Confidence: 79.2097\}`
+       #
+       # `\{Name: sea, Confidence: 75.061\}`
+       #
+       # In the preceding example, the operation returns one label for each of
+       # the three objects. The operation can also return multiple labels for
+       # the same object in the image. For example, if the input image shows a
+       # flower (for example, a tulip), the operation might return the
+       # following three labels.
+       #
+       # `\{Name: flower, Confidence: 99.0562\}`
+       #
+       # `\{Name: plant, Confidence: 99.0562\}`
+       #
+       # `\{Name: tulip, Confidence: 99.0562\}`
+       #
+       # In this example, the detection algorithm more precisely identifies the
+       # flower as a tulip.
+       #
+       # You can provide the input image as an S3 object or as base64-encoded
+       # bytes. In response, the API returns an array of labels. In addition,
+       # the response also includes the orientation correction. Optionally, you
+       # can specify `MinConfidence` to control the confidence threshold for
+       # the labels returned. The default is 50%. You can also add the
+       # `MaxLabels` parameter to limit the number of labels returned.
+       #
+       # <note markdown="1"> If the object detected is a person, the operation doesn't provide the
+       # same facial details that the DetectFaces operation provides.
+       #
+       # </note>
+       #
+       # This is a stateless API operation. That is, the operation does not
+       # persist any data.
+       #
+       # This operation requires permissions to perform the
+       # `rekognition:DetectLabels` action.
+       # @option params [required, Types::Image] :image
+       #   The input image. You can provide a blob of image bytes or an S3
+       #   object.
+       # @option params [Integer] :max_labels
+       #   Maximum number of labels you want the service to return in the
+       #   response. The service returns the specified number of highest
+       #   confidence labels.
+       # @option params [Float] :min_confidence
+       #   Specifies the minimum confidence level for the labels to return.
+       #   Amazon Rekognition doesn't return any labels with confidence lower
+       #   than this specified value.
+       #
+       #   If `minConfidence` is not specified, the operation returns labels with
+       #   a confidence value greater than or equal to 50 percent.
+       # @return [Types::DetectLabelsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::DetectLabelsResponse#labels #Labels} => Array<Types::Label>
+       #   * {Types::DetectLabelsResponse#orientation_correction #OrientationCorrection} => String
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.detect_labels({
+       #     image: { # required
+       #       bytes: "data",
+       #       s3_object: {
+       #         bucket: "S3Bucket",
+       #         name: "S3ObjectName",
+       #         version: "S3ObjectVersion",
+       #       },
+       #     },
+       #     max_labels: 1,
+       #     min_confidence: 1.0,
+       #   })
+       #
+       # @example Response structure
+       #   resp.labels #=> Array
+       #   resp.labels[0].name #=> String
+       #   resp.labels[0].confidence #=> Float
+       #   resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
+       # @overload detect_labels(params = {})
+       # @param [Hash] params ({})
+       def detect_labels(params = {}, options = {})
+         req = build_request(:detect_labels, params)
+         req.send_request(options)
+       end
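A short sketch of the `MaxLabels` / `MinConfidence` knobs described above; the bucket and object name are hypothetical:

    resp = client.detect_labels(
      image: { s3_object: { bucket: 'my-bucket', name: 'beach.jpg' } },
      max_labels: 10,       # cap the number of labels returned
      min_confidence: 75.0  # raise the threshold from the 50% default
    )
    resp.labels.each { |label| puts "#{label.name} (#{label.confidence.round(1)}%)" }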
+
+       # Detects faces in the input image and adds them to the specified
+       # collection.
+       #
+       # Amazon Rekognition does not save the actual faces detected. Instead,
+       # the underlying detection algorithm first detects the faces in the
+       # input image, and for each face extracts facial features into a feature
+       # vector, and stores it in the back-end database. Amazon Rekognition
+       # uses feature vectors when performing face match and search operations
+       # using the SearchFaces and SearchFacesByImage operations.
+       #
+       # If you provide the optional `externalImageID` for the input image you
+       # provided, Amazon Rekognition associates this ID with all faces that it
+       # detects. When you call the ListFaces operation, the response returns the
+       # external ID. You can use this external image ID to create a
+       # client-side index to associate the faces with each image. You can then
+       # use the index to find all faces in an image.
+       #
+       # In response, the operation returns an array of metadata for all
+       # detected faces. This includes the bounding box of the detected face,
+       # confidence value (indicating the bounding box contains a face), a face
+       # ID assigned by the service for each face that is detected and stored,
+       # and an image ID assigned by the service for the input image. If you
+       # request all facial attributes (using the `detectionAttributes`
+       # parameter), Rekognition returns detailed facial attributes such as
+       # facial landmarks (for example, location of eye and mouth) and other
+       # facial attributes such as gender. If you provide the same image, specify
+       # the same collection, and use the same external ID in the `IndexFaces`
+       # operation, Rekognition doesn't save duplicate face metadata.
+       #
+       # For an example, see example2.
+       #
+       # This operation requires permissions to perform the
+       # `rekognition:IndexFaces` action.
+       # @option params [required, String] :collection_id
+       #   ID of an existing collection to which you want to add the faces that
+       #   are detected in the input images.
+       # @option params [required, Types::Image] :image
+       #   Provides the source image either as bytes or an S3 object.
+       # @option params [String] :external_image_id
+       #   ID you want to assign to all the faces detected in the image.
+       # @option params [Array<String>] :detection_attributes
+       #   (Optional) Returns detailed attributes of indexed faces. By default,
+       #   the operation returns a subset of the facial attributes.
+       #
+       #   For example, you can specify the value as, \["ALL"\] or
+       #   \["DEFAULT"\]. If you provide both, \["ALL", "DEFAULT"\],
+       #   Rekognition uses the logical AND operator to determine which
+       #   attributes to return (in this case, it is all attributes). If you
+       #   specify all attributes, the service performs additional detection, in
+       #   addition to the default.
+       # @return [Types::IndexFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::IndexFacesResponse#face_records #FaceRecords} => Array<Types::FaceRecord>
+       #   * {Types::IndexFacesResponse#orientation_correction #OrientationCorrection} => String
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.index_faces({
+       #     collection_id: "CollectionId", # required
+       #     image: { # required
+       #       bytes: "data",
+       #       s3_object: {
+       #         bucket: "S3Bucket",
+       #         name: "S3ObjectName",
+       #         version: "S3ObjectVersion",
+       #       },
+       #     },
+       #     external_image_id: "ExternalImageId",
+       #     detection_attributes: ["DEFAULT"], # accepts DEFAULT, ALL
+       #   })
+       #
+       # @example Response structure
+       #   resp.face_records #=> Array
+       #   resp.face_records[0].face.face_id #=> String
+       #   resp.face_records[0].face.bounding_box.width #=> Float
+       #   resp.face_records[0].face.bounding_box.height #=> Float
+       #   resp.face_records[0].face.bounding_box.left #=> Float
+       #   resp.face_records[0].face.bounding_box.top #=> Float
+       #   resp.face_records[0].face.image_id #=> String
+       #   resp.face_records[0].face.external_image_id #=> String
+       #   resp.face_records[0].face.confidence #=> Float
+       #   resp.face_records[0].face_detail.bounding_box.width #=> Float
+       #   resp.face_records[0].face_detail.bounding_box.height #=> Float
+       #   resp.face_records[0].face_detail.bounding_box.left #=> Float
+       #   resp.face_records[0].face_detail.bounding_box.top #=> Float
+       #   resp.face_records[0].face_detail.smile.value #=> Boolean
+       #   resp.face_records[0].face_detail.smile.confidence #=> Float
+       #   resp.face_records[0].face_detail.eyeglasses.value #=> Boolean
+       #   resp.face_records[0].face_detail.eyeglasses.confidence #=> Float
+       #   resp.face_records[0].face_detail.sunglasses.value #=> Boolean
+       #   resp.face_records[0].face_detail.sunglasses.confidence #=> Float
+       #   resp.face_records[0].face_detail.gender.value #=> String, one of "MALE", "FEMALE"
+       #   resp.face_records[0].face_detail.gender.confidence #=> Float
+       #   resp.face_records[0].face_detail.beard.value #=> Boolean
+       #   resp.face_records[0].face_detail.beard.confidence #=> Float
+       #   resp.face_records[0].face_detail.mustache.value #=> Boolean
+       #   resp.face_records[0].face_detail.mustache.confidence #=> Float
+       #   resp.face_records[0].face_detail.eyes_open.value #=> Boolean
+       #   resp.face_records[0].face_detail.eyes_open.confidence #=> Float
+       #   resp.face_records[0].face_detail.mouth_open.value #=> Boolean
+       #   resp.face_records[0].face_detail.mouth_open.confidence #=> Float
+       #   resp.face_records[0].face_detail.emotions #=> Array
+       #   resp.face_records[0].face_detail.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
+       #   resp.face_records[0].face_detail.emotions[0].confidence #=> Float
+       #   resp.face_records[0].face_detail.landmarks #=> Array
+       #   resp.face_records[0].face_detail.landmarks[0].type #=> String, one of "EYE_LEFT", "EYE_RIGHT", "NOSE", "MOUTH_LEFT", "MOUTH_RIGHT", "LEFT_EYEBROW_LEFT", "LEFT_EYEBROW_RIGHT", "LEFT_EYEBROW_UP", "RIGHT_EYEBROW_LEFT", "RIGHT_EYEBROW_RIGHT", "RIGHT_EYEBROW_UP", "LEFT_EYE_LEFT", "LEFT_EYE_RIGHT", "LEFT_EYE_UP", "LEFT_EYE_DOWN", "RIGHT_EYE_LEFT", "RIGHT_EYE_RIGHT", "RIGHT_EYE_UP", "RIGHT_EYE_DOWN", "NOSE_LEFT", "NOSE_RIGHT", "MOUTH_UP", "MOUTH_DOWN", "LEFT_PUPIL", "RIGHT_PUPIL"
+       #   resp.face_records[0].face_detail.landmarks[0].x #=> Float
+       #   resp.face_records[0].face_detail.landmarks[0].y #=> Float
+       #   resp.face_records[0].face_detail.pose.roll #=> Float
+       #   resp.face_records[0].face_detail.pose.yaw #=> Float
+       #   resp.face_records[0].face_detail.pose.pitch #=> Float
+       #   resp.face_records[0].face_detail.quality.brightness #=> Float
+       #   resp.face_records[0].face_detail.quality.sharpness #=> Float
+       #   resp.face_records[0].face_detail.confidence #=> Float
+       #   resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
+       # @overload index_faces(params = {})
+       # @param [Hash] params ({})
+       def index_faces(params = {}, options = {})
+         req = build_request(:index_faces, params)
+         req.send_request(options)
+       end
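A sketch of the external-image-ID pattern described above; the collection, bucket, and IDs are hypothetical:

    resp = client.index_faces(
      collection_id: 'my-collection',
      image: { s3_object: { bucket: 'my-bucket', name: 'user-42.jpg' } },
      external_image_id: 'user-42', # client-side key linking faces back to this image
      detection_attributes: ['DEFAULT']
    )
    resp.face_records.each do |record|
      puts "face_id=#{record.face.face_id} external_id=#{record.face.external_image_id}"
    end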
+
+       # Returns a list of collection IDs in your account. If the result is
+       # truncated, the response also provides a `NextToken` that you can use
+       # in the subsequent request to fetch the next set of collection IDs.
+       #
+       # For an example, see example1.
+       #
+       # This operation requires permissions to perform the
+       # `rekognition:ListCollections` action.
+       # @option params [String] :next_token
+       #   Pagination token from the previous response.
+       # @option params [Integer] :max_results
+       #   Maximum number of collection IDs to return.
+       # @return [Types::ListCollectionsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::ListCollectionsResponse#collection_ids #CollectionIds} => Array<String>
+       #   * {Types::ListCollectionsResponse#next_token #NextToken} => String
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.list_collections({
+       #     next_token: "PaginationToken",
+       #     max_results: 1,
+       #   })
+       #
+       # @example Response structure
+       #   resp.collection_ids #=> Array
+       #   resp.collection_ids[0] #=> String
+       #   resp.next_token #=> String
+       # @overload list_collections(params = {})
+       # @param [Hash] params ({})
+       def list_collections(params = {}, options = {})
+         req = build_request(:list_collections, params)
+         req.send_request(options)
+       end
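The truncation behavior above implies the usual token-driven pagination loop. A minimal sketch, assuming `client` is already configured:

    params = { max_results: 10 }
    collection_ids = []
    loop do
      resp = client.list_collections(params)
      collection_ids.concat(resp.collection_ids)
      break unless resp.next_token # nil when the listing is complete
      params[:next_token] = resp.next_token
    end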
+
+       # Returns metadata for faces in the specified collection. This metadata
+       # includes information such as the bounding box coordinates, the
+       # confidence (that the bounding box contains a face), and face ID. For
+       # an example, see example3.
+       #
+       # This operation requires permissions to perform the
+       # `rekognition:ListFaces` action.
+       # @option params [required, String] :collection_id
+       #   ID of the collection from which to list the faces.
+       # @option params [String] :next_token
+       #   If the previous response was incomplete (because there is more data to
+       #   retrieve), Amazon Rekognition returns a pagination token in the
+       #   response. You can use this pagination token to retrieve the next set
+       #   of faces.
+       # @option params [Integer] :max_results
+       #   Maximum number of faces to return.
+       # @return [Types::ListFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::ListFacesResponse#faces #Faces} => Array<Types::Face>
+       #   * {Types::ListFacesResponse#next_token #NextToken} => String
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.list_faces({
+       #     collection_id: "CollectionId", # required
+       #     next_token: "PaginationToken",
+       #     max_results: 1,
+       #   })
+       #
+       # @example Response structure
+       #   resp.faces #=> Array
+       #   resp.faces[0].face_id #=> String
+       #   resp.faces[0].bounding_box.width #=> Float
+       #   resp.faces[0].bounding_box.height #=> Float
+       #   resp.faces[0].bounding_box.left #=> Float
+       #   resp.faces[0].bounding_box.top #=> Float
+       #   resp.faces[0].image_id #=> String
+       #   resp.faces[0].external_image_id #=> String
+       #   resp.faces[0].confidence #=> Float
+       #   resp.next_token #=> String
+       # @overload list_faces(params = {})
+       # @param [Hash] params ({})
+       def list_faces(params = {}, options = {})
+         req = build_request(:list_faces, params)
+         req.send_request(options)
+       end
+
+       # For a given input face ID, searches the specified collection for
+       # matching faces. You get a face ID when you add a face to the
+       # collection using the IndexFaces operation. The operation compares the
+       # features of the input face with faces in the specified collection.
+       #
+       # <note markdown="1"> You can also search faces without indexing faces by using the
+       # `SearchFacesByImage` operation.
+       #
+       # </note>
+       #
+       # The operation response returns an array of faces that match, ordered
+       # by similarity score with the highest similarity first. More
+       # specifically, it is an array of metadata for each face match that is
+       # found. Along with the metadata, the response also includes a
+       # `confidence` value for each face match, indicating the confidence that
+       # the specific face matches the input face.
+       #
+       # For an example, see example3.
+       #
+       # This operation requires permissions to perform the
+       # `rekognition:SearchFaces` action.
+       # @option params [required, String] :collection_id
+       #   ID of the collection to search.
+       # @option params [required, String] :face_id
+       #   ID of a face to find matches for in the collection.
+       # @option params [Integer] :max_faces
+       #   Maximum number of faces to return. The API will return the maximum
+       #   number of faces with the highest confidence in the match.
+       # @option params [Float] :face_match_threshold
+       #   Optional value specifying the minimum confidence in the face match to
+       #   return. For example, don't return any matches where confidence in
+       #   matches is less than 70%.
+       # @return [Types::SearchFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::SearchFacesResponse#searched_face_id #SearchedFaceId} => String
+       #   * {Types::SearchFacesResponse#face_matches #FaceMatches} => Array<Types::FaceMatch>
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.search_faces({
+       #     collection_id: "CollectionId", # required
+       #     face_id: "FaceId", # required
+       #     max_faces: 1,
+       #     face_match_threshold: 1.0,
+       #   })
+       #
+       # @example Response structure
+       #   resp.searched_face_id #=> String
+       #   resp.face_matches #=> Array
+       #   resp.face_matches[0].similarity #=> Float
+       #   resp.face_matches[0].face.face_id #=> String
+       #   resp.face_matches[0].face.bounding_box.width #=> Float
+       #   resp.face_matches[0].face.bounding_box.height #=> Float
+       #   resp.face_matches[0].face.bounding_box.left #=> Float
+       #   resp.face_matches[0].face.bounding_box.top #=> Float
+       #   resp.face_matches[0].face.image_id #=> String
+       #   resp.face_matches[0].face.external_image_id #=> String
+       #   resp.face_matches[0].face.confidence #=> Float
+       # @overload search_faces(params = {})
+       # @param [Hash] params ({})
+       def search_faces(params = {}, options = {})
+         req = build_request(:search_faces, params)
+         req.send_request(options)
+       end
+
+       # For a given input image, first detects the largest face in the image,
+       # and then searches the specified collection for matching faces. The
+       # operation compares the features of the input face with faces in the
+       # specified collection.
+       #
+       # <note markdown="1"> To search for all faces in an input image, you might first call the
+       # IndexFaces API, and then use the face IDs returned in subsequent
+       # calls to the SearchFaces API.
+       #
+       # You can also call the `DetectFaces` API and use the bounding boxes in
+       # the response to make face crops, which you can then pass in to the
+       # `SearchFacesByImage` API.
+       #
+       # </note>
+       #
+       # The response returns an array of faces that match, ordered by
+       # similarity score with the highest similarity first. More specifically,
+       # it is an array of metadata for each face match found. Along with the
+       # metadata, the response also includes a `similarity` indicating how
+       # similar the face is to the input face. In the response, the API also
+       # returns the bounding box (and a confidence level that the bounding box
+       # contains a face) of the face that Rekognition used for the input
+       # image.
+       #
+       # For an example, see example3.
+       #
+       # This operation requires permissions to perform the
+       # `rekognition:SearchFacesByImage` action.
+       # @option params [required, String] :collection_id
+       #   ID of the collection to search.
+       # @option params [required, Types::Image] :image
+       #   Provides the source image either as bytes or an S3 object.
+       # @option params [Integer] :max_faces
+       #   Maximum number of faces to return. The operation returns the maximum
+       #   number of faces with the highest confidence in the match.
+       # @option params [Float] :face_match_threshold
+       #   (Optional) Specifies the minimum confidence in the face match to
+       #   return. For example, don't return any matches where confidence in
+       #   matches is less than 70%.
+       # @return [Types::SearchFacesByImageResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::SearchFacesByImageResponse#searched_face_bounding_box #SearchedFaceBoundingBox} => Types::BoundingBox
+       #   * {Types::SearchFacesByImageResponse#searched_face_confidence #SearchedFaceConfidence} => Float
+       #   * {Types::SearchFacesByImageResponse#face_matches #FaceMatches} => Array<Types::FaceMatch>
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.search_faces_by_image({
+       #     collection_id: "CollectionId", # required
+       #     image: { # required
+       #       bytes: "data",
+       #       s3_object: {
+       #         bucket: "S3Bucket",
+       #         name: "S3ObjectName",
+       #         version: "S3ObjectVersion",
+       #       },
+       #     },
+       #     max_faces: 1,
+       #     face_match_threshold: 1.0,
+       #   })
+       #
+       # @example Response structure
+       #   resp.searched_face_bounding_box.width #=> Float
+       #   resp.searched_face_bounding_box.height #=> Float
+       #   resp.searched_face_bounding_box.left #=> Float
+       #   resp.searched_face_bounding_box.top #=> Float
+       #   resp.searched_face_confidence #=> Float
+       #   resp.face_matches #=> Array
+       #   resp.face_matches[0].similarity #=> Float
+       #   resp.face_matches[0].face.face_id #=> String
+       #   resp.face_matches[0].face.bounding_box.width #=> Float
+       #   resp.face_matches[0].face.bounding_box.height #=> Float
+       #   resp.face_matches[0].face.bounding_box.left #=> Float
+       #   resp.face_matches[0].face.bounding_box.top #=> Float
+       #   resp.face_matches[0].face.image_id #=> String
+       #   resp.face_matches[0].face.external_image_id #=> String
+       #   resp.face_matches[0].face.confidence #=> Float
+       # @overload search_faces_by_image(params = {})
+       # @param [Hash] params ({})
+       def search_faces_by_image(params = {}, options = {})
+         req = build_request(:search_faces_by_image, params)
+         req.send_request(options)
+       end
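A hedged sketch of searching a collection by image, as described above; the collection, bucket, and threshold are illustrative values:

    resp = client.search_faces_by_image(
      collection_id: 'my-collection',
      image: { s3_object: { bucket: 'my-bucket', name: 'visitor.jpg' } },
      face_match_threshold: 70.0 # drop matches below 70% confidence
    )
    resp.face_matches.each do |match|
      puts "#{match.face.external_image_id}: #{match.similarity.round(1)}%"
    end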
+
+       # @!endgroup
+
+       # @param params ({})
+       # @api private
+       def build_request(operation_name, params = {})
+         handlers = @handlers.for(operation_name)
+         context = Seahorse::Client::RequestContext.new(
+           operation_name: operation_name,
+           operation: config.api.operation(operation_name),
+           client: self,
+           params: params,
+           config: config)
+         context[:gem_name] = 'aws-sdk-rekognition'
+         context[:gem_version] = '1.0.0.rc2'
+         Seahorse::Client::Request.new(handlers, context)
+       end
+
+       # @api private
+       # @deprecated
+       def waiter_names
+         []
+       end
+
+       class << self
+
+         # @api private
+         attr_reader :identifier
+
+         # @api private
+         def errors_module
+           Errors
+         end
+
+       end
+     end
+   end
+ end