aws-sdk-rekognition 1.15.0 → 1.16.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: d5292a573a33111abc81b6c31757ec0478a0f450
-  data.tar.gz: 1ec9b4d39fb4947d093eeb144d9ce6132a52de90
+  metadata.gz: 31fc3baa314f96f7e71daa41e992169fb7d8fdd4
+  data.tar.gz: c4de58b98f35bd95c22c3d3ae4dc8dcb1bbc8158
 SHA512:
-  metadata.gz: 01a500a4c3fef50c671f97436c2353270b8d5df1b2bdbdcdfe9977dadf1eabc37680955536ea6c4781df2b686406ce980bad781a966f6711acba815eb812b1cb
-  data.tar.gz: 249f9dce59afd829acdbe22a173e82b0a826639a0ed9ab2a78c20c07928e116fcaace95566967b325f27f30f28b2f4bd453853a0aec9d8140eb631521e193df7
+  metadata.gz: 0413f61a60dbdbbfbe10cd571c6437d392797e773e83abbf872d4c58718b83881dd4def06ce8fe6d2c14d28b762eafed5ec610003c3c2a13f3dfc33d836dcd3b
+  data.tar.gz: 23b91398429ffd8a3ecf00ba27b931fa67b4ca1ee3b529204de3510105d17bd8b757b39e491afb778426a3c2c54cee1ad1c0b5fdcad929f180a868e71a2f7702
lib/aws-sdk-rekognition.rb CHANGED
@@ -42,6 +42,6 @@ require_relative 'aws-sdk-rekognition/customizations'
 # @service
 module Aws::Rekognition
 
-  GEM_VERSION = '1.15.0'
+  GEM_VERSION = '1.16.0'
 
 end
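
A minimal way to confirm which release an application actually loads, using the constant bumped above:

    require 'aws-sdk-rekognition'

    # Prints "1.16.0" once this release is installed.
    puts Aws::Rekognition::GEM_VERSION
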
lib/aws-sdk-rekognition/client.rb CHANGED
@@ -374,7 +374,7 @@ module Aws::Rekognition
 #   resp.face_matches[0].face.bounding_box.top #=> Float
 #   resp.face_matches[0].face.confidence #=> Float
 #   resp.face_matches[0].face.landmarks #=> Array
-#   resp.face_matches[0].face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+#   resp.face_matches[0].face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
 #   resp.face_matches[0].face.landmarks[0].x #=> Float
 #   resp.face_matches[0].face.landmarks[0].y #=> Float
 #   resp.face_matches[0].face.pose.roll #=> Float
@@ -389,7 +389,7 @@ module Aws::Rekognition
 #   resp.unmatched_faces[0].bounding_box.top #=> Float
 #   resp.unmatched_faces[0].confidence #=> Float
 #   resp.unmatched_faces[0].landmarks #=> Array
-#   resp.unmatched_faces[0].landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+#   resp.unmatched_faces[0].landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
 #   resp.unmatched_faces[0].landmarks[0].x #=> Float
 #   resp.unmatched_faces[0].landmarks[0].y #=> Float
 #   resp.unmatched_faces[0].pose.roll #=> Float
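
The landmark enum gains five jawline and chin points in this release: upperJawlineLeft, midJawlineLeft, chinBottom, midJawlineRight, upperJawlineRight. Landmark types are plain strings, so existing callers keep working; a minimal sketch of picking out the new points from a compare_faces response (bucket and object names are placeholders):

    require 'aws-sdk-rekognition'

    rekognition = Aws::Rekognition::Client.new(region: 'us-east-1')

    resp = rekognition.compare_faces(
      source_image: { s3_object: { bucket: 'my-bucket', name: 'source.jpg' } },
      target_image: { s3_object: { bucket: 'my-bucket', name: 'target.jpg' } },
      similarity_threshold: 80
    )

    # The five jawline/chin types are new in 1.16.0; the SDK passes the
    # service's strings through unchanged.
    jawline = %w[upperJawlineLeft midJawlineLeft chinBottom
                 midJawlineRight upperJawlineRight]
    resp.face_matches.each do |match|
      match.face.landmarks.each do |lm|
        puts "#{lm.type}: (#{lm.x}, #{lm.y})" if jawline.include?(lm.type)
      end
    end
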
@@ -415,6 +415,9 @@ module Aws::Rekognition
 # operation and persist results in a specific collection. Then, a user
 # can search the collection for faces in the user-specific container.
 #
+# When you create a collection, it is associated with the latest version
+# of the face model version.
+#
 # <note markdown="1"> Collection names are case-sensitive.
 #
 # </note>
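
Per the added note, a new collection is pinned to the newest face model at creation time, and the response reports which version was attached. A sketch, with a hypothetical collection ID:

    require 'aws-sdk-rekognition'

    rekognition = Aws::Rekognition::Client.new(region: 'us-east-1')

    resp = rekognition.create_collection(collection_id: 'my-collection')

    # Collections created from now on carry the latest face model version.
    puts resp.face_model_version #=> e.g. "4.0"
    puts resp.status_code
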
@@ -766,9 +769,9 @@ module Aws::Rekognition
 # faces or might detect faces with lower confidence.
 #
 # You pass the input image either as base64-encoded image bytes or as a
-# reference to an image in an Amazon S3 bucket. If you use the AWS CLI
-# to call Amazon Rekognition operations, passing image bytes is not
-# supported. The image must be either a PNG or JPEG formatted file.
+# reference to an image in an Amazon S3 bucket. If you use the to call
+# Amazon Rekognition operations, passing image bytes is not supported.
+# The image must be either a PNG or JPEG formatted file.
 #
 # <note markdown="1"> This is a stateless API operation. That is, the operation does not
 # persist any data.
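
The '+' text above lost the words "AWS CLI" upstream (visible in the removed lines); the bytes restriction applies to the CLI only. From the SDK, both input styles work. A sketch with placeholder names:

    require 'aws-sdk-rekognition'

    rekognition = Aws::Rekognition::Client.new(region: 'us-east-1')

    # Reference a PNG or JPEG stored in S3 ...
    resp = rekognition.detect_faces(
      image: { s3_object: { bucket: 'my-bucket', name: 'photo.jpg' } },
      attributes: ['ALL']
    )

    # ... or pass raw image bytes directly (not supported from the AWS CLI).
    resp = rekognition.detect_faces(
      image: { bytes: File.binread('photo.jpg') },
      attributes: ['ALL']
    )

    resp.face_details.each { |face| puts face.confidence }
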
@@ -910,7 +913,7 @@ module Aws::Rekognition
 #   resp.face_details[0].emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
 #   resp.face_details[0].emotions[0].confidence #=> Float
 #   resp.face_details[0].landmarks #=> Array
-#   resp.face_details[0].landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+#   resp.face_details[0].landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
 #   resp.face_details[0].landmarks[0].x #=> Float
 #   resp.face_details[0].landmarks[0].y #=> Float
 #   resp.face_details[0].pose.roll #=> Float
@@ -1415,7 +1418,7 @@ module Aws::Rekognition
 #   resp.celebrities[0].celebrity.face.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
 #   resp.celebrities[0].celebrity.face.emotions[0].confidence #=> Float
 #   resp.celebrities[0].celebrity.face.landmarks #=> Array
-#   resp.celebrities[0].celebrity.face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+#   resp.celebrities[0].celebrity.face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
 #   resp.celebrities[0].celebrity.face.landmarks[0].x #=> Float
 #   resp.celebrities[0].celebrity.face.landmarks[0].y #=> Float
 #   resp.celebrities[0].celebrity.face.pose.roll #=> Float
@@ -1626,7 +1629,7 @@ module Aws::Rekognition
 #   resp.faces[0].face.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
 #   resp.faces[0].face.emotions[0].confidence #=> Float
 #   resp.faces[0].face.landmarks #=> Array
-#   resp.faces[0].face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+#   resp.faces[0].face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
 #   resp.faces[0].face.landmarks[0].x #=> Float
 #   resp.faces[0].face.landmarks[0].y #=> Float
 #   resp.faces[0].face.pose.roll #=> Float
@@ -1760,7 +1763,7 @@ module Aws::Rekognition
 #   resp.persons[0].person.face.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
 #   resp.persons[0].person.face.emotions[0].confidence #=> Float
 #   resp.persons[0].person.face.landmarks #=> Array
-#   resp.persons[0].person.face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+#   resp.persons[0].person.face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
 #   resp.persons[0].person.face.landmarks[0].x #=> Float
 #   resp.persons[0].person.face.landmarks[0].y #=> Float
 #   resp.persons[0].person.face.pose.roll #=> Float
@@ -2017,7 +2020,7 @@ module Aws::Rekognition
 #   resp.persons[0].person.face.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
 #   resp.persons[0].person.face.emotions[0].confidence #=> Float
 #   resp.persons[0].person.face.landmarks #=> Array
-#   resp.persons[0].person.face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+#   resp.persons[0].person.face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
 #   resp.persons[0].person.face.landmarks[0].x #=> Float
 #   resp.persons[0].person.face.landmarks[0].y #=> Float
 #   resp.persons[0].person.face.pose.roll #=> Float
@@ -2052,9 +2055,14 @@ module Aws::Rekognition
 # If you're using version 1.0 of the face detection model, `IndexFaces`
 # indexes the 15 largest faces in the input image. Later versions of the
 # face detection model index the 100 largest faces in the input image.
+#
+# If you're using version 4 or later of the face model, image
+# orientation information is not returned in the `OrientationCorrection`
+# field.
+#
 # To determine which version of the model you're using, call and supply
 # the collection ID. You can also get the model version from the value
-# of `FaceModelVersion` in the response from `IndexFaces`.
+# of `FaceModelVersion` in the response from `IndexFaces`
 #
 # For more information, see Model Versioning in the Amazon Rekognition
 # Developer Guide.
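
The dangling "call and supply the collection ID" is an upstream artifact of a dropped doc link; it most likely refers to DescribeCollection. A sketch of both ways to discover the model version (collection and bucket names are placeholders):

    require 'aws-sdk-rekognition'

    rekognition = Aws::Rekognition::Client.new(region: 'us-east-1')

    # Ask the collection which face model it is bound to ...
    desc = rekognition.describe_collection(collection_id: 'my-collection')
    puts desc.face_model_version

    # ... or read the same value from an IndexFaces response.
    resp = rekognition.index_faces(
      collection_id: 'my-collection',
      image: { s3_object: { bucket: 'my-bucket', name: 'photo.jpg' } }
    )
    puts resp.face_model_version
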
@@ -2392,7 +2400,7 @@ module Aws::Rekognition
 #   resp.face_records[0].face_detail.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
 #   resp.face_records[0].face_detail.emotions[0].confidence #=> Float
 #   resp.face_records[0].face_detail.landmarks #=> Array
-#   resp.face_records[0].face_detail.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+#   resp.face_records[0].face_detail.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
 #   resp.face_records[0].face_detail.landmarks[0].x #=> Float
 #   resp.face_records[0].face_detail.landmarks[0].y #=> Float
 #   resp.face_records[0].face_detail.pose.roll #=> Float
@@ -2432,7 +2440,7 @@ module Aws::Rekognition
 #   resp.unindexed_faces[0].face_detail.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
 #   resp.unindexed_faces[0].face_detail.emotions[0].confidence #=> Float
 #   resp.unindexed_faces[0].face_detail.landmarks #=> Array
-#   resp.unindexed_faces[0].face_detail.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+#   resp.unindexed_faces[0].face_detail.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
 #   resp.unindexed_faces[0].face_detail.landmarks[0].x #=> Float
 #   resp.unindexed_faces[0].face_detail.landmarks[0].y #=> Float
 #   resp.unindexed_faces[0].face_detail.pose.roll #=> Float
@@ -2810,7 +2818,7 @@ module Aws::Rekognition
 #   resp.celebrity_faces[0].face.bounding_box.top #=> Float
 #   resp.celebrity_faces[0].face.confidence #=> Float
 #   resp.celebrity_faces[0].face.landmarks #=> Array
-#   resp.celebrity_faces[0].face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+#   resp.celebrity_faces[0].face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
 #   resp.celebrity_faces[0].face.landmarks[0].x #=> Float
 #   resp.celebrity_faces[0].face.landmarks[0].y #=> Float
 #   resp.celebrity_faces[0].face.pose.roll #=> Float
@@ -2826,7 +2834,7 @@ module Aws::Rekognition
 #   resp.unrecognized_faces[0].bounding_box.top #=> Float
 #   resp.unrecognized_faces[0].confidence #=> Float
 #   resp.unrecognized_faces[0].landmarks #=> Array
-#   resp.unrecognized_faces[0].landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+#   resp.unrecognized_faces[0].landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil", "upperJawlineLeft", "midJawlineLeft", "chinBottom", "midJawlineRight", "upperJawlineRight"
 #   resp.unrecognized_faces[0].landmarks[0].x #=> Float
 #   resp.unrecognized_faces[0].landmarks[0].y #=> Float
 #   resp.unrecognized_faces[0].pose.roll #=> Float
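
The extended landmark list also flows through recognize_celebrities. A brief sketch; the image location is a placeholder:

    require 'aws-sdk-rekognition'

    rekognition = Aws::Rekognition::Client.new(region: 'us-east-1')

    resp = rekognition.recognize_celebrities(
      image: { s3_object: { bucket: 'my-bucket', name: 'red-carpet.jpg' } }
    )

    resp.celebrity_faces.each do |celeb|
      puts "#{celeb.name} (#{celeb.match_confidence}%)"
    end
    puts "Unrecognized faces: #{resp.unrecognized_faces.size}"
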
@@ -3634,7 +3642,7 @@ module Aws::Rekognition
       params: params,
       config: config)
     context[:gem_name] = 'aws-sdk-rekognition'
-    context[:gem_version] = '1.15.0'
+    context[:gem_version] = '1.16.0'
     Seahorse::Client::Request.new(handlers, context)
   end
 
lib/aws-sdk-rekognition/types.rb CHANGED
@@ -274,39 +274,37 @@ module Aws::Rekognition
 #   @return [Array<Types::ComparedFace>]
 #
 # @!attribute [rw] source_image_orientation_correction
-#   The orientation of the source image (counterclockwise direction). If
-#   your application displays the source image, you can use this value
-#   to correct image orientation. The bounding box coordinates returned
-#   in `SourceImageFace` represent the location of the face before the
-#   image orientation is corrected.
-#
-#   <note markdown="1"> If the source image is in .jpeg format, it might contain
-#   exchangeable image (Exif) metadata that includes the image's
-#   orientation. If the Exif metadata for the source image populates the
-#   orientation field, the value of `OrientationCorrection` is null. The
-#   `SourceImageFace` bounding box coordinates represent the location of
-#   the face after Exif metadata is used to correct the orientation.
-#   Images in .png format don't contain Exif metadata.
+#   The value of `SourceImageOrientationCorrection` is always null.
 #
-#   </note>
+#   If the input image is in .jpeg format, it might contain exchangeable
+#   image file format (Exif) metadata that includes the image's
+#   orientation. Amazon Rekognition uses this orientation information to
+#   perform image correction. The bounding box coordinates are
+#   translated to represent object locations after the orientation
+#   information in the Exif metadata is used to correct the image
+#   orientation. Images in .png format don't contain Exif metadata.
+#
+#   Amazon Rekognition doesn’t perform image correction for images in
+#   .png format and .jpeg images without orientation information in the
+#   image Exif metadata. The bounding box coordinates aren't translated
+#   and represent the object locations before the image is rotated.
 #   @return [String]
 #
 # @!attribute [rw] target_image_orientation_correction
-#   The orientation of the target image (in counterclockwise direction).
-#   If your application displays the target image, you can use this
-#   value to correct the orientation of the image. The bounding box
-#   coordinates returned in `FaceMatches` and `UnmatchedFaces` represent
-#   face locations before the image orientation is corrected.
-#
-#   <note markdown="1"> If the target image is in .jpg format, it might contain Exif
-#   metadata that includes the orientation of the image. If the Exif
-#   metadata for the target image populates the orientation field, the
-#   value of `OrientationCorrection` is null. The bounding box
-#   coordinates in `FaceMatches` and `UnmatchedFaces` represent the
-#   location of the face after Exif metadata is used to correct the
+#   The value of `TargetImageOrientationCorrection` is always null.
+#
+#   If the input image is in .jpeg format, it might contain exchangeable
+#   image file format (Exif) metadata that includes the image's
+#   orientation. Amazon Rekognition uses this orientation information to
+#   perform image correction. The bounding box coordinates are
+#   translated to represent object locations after the orientation
+#   information in the Exif metadata is used to correct the image
 #   orientation. Images in .png format don't contain Exif metadata.
 #
-#   </note>
+#   Amazon Rekognition doesn’t perform image correction for images in
+#   .png format and .jpeg images without orientation information in the
+#   image Exif metadata. The bounding box coordinates aren't translated
+#   and represent the object locations before the image is rotated.
 #   @return [String]
 #
 class CompareFacesResponse < Struct.new(
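
With both orientation fields now documented as always null, callers can stop branching on them: bounding boxes already reflect any Exif-based rotation the service applied. A minimal sketch (placeholder S3 names):

    require 'aws-sdk-rekognition'

    rekognition = Aws::Rekognition::Client.new(region: 'us-east-1')

    resp = rekognition.compare_faces(
      source_image: { s3_object: { bucket: 'my-bucket', name: 'source.jpg' } },
      target_image: { s3_object: { bucket: 'my-bucket', name: 'target.jpg' } }
    )

    # Both fields are nil in this release; use the bounding boxes as-is.
    puts resp.source_image_orientation_correction.inspect #=> nil
    puts resp.target_image_orientation_correction.inspect #=> nil

    box = resp.source_image_face.bounding_box
    puts format('source face: left=%.2f top=%.2f', box.left, box.top)
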
@@ -737,21 +735,20 @@ module Aws::Rekognition
 #   @return [Array<Types::FaceDetail>]
 #
 # @!attribute [rw] orientation_correction
-#   The orientation of the input image (counter-clockwise direction). If
-#   your application displays the image, you can use this value to
-#   correct image orientation. The bounding box coordinates returned in
-#   `FaceDetails` represent face locations before the image orientation
-#   is corrected.
+#   The value of `OrientationCorrection` is always null.
 #
-#   <note markdown="1"> If the input image is in .jpeg format, it might contain exchangeable
-#   image (Exif) metadata that includes the image's orientation. If so,
-#   and the Exif metadata for the input image populates the orientation
-#   field, the value of `OrientationCorrection` is null. The
-#   `FaceDetails` bounding box coordinates represent face locations
-#   after Exif metadata is used to correct the image orientation. Images
-#   in .png format don't contain Exif metadata.
+#   If the input image is in .jpeg format, it might contain exchangeable
+#   image file format (Exif) metadata that includes the image's
+#   orientation. Amazon Rekognition uses this orientation information to
+#   perform image correction. The bounding box coordinates are
+#   translated to represent object locations after the orientation
+#   information in the Exif metadata is used to correct the image
+#   orientation. Images in .png format don't contain Exif metadata.
 #
-#   </note>
+#   Amazon Rekognition doesn’t perform image correction for images in
+#   .png format and .jpeg images without orientation information in the
+#   image Exif metadata. The bounding box coordinates aren't translated
+#   and represent the object locations before the image is rotated.
 #   @return [String]
 #
 class DetectFacesResponse < Struct.new(
@@ -812,16 +809,16 @@ module Aws::Rekognition
 #   The value of `OrientationCorrection` is always null.
 #
 #   If the input image is in .jpeg format, it might contain exchangeable
-#   image (Exif) metadata that includes the image's orientation. Amazon
-#   Rekognition uses this orientation information to perform image
-#   correction - the bounding box coordinates are translated to
-#   represent object locations after the orientation information in the
-#   Exif metadata is used to correct the image orientation. Images in
-#   .png format don't contain Exif metadata.
+#   image file format (Exif) metadata that includes the image's
+#   orientation. Amazon Rekognition uses this orientation information to
+#   perform image correction. The bounding box coordinates are
+#   translated to represent object locations after the orientation
+#   information in the Exif metadata is used to correct the image
+#   orientation. Images in .png format don't contain Exif metadata.
 #
 #   Amazon Rekognition doesn’t perform image correction for images in
 #   .png format and .jpeg images without orientation information in the
-#   image Exif metadata. The bounding box coordinates are not translated
+#   image Exif metadata. The bounding box coordinates aren't translated
 #   and represent the object locations before the image is rotated.
 #   @return [String]
 #
@@ -1919,20 +1916,31 @@ module Aws::Rekognition
 #   @return [Array<Types::FaceRecord>]
 #
 # @!attribute [rw] orientation_correction
-#   The orientation of the input image (counterclockwise direction). If
-#   your application displays the image, you can use this value to
-#   correct image orientation. The bounding box coordinates returned in
-#   `FaceRecords` represent face locations before the image orientation
-#   is corrected.
-#
-#   <note markdown="1"> If the input image is in jpeg format, it might contain exchangeable
-#   image (Exif) metadata. If so, and the Exif metadata populates the
-#   orientation field, the value of `OrientationCorrection` is null. The
-#   bounding box coordinates in `FaceRecords` represent face locations
-#   after Exif metadata is used to correct the image orientation. Images
-#   in .png format don't contain Exif metadata.
-#
-#   </note>
+#   If your collection is associated with a face detection model that's
+#   later than version 3.0, the value of `OrientationCorrection` is
+#   always null and no orientation information is returned.
+#
+#   If your collection is associated with a face detection model that's
+#   version 3.0 or earlier, the following applies:
+#
+#   * If the input image is in .jpeg format, it might contain
+#     exchangeable image file format (Exif) metadata that includes the
+#     image's orientation. Amazon Rekognition uses this orientation
+#     information to perform image correction - the bounding box
+#     coordinates are translated to represent object locations after the
+#     orientation information in the Exif metadata is used to correct
+#     the image orientation. Images in .png format don't contain Exif
+#     metadata. The value of `OrientationCorrection` is null.
+#
+#   * If the image doesn't contain orientation information in its Exif
+#     metadata, Amazon Rekognition returns an estimated orientation
+#     (ROTATE\_0, ROTATE\_90, ROTATE\_180, ROTATE\_270). Amazon
+#     Rekognition doesn’t perform image correction for images. The
+#     bounding box coordinates aren't translated and represent the
+#     object locations before the image is rotated.
+#
+#   Bounding box information is returned in the `FaceRecords` array. You
+#   can get the version of the face detection model by calling .
 #   @return [String]
 #
 # @!attribute [rw] face_model_version
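
The trailing "by calling ." is another dropped doc link, again most likely DescribeCollection. Only collections still bound to face model 3.0 or earlier can return an estimated rotation here; newer collections always return nil. A hedged sketch covering both cases (placeholder names):

    require 'aws-sdk-rekognition'

    rekognition = Aws::Rekognition::Client.new(region: 'us-east-1')

    resp = rekognition.index_faces(
      collection_id: 'my-collection',
      image: { s3_object: { bucket: 'my-bucket', name: 'photo.jpg' } }
    )

    case resp.orientation_correction
    when nil
      # Model > 3.0, or Exif orientation was present: boxes already corrected.
      puts 'bounding boxes are usable as-is'
    when 'ROTATE_0', 'ROTATE_90', 'ROTATE_180', 'ROTATE_270'
      # Model <= 3.0 and no Exif orientation: rotate coordinates yourself.
      puts "apply #{resp.orientation_correction} before drawing boxes"
    end

    resp.face_records.each { |record| puts record.face.face_id }
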
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: aws-sdk-rekognition
 version: !ruby/object:Gem::Version
-  version: 1.15.0
+  version: 1.16.0
 platform: ruby
 authors:
 - Amazon Web Services
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-11-20 00:00:00.000000000 Z
+date: 2018-11-21 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: aws-sdk-core