aws-sdk-rekognition 1.0.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/aws-sdk-rekognition.rb +1 -1
- data/lib/aws-sdk-rekognition/client.rb +194 -41
- data/lib/aws-sdk-rekognition/client_api.rb +59 -0
- data/lib/aws-sdk-rekognition/types.rb +203 -23
- metadata +5 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz:
- data.tar.gz:
+ metadata.gz: 2523eb562390e9e0d7933d2b0263c67504b30f05
+ data.tar.gz: 641ee3cf40966d546ebec72593c0a5b5dcd537fd
  SHA512:
- metadata.gz:
- data.tar.gz:
+ metadata.gz: 4b65d4b05f093c6b2b6642059f2ef4ad4f1194a1287b7b67735b88d630afc457311baf4d442366cbd891d3221a72b6bcf9c7d798d5b55518942e13849576e01f
+ data.tar.gz: 11b5e6a3c6e1bda43c180489baa0347fb1a8a277ef5ca49209c0d1d08837b0faf01db51373fcdec2d1ed1dd385872152c846c3541f5ae69533485ef3160090d1
data/lib/aws-sdk-rekognition/client.rb
CHANGED
@@ -155,8 +155,8 @@ module Aws::Rekognition

  # @!group API Operations

- # Compares a face in the *source* input image with each
- # the *target* input image.
+ # Compares a face in the *source* input image with each of the 100
+ # largest faces detected in the *target* input image.
  #
  # <note markdown="1"> If the source image contains multiple faces, the service detects the
  # largest face and compares it with each face detected in the target
@@ -164,6 +164,12 @@ module Aws::Rekognition
  #
  # </note>
  #
+ # You pass the input and target images either as base64-encoded image
+ # bytes or as a references to images in an Amazon S3 bucket. If you use
+ # the Amazon CLI to call Amazon Rekognition operations, passing image
+ # bytes is not supported. The image must be either a PNG or JPEG
+ # formatted file.
+ #
  # In response, the operation returns an array of face matches ordered by
  # similarity score in descending order. For each face match, the
  # response provides a bounding box of the face, facial landmarks, pose
@@ -188,6 +194,9 @@ module Aws::Rekognition
  # orientation information for the source and target images. Use these
  # values to display the images with the correct image orientation.
  #
+ # If no faces are detected in the source or target images,
+ # `CompareFaces` returns an `InvalidParameterException` error.
+ #
  # <note markdown="1"> This is a stateless API operation. That is, data returned by this
  # operation doesn't persist.
  #
@@ -199,10 +208,14 @@ module Aws::Rekognition
  # `rekognition:CompareFaces` action.
  #
  # @option params [required, Types::Image] :source_image
- # The
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  #
  # @option params [required, Types::Image] :target_image
- # The target image
+ # The target image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  #
  # @option params [Float] :similarity_threshold
  # The minimum level of confidence in the face matches that a match must
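For illustration, a minimal Ruby call matching the updated `CompareFaces` documentation above. The bucket and object names are hypothetical, the region choice is an assumption, and the 80% threshold is just an example:

    require 'aws-sdk-rekognition'

    # Hypothetical bucket/keys; pick a region where Rekognition is available.
    client = Aws::Rekognition::Client.new(region: 'us-east-1')

    resp = client.compare_faces({
      source_image: { s3_object: { bucket: 'my-bucket', name: 'source.jpg' } },
      target_image: { s3_object: { bucket: 'my-bucket', name: 'target.jpg' } },
      similarity_threshold: 80.0
    })

    resp.face_matches.each do |match|
      puts "similarity #{match.similarity.round(1)}, box left=#{match.face.bounding_box.left}"
    end

If neither image contains a detectable face, the call raises `InvalidParameterException`, as the note added above spells out.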
@@ -358,6 +371,7 @@ module Aws::Rekognition
  #
  # * {Types::CreateCollectionResponse#status_code #status_code} => Integer
  # * {Types::CreateCollectionResponse#collection_arn #collection_arn} => String
+ # * {Types::CreateCollectionResponse#face_model_version #face_model_version} => String
  #
  #
  # @example Example: To create a collection
@@ -384,6 +398,7 @@ module Aws::Rekognition
  #
  # resp.status_code #=> Integer
  # resp.collection_arn #=> String
+ # resp.face_model_version #=> String
  #
  # @overload create_collection(params = {})
  # @param [Hash] params ({})
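Since `CreateCollectionResponse` now carries `face_model_version`, a caller can record which face detection model a new collection is bound to. A sketch reusing the `client` from the CompareFaces example above (the collection ID is made up):

    resp = client.create_collection(collection_id: 'my-collection')
    puts "HTTP status: #{resp.status_code}"
    puts "face model:  #{resp.face_model_version}"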
@@ -490,18 +505,24 @@ module Aws::Rekognition
  req.send_request(options)
  end

- # Detects faces within an image
+ # Detects faces within an image that is provided as input.
  #
- #
- #
- #
- #
- #
+ # `DetectFaces` detects the 100 largest faces in the image. For each
+ # face detected, the operation returns face details including a bounding
+ # box of the face, a confidence value (that the bounding box contains a
+ # face), and a fixed set of attributes such as facial landmarks (for
+ # example, coordinates of eye and mouth), gender, presence of beard,
+ # sunglasses, etc.
  #
  # The face-detection algorithm is most effective on frontal faces. For
  # non-frontal or obscured faces, the algorithm may not detect the faces
  # or might detect faces with lower confidence.
  #
+ # You pass the input image either as base64-encoded image bytes or as a
+ # reference to an image in an Amazon S3 bucket. If you use the Amazon
+ # CLI to call Amazon Rekognition operations, passing image bytes is not
+ # supported. The image must be either a PNG or JPEG formatted file.
+ #
  # <note markdown="1"> This is a stateless API operation. That is, the operation does not
  # persist any data.
  #
@@ -513,8 +534,9 @@ module Aws::Rekognition
  # `rekognition:DetectFaces` action.
  #
  # @option params [required, Types::Image] :image
- # The image
- #
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  #
  # @option params [Array<String>] :attributes
  # An array of facial attributes you want to be returned. This can be the
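A hedged sketch of the `DetectFaces` call described above, again using a hypothetical S3 object; `attributes: ['ALL']` asks for the full attribute set instead of the default subset:

    resp = client.detect_faces({
      image: { s3_object: { bucket: 'my-bucket', name: 'group-photo.jpg' } },
      attributes: ['ALL']
    })

    resp.face_details.each do |face|
      box = face.bounding_box
      puts format('face at left=%.2f top=%.2f (confidence %.1f%%)', box.left, box.top, face.confidence)
    end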
@@ -667,6 +689,11 @@ module Aws::Rekognition
  # landscape, evening, and nature. For an example, see
  # get-started-exercise-detect-labels.
  #
+ # You pass the input image as base64-encoded image bytes or as a
+ # reference to an image in an Amazon S3 bucket. If you use the Amazon
+ # CLI to call Amazon Rekognition operations, passing image bytes is not
+ # supported. The image must be either a PNG or JPEG formatted file.
+ #
  # For each object, scene, and concept the API returns one or more
  # labels. Each label provides the object name, and the level of
  # confidence that the image contains the object. For example, suppose
@@ -694,12 +721,11 @@ module Aws::Rekognition
  # In this example, the detection algorithm more precisely identifies the
  # flower as a tulip.
  #
- #
- #
- #
- #
- #
- # `MaxLabels` parameter to limit the number of labels returned.
+ # In response, the API returns an array of labels. In addition, the
+ # response also includes the orientation correction. Optionally, you can
+ # specify `MinConfidence` to control the confidence threshold for the
+ # labels returned. The default is 50%. You can also add the `MaxLabels`
+ # parameter to limit the number of labels returned.
  #
  # <note markdown="1"> If the object detected is a person, the operation doesn't provide the
  # same facial details that the DetectFaces operation provides.
@@ -713,8 +739,9 @@ module Aws::Rekognition
  # `rekognition:DetectLabels` action.
  #
  # @option params [required, Types::Image] :image
- # The input image
- #
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  #
  # @option params [Integer] :max_labels
  # Maximum number of labels you want the service to return in the
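The `MinConfidence` (default 50%) and `MaxLabels` parameters described above map to a call like the following; the values and object names are illustrative only:

    resp = client.detect_labels({
      image: { s3_object: { bucket: 'my-bucket', name: 'garden.jpg' } },
      max_labels: 10,
      min_confidence: 70.0
    })

    resp.labels.each { |label| puts "#{label.name}: #{label.confidence.round(1)}%" }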
@@ -803,8 +830,15 @@ module Aws::Rekognition
  # to determine which types of content are appropriate. For information
  # about moderation labels, see image-moderation.
  #
+ # You pass the input image either as base64-encoded image bytes or as a
+ # reference to an image in an Amazon S3 bucket. If you use the Amazon
+ # CLI to call Amazon Rekognition operations, passing image bytes is not
+ # supported. The image must be either a PNG or JPEG formatted file.
+ #
  # @option params [required, Types::Image] :image
- # The input image as bytes or an S3 object.
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  #
  # @option params [Float] :min_confidence
  # Specifies the minimum confidence level for the labels to return.
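For completeness, a minimal `DetectModerationLabels` sketch along the same lines (bucket and key are hypothetical, and the 60% threshold is arbitrary):

    resp = client.detect_moderation_labels({
      image: { s3_object: { bucket: 'my-bucket', name: 'upload.jpg' } },
      min_confidence: 60.0
    })

    resp.moderation_labels.each do |label|
      puts "#{label.name} (parent: #{label.parent_name.inspect}, #{label.confidence.round(1)}%)"
    end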
@@ -846,6 +880,87 @@ module Aws::Rekognition
  req.send_request(options)
  end

+ # Detects text in the input image and converts it into machine-readable
+ # text.
+ #
+ # Pass the input image as base64-encoded image bytes or as a reference
+ # to an image in an Amazon S3 bucket. If you use the AWS CLI to call
+ # Amazon Rekognition operations, you must pass it as a reference to an
+ # image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is
+ # not supported. The image must be either a .png or .jpeg formatted
+ # file.
+ #
+ # The `DetectText` operation returns text in an array of elements,
+ # `TextDetections`. Each `TextDetection` element provides information
+ # about a single word or line of text that was detected in the image.
+ #
+ # A word is one or more ISO basic latin script characters that are not
+ # separated by spaces. `DetectText` can detect up to 50 words in an
+ # image.
+ #
+ # A line is a string of equally spaced words. A line isn't necessarily
+ # a complete sentence. For example, a driver's license number is
+ # detected as a line. A line ends when there is no aligned text after
+ # it. Also, a line ends when there is a large gap between words,
+ # relative to the length of the words. This means, depending on the gap
+ # between words, Amazon Rekognition may detect multiple lines in text
+ # aligned in the same direction. Periods don't represent the end of a
+ # line. If a sentence spans multiple lines, the `DetectText` operation
+ # returns multiple lines.
+ #
+ # To determine whether a `TextDetection` element is a line of text or a
+ # word, use the `TextDetection` object `Type` field.
+ #
+ # To be detected, text must be within +/- 30 degrees orientation of the
+ # horizontal axis.
+ #
+ # For more information, see text-detection.
+ #
+ # @option params [required, Types::Image] :image
+ # The input image as base64-encoded bytes or an Amazon S3 object. If you
+ # use the AWS CLI to call Amazon Rekognition operations, you can't pass
+ # image bytes.
+ #
+ # @return [Types::DetectTextResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DetectTextResponse#text_detections #text_detections} => Array<Types::TextDetection>
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.detect_text({
+ # image: { # required
+ # bytes: "data",
+ # s3_object: {
+ # bucket: "S3Bucket",
+ # name: "S3ObjectName",
+ # version: "S3ObjectVersion",
+ # },
+ # },
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.text_detections #=> Array
+ # resp.text_detections[0].detected_text #=> String
+ # resp.text_detections[0].type #=> String, one of "LINE", "WORD"
+ # resp.text_detections[0].id #=> Integer
+ # resp.text_detections[0].parent_id #=> Integer
+ # resp.text_detections[0].confidence #=> Float
+ # resp.text_detections[0].geometry.bounding_box.width #=> Float
+ # resp.text_detections[0].geometry.bounding_box.height #=> Float
+ # resp.text_detections[0].geometry.bounding_box.left #=> Float
+ # resp.text_detections[0].geometry.bounding_box.top #=> Float
+ # resp.text_detections[0].geometry.polygon #=> Array
+ # resp.text_detections[0].geometry.polygon[0].x #=> Float
+ # resp.text_detections[0].geometry.polygon[0].y #=> Float
+ #
+ # @overload detect_text(params = {})
+ # @param [Hash] params ({})
+ def detect_text(params = {}, options = {})
+ req = build_request(:detect_text, params)
+ req.send_request(options)
+ end
+
  # Gets the name and additional information about a celebrity based on
  # his or her Rekognition ID. The additional information is returned as
  # an array of URLs. If there is no additional information about the
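The new `detect_text` operation documented above can be exercised with a call like the following sketch (the S3 object name is hypothetical); splitting the result on the `type` field separates whole lines from individual words:

    resp = client.detect_text({
      image: { s3_object: { bucket: 'my-bucket', name: 'street-sign.png' } }
    })

    lines = resp.text_detections.select { |d| d.type == 'LINE' }
    words = resp.text_detections.select { |d| d.type == 'WORD' }

    lines.each { |line| puts "#{line.id}: #{line.detected_text} (#{line.confidence.round(1)}%)" }
    puts "#{words.size} words detected (DetectText returns at most 50 words per image)"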
@@ -893,7 +1008,14 @@ module Aws::Rekognition
  # uses feature vectors when performing face match and search operations
  # using the and operations.
  #
- # If you
+ # If you are using version 1.0 of the face detection model, `IndexFaces`
+ # indexes the 15 largest faces in the input image. Later versions of the
+ # face detection model index the 100 largest faces in the input image.
+ # To determine which version of the model you are using, check the the
+ # value of `FaceModelVersion` in the response from `IndexFaces`. For
+ # more information, see face-detection-model.
+ #
+ # If you provide the optional `ExternalImageID` for the input image you
  # provided, Amazon Rekognition associates this ID with all faces that it
  # detects. When you call the operation, the response returns the
  # external ID. You can use this external image ID to create a
@@ -912,6 +1034,11 @@ module Aws::Rekognition
  # the same collection, and use the same external ID in the `IndexFaces`
  # operation, Amazon Rekognition doesn't save duplicate face metadata.
  #
+ # The input image is passed either as base64-encoded image bytes or as a
+ # reference to an image in an Amazon S3 bucket. If you use the Amazon
+ # CLI to call Amazon Rekognition operations, passing image bytes is not
+ # supported. The image must be either a PNG or JPEG formatted file.
+ #
  # For an example, see example2.
  #
  # This operation requires permissions to perform the
@@ -922,7 +1049,9 @@ module Aws::Rekognition
  # that are detected in the input images.
  #
  # @option params [required, Types::Image] :image
- # The input image as bytes or an S3 object.
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  #
  # @option params [String] :external_image_id
  # ID you want to assign to all the faces detected in the image.
@@ -944,6 +1073,7 @@ module Aws::Rekognition
  #
  # * {Types::IndexFacesResponse#face_records #face_records} => Array<Types::FaceRecord>
  # * {Types::IndexFacesResponse#orientation_correction #orientation_correction} => String
+ # * {Types::IndexFacesResponse#face_model_version #face_model_version} => String
  #
  #
  # @example Example: To add a face to a collection
@@ -1149,6 +1279,7 @@ module Aws::Rekognition
  # resp.face_records[0].face_detail.quality.sharpness #=> Float
  # resp.face_records[0].face_detail.confidence #=> Float
  # resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
+ # resp.face_model_version #=> String
  #
  # @overload index_faces(params = {})
  # @param [Hash] params ({})
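A sketch of `index_faces` that also reads the new `face_model_version` field, which the documentation above uses to distinguish the 15-face limit of model 1.0 from the 100-face limit of later models. Collection, bucket, and external ID are made up:

    resp = client.index_faces({
      collection_id: 'my-collection',
      image: { s3_object: { bucket: 'my-bucket', name: 'employee.jpg' } },
      external_image_id: 'employee-42',
      detection_attributes: ['DEFAULT']
    })

    puts "collection face model: #{resp.face_model_version}"
    resp.face_records.each { |record| puts "indexed face #{record.face.face_id}" }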
@@ -1176,6 +1307,7 @@ module Aws::Rekognition
  #
  # * {Types::ListCollectionsResponse#collection_ids #collection_ids} => Array<String>
  # * {Types::ListCollectionsResponse#next_token #next_token} => String
+ # * {Types::ListCollectionsResponse#face_model_versions #face_model_versions} => Array<String>
  #
  #
  # @example Example: To list the collections
@@ -1204,6 +1336,8 @@ module Aws::Rekognition
  # resp.collection_ids #=> Array
  # resp.collection_ids[0] #=> String
  # resp.next_token #=> String
+ # resp.face_model_versions #=> Array
+ # resp.face_model_versions[0] #=> String
  #
  # @overload list_collections(params = {})
  # @param [Hash] params ({})
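Because `face_model_versions` is documented as index-aligned with `collection_ids`, pairing the two arrays is enough to report the model behind each collection:

    resp = client.list_collections(max_results: 10)
    resp.collection_ids.each_with_index do |id, i|
      puts "#{id} -> face detection model #{resp.face_model_versions[i]}"
    end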
@@ -1236,6 +1370,7 @@ module Aws::Rekognition
  #
  # * {Types::ListFacesResponse#faces #faces} => Array<Types::Face>
  # * {Types::ListFacesResponse#next_token #next_token} => String
+ # * {Types::ListFacesResponse#face_model_version #face_model_version} => String
  #
  #
  # @example Example: To list the faces in a collection
@@ -1394,6 +1529,7 @@ module Aws::Rekognition
  # resp.faces[0].external_image_id #=> String
  # resp.faces[0].confidence #=> Float
  # resp.next_token #=> String
+ # resp.face_model_version #=> String
  #
  # @overload list_faces(params = {})
  # @param [Hash] params ({})
@@ -1402,21 +1538,20 @@ module Aws::Rekognition
  req.send_request(options)
  end

- # Returns an array of celebrities recognized in the input image.
- #
- #
- #
- #
- #
- #
- #
- #
- #
- #
- #
- #
- #
- # that you can use to locate the celebrity's face on the image.
+ # Returns an array of celebrities recognized in the input image. For
+ # more information, see celebrity-recognition.
+ #
+ # `RecognizeCelebrities` returns the 100 largest faces in the image. It
+ # lists recognized celebrities in the `CelebrityFaces` array and
+ # unrecognized faces in the `UnrecognizedFaces` array.
+ # `RecognizeCelebrities` doesn't return celebrities whose faces are not
+ # amongst the largest 100 faces in the image.
+ #
+ # For each celebrity recognized, the `RecognizeCelebrities` returns a
+ # `Celebrity` object. The `Celebrity` object contains the celebrity
+ # name, ID, URL links to additional information, match confidence, and a
+ # `ComparedFace` object that you can use to locate the celebrity's face
+ # on the image.
  #
  # Rekognition does not retain information about which images a celebrity
  # has been recognized in. Your application must store this information
@@ -1425,13 +1560,20 @@ module Aws::Rekognition
  # information URLs returned by `RecognizeCelebrities`, you will need the
  # ID to identify the celebrity in a call to the operation.
  #
+ # You pass the imput image either as base64-encoded image bytes or as a
+ # reference to an image in an Amazon S3 bucket. If you use the Amazon
+ # CLI to call Amazon Rekognition operations, passing image bytes is not
+ # supported. The image must be either a PNG or JPEG formatted file.
+ #
  # For an example, see recognize-celebrities-tutorial.
  #
  # This operation requires permissions to perform the
  # `rekognition:RecognizeCelebrities` operation.
  #
  # @option params [required, Types::Image] :image
- # The input image
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  #
  # @return [Types::RecognizeCelebritiesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
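A short `recognize_celebrities` sketch matching the rewritten documentation above (the image reference is hypothetical):

    resp = client.recognize_celebrities({
      image: { s3_object: { bucket: 'my-bucket', name: 'red-carpet.jpg' } }
    })

    resp.celebrity_faces.each do |celebrity|
      puts "#{celebrity.name} (id #{celebrity.id}, match confidence #{celebrity.match_confidence})"
    end
    puts "unrecognized faces: #{resp.unrecognized_faces.size}"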
@@ -1540,6 +1682,7 @@ module Aws::Rekognition
  #
  # * {Types::SearchFacesResponse#searched_face_id #searched_face_id} => String
  # * {Types::SearchFacesResponse#face_matches #face_matches} => Array<Types::FaceMatch>
+ # * {Types::SearchFacesResponse#face_model_version #face_model_version} => String
  #
  #
  # @example Example: To delete a face
@@ -1624,6 +1767,7 @@ module Aws::Rekognition
  # resp.face_matches[0].face.image_id #=> String
  # resp.face_matches[0].face.external_image_id #=> String
  # resp.face_matches[0].face.confidence #=> Float
+ # resp.face_model_version #=> String
  #
  # @overload search_faces(params = {})
  # @param [Hash] params ({})
@@ -1647,6 +1791,11 @@ module Aws::Rekognition
  #
  # </note>
  #
+ # You pass the input image either as base64-encoded image bytes or as a
+ # reference to an image in an Amazon S3 bucket. If you use the Amazon
+ # CLI to call Amazon Rekognition operations, passing image bytes is not
+ # supported. The image must be either a PNG or JPEG formatted file.
+ #
  # The response returns an array of faces that match, ordered by
  # similarity score with the highest similarity first. More specifically,
  # it is an array of metadata for each face match found. Along with the
@@ -1665,7 +1814,9 @@ module Aws::Rekognition
  # ID of the collection to search.
  #
  # @option params [required, Types::Image] :image
- # The input image as bytes or an S3 object.
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  #
  # @option params [Integer] :max_faces
  # Maximum number of faces to return. The operation returns the maximum
@@ -1681,6 +1832,7 @@ module Aws::Rekognition
  # * {Types::SearchFacesByImageResponse#searched_face_bounding_box #searched_face_bounding_box} => Types::BoundingBox
  # * {Types::SearchFacesByImageResponse#searched_face_confidence #searched_face_confidence} => Float
  # * {Types::SearchFacesByImageResponse#face_matches #face_matches} => Array<Types::FaceMatch>
+ # * {Types::SearchFacesByImageResponse#face_model_version #face_model_version} => String
  #
  #
  # @example Example: To search for faces matching a supplied image
@@ -1759,6 +1911,7 @@ module Aws::Rekognition
  # resp.face_matches[0].face.image_id #=> String
  # resp.face_matches[0].face.external_image_id #=> String
  # resp.face_matches[0].face.confidence #=> Float
+ # resp.face_model_version #=> String
  #
  # @overload search_faces_by_image(params = {})
  # @param [Hash] params ({})
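And a `search_faces_by_image` sketch that surfaces the new `face_model_version` field alongside the matches (collection and image are hypothetical; the 90% threshold is arbitrary):

    resp = client.search_faces_by_image({
      collection_id: 'my-collection',
      image: { s3_object: { bucket: 'my-bucket', name: 'visitor.jpg' } },
      max_faces: 5,
      face_match_threshold: 90.0
    })

    resp.face_matches.each { |m| puts "#{m.face.face_id} (#{m.similarity.round(1)}% similar)" }
    puts "collection face model: #{resp.face_model_version}"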
@@ -1780,7 +1933,7 @@ module Aws::Rekognition
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-rekognition'
- context[:gem_version] = '1.
+ context[:gem_version] = '1.1.0'
  Seahorse::Client::Request.new(handlers, context)
  end

data/lib/aws-sdk-rekognition/client_api.rb
CHANGED

@@ -43,6 +43,8 @@ module Aws::Rekognition
  DetectLabelsResponse = Shapes::StructureShape.new(name: 'DetectLabelsResponse')
  DetectModerationLabelsRequest = Shapes::StructureShape.new(name: 'DetectModerationLabelsRequest')
  DetectModerationLabelsResponse = Shapes::StructureShape.new(name: 'DetectModerationLabelsResponse')
+ DetectTextRequest = Shapes::StructureShape.new(name: 'DetectTextRequest')
+ DetectTextResponse = Shapes::StructureShape.new(name: 'DetectTextResponse')
  Emotion = Shapes::StructureShape.new(name: 'Emotion')
  EmotionName = Shapes::StringShape.new(name: 'EmotionName')
  Emotions = Shapes::ListShape.new(name: 'Emotions')
@@ -57,11 +59,13 @@ module Aws::Rekognition
  FaceList = Shapes::ListShape.new(name: 'FaceList')
  FaceMatch = Shapes::StructureShape.new(name: 'FaceMatch')
  FaceMatchList = Shapes::ListShape.new(name: 'FaceMatchList')
+ FaceModelVersionList = Shapes::ListShape.new(name: 'FaceModelVersionList')
  FaceRecord = Shapes::StructureShape.new(name: 'FaceRecord')
  FaceRecordList = Shapes::ListShape.new(name: 'FaceRecordList')
  Float = Shapes::FloatShape.new(name: 'Float')
  Gender = Shapes::StructureShape.new(name: 'Gender')
  GenderType = Shapes::StringShape.new(name: 'GenderType')
+ Geometry = Shapes::StructureShape.new(name: 'Geometry')
  GetCelebrityInfoRequest = Shapes::StructureShape.new(name: 'GetCelebrityInfoRequest')
  GetCelebrityInfoResponse = Shapes::StructureShape.new(name: 'GetCelebrityInfoResponse')
  Image = Shapes::StructureShape.new(name: 'Image')
@@ -94,6 +98,8 @@ module Aws::Rekognition
  PageSize = Shapes::IntegerShape.new(name: 'PageSize')
  PaginationToken = Shapes::StringShape.new(name: 'PaginationToken')
  Percent = Shapes::FloatShape.new(name: 'Percent')
+ Point = Shapes::StructureShape.new(name: 'Point')
+ Polygon = Shapes::ListShape.new(name: 'Polygon')
  Pose = Shapes::StructureShape.new(name: 'Pose')
  ProvisionedThroughputExceededException = Shapes::StructureShape.new(name: 'ProvisionedThroughputExceededException')
  RecognizeCelebritiesRequest = Shapes::StructureShape.new(name: 'RecognizeCelebritiesRequest')
@@ -112,6 +118,9 @@ module Aws::Rekognition
  Smile = Shapes::StructureShape.new(name: 'Smile')
  String = Shapes::StringShape.new(name: 'String')
  Sunglasses = Shapes::StructureShape.new(name: 'Sunglasses')
+ TextDetection = Shapes::StructureShape.new(name: 'TextDetection')
+ TextDetectionList = Shapes::ListShape.new(name: 'TextDetectionList')
+ TextTypes = Shapes::StringShape.new(name: 'TextTypes')
  ThrottlingException = Shapes::StructureShape.new(name: 'ThrottlingException')
  UInteger = Shapes::IntegerShape.new(name: 'UInteger')
  Url = Shapes::StringShape.new(name: 'Url')
@@ -182,6 +191,7 @@ module Aws::Rekognition

  CreateCollectionResponse.add_member(:status_code, Shapes::ShapeRef.new(shape: UInteger, location_name: "StatusCode"))
  CreateCollectionResponse.add_member(:collection_arn, Shapes::ShapeRef.new(shape: String, location_name: "CollectionArn"))
+ CreateCollectionResponse.add_member(:face_model_version, Shapes::ShapeRef.new(shape: String, location_name: "FaceModelVersion"))
  CreateCollectionResponse.struct_class = Types::CreateCollectionResponse

  DeleteCollectionRequest.add_member(:collection_id, Shapes::ShapeRef.new(shape: CollectionId, required: true, location_name: "CollectionId"))
@@ -221,6 +231,12 @@ module Aws::Rekognition
  DetectModerationLabelsResponse.add_member(:moderation_labels, Shapes::ShapeRef.new(shape: ModerationLabels, location_name: "ModerationLabels"))
  DetectModerationLabelsResponse.struct_class = Types::DetectModerationLabelsResponse

+ DetectTextRequest.add_member(:image, Shapes::ShapeRef.new(shape: Image, required: true, location_name: "Image"))
+ DetectTextRequest.struct_class = Types::DetectTextRequest
+
+ DetectTextResponse.add_member(:text_detections, Shapes::ShapeRef.new(shape: TextDetectionList, location_name: "TextDetections"))
+ DetectTextResponse.struct_class = Types::DetectTextResponse
+
  Emotion.add_member(:type, Shapes::ShapeRef.new(shape: EmotionName, location_name: "Type"))
  Emotion.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
  Emotion.struct_class = Types::Emotion
@@ -271,6 +287,8 @@ module Aws::Rekognition

  FaceMatchList.member = Shapes::ShapeRef.new(shape: FaceMatch)

+ FaceModelVersionList.member = Shapes::ShapeRef.new(shape: String)
+
  FaceRecord.add_member(:face, Shapes::ShapeRef.new(shape: Face, location_name: "Face"))
  FaceRecord.add_member(:face_detail, Shapes::ShapeRef.new(shape: FaceDetail, location_name: "FaceDetail"))
  FaceRecord.struct_class = Types::FaceRecord
@@ -281,6 +299,10 @@ module Aws::Rekognition
  Gender.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
  Gender.struct_class = Types::Gender

+ Geometry.add_member(:bounding_box, Shapes::ShapeRef.new(shape: BoundingBox, location_name: "BoundingBox"))
+ Geometry.add_member(:polygon, Shapes::ShapeRef.new(shape: Polygon, location_name: "Polygon"))
+ Geometry.struct_class = Types::Geometry
+
  GetCelebrityInfoRequest.add_member(:id, Shapes::ShapeRef.new(shape: RekognitionUniqueId, required: true, location_name: "Id"))
  GetCelebrityInfoRequest.struct_class = Types::GetCelebrityInfoRequest

@@ -304,6 +326,7 @@ module Aws::Rekognition

  IndexFacesResponse.add_member(:face_records, Shapes::ShapeRef.new(shape: FaceRecordList, location_name: "FaceRecords"))
  IndexFacesResponse.add_member(:orientation_correction, Shapes::ShapeRef.new(shape: OrientationCorrection, location_name: "OrientationCorrection"))
+ IndexFacesResponse.add_member(:face_model_version, Shapes::ShapeRef.new(shape: String, location_name: "FaceModelVersion"))
  IndexFacesResponse.struct_class = Types::IndexFacesResponse

  Label.add_member(:name, Shapes::ShapeRef.new(shape: String, location_name: "Name"))
@@ -325,6 +348,7 @@ module Aws::Rekognition

  ListCollectionsResponse.add_member(:collection_ids, Shapes::ShapeRef.new(shape: CollectionIdList, location_name: "CollectionIds"))
  ListCollectionsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
+ ListCollectionsResponse.add_member(:face_model_versions, Shapes::ShapeRef.new(shape: FaceModelVersionList, location_name: "FaceModelVersions"))
  ListCollectionsResponse.struct_class = Types::ListCollectionsResponse

  ListFacesRequest.add_member(:collection_id, Shapes::ShapeRef.new(shape: CollectionId, required: true, location_name: "CollectionId"))
@@ -334,6 +358,7 @@ module Aws::Rekognition

  ListFacesResponse.add_member(:faces, Shapes::ShapeRef.new(shape: FaceList, location_name: "Faces"))
  ListFacesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "NextToken"))
+ ListFacesResponse.add_member(:face_model_version, Shapes::ShapeRef.new(shape: String, location_name: "FaceModelVersion"))
  ListFacesResponse.struct_class = Types::ListFacesResponse

  ModerationLabel.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
@@ -351,6 +376,12 @@ module Aws::Rekognition
  Mustache.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
  Mustache.struct_class = Types::Mustache

+ Point.add_member(:x, Shapes::ShapeRef.new(shape: Float, location_name: "X"))
+ Point.add_member(:y, Shapes::ShapeRef.new(shape: Float, location_name: "Y"))
+ Point.struct_class = Types::Point
+
+ Polygon.member = Shapes::ShapeRef.new(shape: Point)
+
  Pose.add_member(:roll, Shapes::ShapeRef.new(shape: Degree, location_name: "Roll"))
  Pose.add_member(:yaw, Shapes::ShapeRef.new(shape: Degree, location_name: "Yaw"))
  Pose.add_member(:pitch, Shapes::ShapeRef.new(shape: Degree, location_name: "Pitch"))
@@ -378,6 +409,7 @@ module Aws::Rekognition
  SearchFacesByImageResponse.add_member(:searched_face_bounding_box, Shapes::ShapeRef.new(shape: BoundingBox, location_name: "SearchedFaceBoundingBox"))
  SearchFacesByImageResponse.add_member(:searched_face_confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "SearchedFaceConfidence"))
  SearchFacesByImageResponse.add_member(:face_matches, Shapes::ShapeRef.new(shape: FaceMatchList, location_name: "FaceMatches"))
+ SearchFacesByImageResponse.add_member(:face_model_version, Shapes::ShapeRef.new(shape: String, location_name: "FaceModelVersion"))
  SearchFacesByImageResponse.struct_class = Types::SearchFacesByImageResponse

  SearchFacesRequest.add_member(:collection_id, Shapes::ShapeRef.new(shape: CollectionId, required: true, location_name: "CollectionId"))
@@ -388,6 +420,7 @@ module Aws::Rekognition

  SearchFacesResponse.add_member(:searched_face_id, Shapes::ShapeRef.new(shape: FaceId, location_name: "SearchedFaceId"))
  SearchFacesResponse.add_member(:face_matches, Shapes::ShapeRef.new(shape: FaceMatchList, location_name: "FaceMatches"))
+ SearchFacesResponse.add_member(:face_model_version, Shapes::ShapeRef.new(shape: String, location_name: "FaceModelVersion"))
  SearchFacesResponse.struct_class = Types::SearchFacesResponse

  Smile.add_member(:value, Shapes::ShapeRef.new(shape: Boolean, location_name: "Value"))
@@ -398,6 +431,16 @@ module Aws::Rekognition
  Sunglasses.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
  Sunglasses.struct_class = Types::Sunglasses

+ TextDetection.add_member(:detected_text, Shapes::ShapeRef.new(shape: String, location_name: "DetectedText"))
+ TextDetection.add_member(:type, Shapes::ShapeRef.new(shape: TextTypes, location_name: "Type"))
+ TextDetection.add_member(:id, Shapes::ShapeRef.new(shape: UInteger, location_name: "Id"))
+ TextDetection.add_member(:parent_id, Shapes::ShapeRef.new(shape: UInteger, location_name: "ParentId"))
+ TextDetection.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
+ TextDetection.add_member(:geometry, Shapes::ShapeRef.new(shape: Geometry, location_name: "Geometry"))
+ TextDetection.struct_class = Types::TextDetection
+
+ TextDetectionList.member = Shapes::ShapeRef.new(shape: TextDetection)
+
  Urls.member = Shapes::ShapeRef.new(shape: Url)


@@ -521,6 +564,22 @@ module Aws::Rekognition
  o.errors << Shapes::ShapeRef.new(shape: InvalidImageFormatException)
  end)

+ api.add_operation(:detect_text, Seahorse::Model::Operation.new.tap do |o|
+ o.name = "DetectText"
+ o.http_method = "POST"
+ o.http_request_uri = "/"
+ o.input = Shapes::ShapeRef.new(shape: DetectTextRequest)
+ o.output = Shapes::ShapeRef.new(shape: DetectTextResponse)
+ o.errors << Shapes::ShapeRef.new(shape: InvalidS3ObjectException)
+ o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
+ o.errors << Shapes::ShapeRef.new(shape: ImageTooLargeException)
+ o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerError)
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
+ o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
+ o.errors << Shapes::ShapeRef.new(shape: InvalidImageFormatException)
+ end)
+
  api.add_operation(:get_celebrity_info, Seahorse::Model::Operation.new.tap do |o|
  o.name = "GetCelebrityInfo"
  o.http_method = "POST"
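The operation registration above lists the error shapes `DetectText` can raise. In the generated SDK these surface as classes under `Aws::Rekognition::Errors`, so a caller might guard the call roughly like this (a sketch, not exhaustive; bucket and key are hypothetical):

    begin
      client.detect_text(image: { s3_object: { bucket: 'my-bucket', name: 'receipt.png' } })
    rescue Aws::Rekognition::Errors::InvalidImageFormatException
      warn 'image must be a PNG or JPEG file'
    rescue Aws::Rekognition::Errors::ImageTooLargeException
      warn 'image exceeds the service size limit'
    rescue Aws::Rekognition::Errors::ThrottlingException,
           Aws::Rekognition::Errors::ProvisionedThroughputExceededException => e
      warn "throttled: #{e.message}" # retry with backoff
    end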
data/lib/aws-sdk-rekognition/types.rb
CHANGED

@@ -46,10 +46,10 @@ module Aws::Rekognition
  include Aws::Structure
  end

- # Identifies the bounding box around the object or
- # (x-coordinate) and `top` (y-coordinate) are coordinates
- # the top and left sides of the bounding box. Note that the
- # corner of the image is the origin (0,0).
+ # Identifies the bounding box around the object, face or text. The
+ # `left` (x-coordinate) and `top` (y-coordinate) are coordinates
+ # representing the top and left sides of the bounding box. Note that the
+ # upper-left corner of the image is the origin (0,0).
  #
  # The `top` and `left` values returned are ratios of the overall image
  # size. For example, if the input image is 700x200 pixels, and the
@@ -177,11 +177,15 @@ module Aws::Rekognition
  # }
  #
  # @!attribute [rw] source_image
- # The
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  # @return [Types::Image]
  #
  # @!attribute [rw] target_image
- # The target image
+ # The target image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  # @return [Types::Image]
  #
  # @!attribute [rw] similarity_threshold
@@ -336,9 +340,15 @@ module Aws::Rekognition
  # manage permissions on your resources.
  # @return [String]
  #
+ # @!attribute [rw] face_model_version
+ # Version number of the face detection model associated with the
+ # collection you are creating.
+ # @return [String]
+ #
  class CreateCollectionResponse < Struct.new(
  :status_code,
- :collection_arn
+ :collection_arn,
+ :face_model_version)
  include Aws::Structure
  end

@@ -414,8 +424,9 @@ module Aws::Rekognition
  # }
  #
  # @!attribute [rw] image
- # The image
- #
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  # @return [Types::Image]
  #
  # @!attribute [rw] attributes
@@ -483,8 +494,9 @@ module Aws::Rekognition
  # }
  #
  # @!attribute [rw] image
- # The input image
- #
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  # @return [Types::Image]
  #
  # @!attribute [rw] max_labels
@@ -549,7 +561,9 @@ module Aws::Rekognition
  # }
  #
  # @!attribute [rw] image
- # The input image as bytes or an S3 object.
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  # @return [Types::Image]
  #
  # @!attribute [rw] min_confidence
@@ -569,9 +583,9 @@ module Aws::Rekognition

  # @!attribute [rw] moderation_labels
  # An array of labels for explicit or suggestive adult content found in
- # the image. The list includes the top-level label and each
- # label detected in the image. This is useful for
- # categories of content.
+ # the image. The list includes the top-level label and each
+ # second-level label detected in the image. This is useful for
+ # filtering specific categories of content.
  # @return [Array<Types::ModerationLabel>]
  #
  class DetectModerationLabelsResponse < Struct.new(
@@ -579,6 +593,40 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # @note When making an API call, you may pass DetectTextRequest
+ # data as a hash:
+ #
+ # {
+ # image: { # required
+ # bytes: "data",
+ # s3_object: {
+ # bucket: "S3Bucket",
+ # name: "S3ObjectName",
+ # version: "S3ObjectVersion",
+ # },
+ # },
+ # }
+ #
+ # @!attribute [rw] image
+ # The input image as base64-encoded bytes or an Amazon S3 object. If
+ # you use the AWS CLI to call Amazon Rekognition operations, you
+ # can't pass image bytes.
+ # @return [Types::Image]
+ #
+ class DetectTextRequest < Struct.new(
+ :image)
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] text_detections
+ # An array of text that was detected in the input image.
+ # @return [Array<Types::TextDetection>]
+ #
+ class DetectTextResponse < Struct.new(
+ :text_detections)
+ include Aws::Structure
+ end
+
  # The emotions detected on the face, and the confidence level in the
  # determination. For example, HAPPY, SAD, and ANGRY.
  #
@@ -814,6 +862,24 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # Information about where text detected by is located on an image.
+ #
+ # @!attribute [rw] bounding_box
+ # An axis-aligned coarse representation of the detected text's
+ # location on the image.
+ # @return [Types::BoundingBox]
+ #
+ # @!attribute [rw] polygon
+ # Within the bounding box, a fine-grained polygon around the detected
+ # text.
+ # @return [Array<Types::Point>]
+ #
+ class Geometry < Struct.new(
+ :bounding_box,
+ :polygon)
+ include Aws::Structure
+ end
+
  # @note When making an API call, you may pass GetCelebrityInfoRequest
  # data as a hash:
  #
@@ -939,7 +1005,9 @@ module Aws::Rekognition
  # @return [String]
  #
  # @!attribute [rw] image
- # The input image as bytes or an S3 object.
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  # @return [Types::Image]
  #
  # @!attribute [rw] external_image_id
@@ -990,9 +1058,15 @@ module Aws::Rekognition
  # </note>
  # @return [String]
  #
+ # @!attribute [rw] face_model_version
+ # Version number of the face detection model associated with the input
+ # collection (`CollectionId`).
+ # @return [String]
+ #
  class IndexFacesResponse < Struct.new(
  :face_records,
- :orientation_correction
+ :orientation_correction,
+ :face_model_version)
  include Aws::Structure
  end

@@ -1072,9 +1146,17 @@ module Aws::Rekognition
  # collection IDs.
  # @return [String]
  #
+ # @!attribute [rw] face_model_versions
+ # Version numbers of the face detection models associated with the
+ # collections in the array `CollectionIds`. For example, the value of
+ # `FaceModelVersions[2]` is the version number for the face detection
+ # model used by the collection in `CollectionId[2]`.
+ # @return [Array<String>]
+ #
  class ListCollectionsResponse < Struct.new(
  :collection_ids,
- :next_token
+ :next_token,
+ :face_model_versions)
  include Aws::Structure
  end

@@ -1119,9 +1201,15 @@ module Aws::Rekognition
  # of faces.
  # @return [String]
  #
+ # @!attribute [rw] face_model_version
+ # Version number of the face detection model associated with the input
+ # collection (`CollectionId`).
+ # @return [String]
+ #
  class ListFacesResponse < Struct.new(
  :faces,
- :next_token
+ :next_token,
+ :face_model_version)
  include Aws::Structure
  end

@@ -1189,6 +1277,29 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # The X and Y coordinates of a point on an image. The X and Y values
+ # returned are ratios of the overall image size. For example, if the
+ # input image is 700x200 and the operation returns X=0.5 and Y=0.25,
+ # then the point is at the (350,50) pixel coordinate on the image.
+ #
+ # An array of `Point` objects, `Polygon`, is returned by . `Polygon`
+ # represents a fine-grained polygon around detected text. For more
+ # information, see .
+ #
+ # @!attribute [rw] x
+ # The value of the X coordinate for a point on a `Polygon`.
+ # @return [Float]
+ #
+ # @!attribute [rw] y
+ # The value of the Y coordinate for a point on a `Polygon`.
+ # @return [Float]
+ #
+ class Point < Struct.new(
+ :x,
+ :y)
+ include Aws::Structure
+ end
+
  # Indicates the pose of the face as determined by its pitch, roll, and
  # yaw.
  #
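The `Point` documentation above defines X and Y as ratios of the image size (X=0.5 on a 700-pixel-wide image is pixel 350). Converting a returned polygon to pixel coordinates therefore only needs the source image dimensions, which the caller has to know; a small sketch with a hypothetical helper:

    # width and height are the dimensions of the image you submitted.
    def polygon_to_pixels(polygon, width, height)
      polygon.map { |point| [(point.x * width).round, (point.y * height).round] }
    end

    # For a 700x200 image, a point with x=0.5, y=0.25 maps to [350, 50].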
@@ -1226,7 +1337,9 @@ module Aws::Rekognition
  # }
  #
  # @!attribute [rw] image
- # The input image
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  # @return [Types::Image]
  #
  class RecognizeCelebritiesRequest < Struct.new(
@@ -1329,7 +1442,9 @@ module Aws::Rekognition
  # @return [String]
  #
  # @!attribute [rw] image
- # The input image as bytes or an S3 object.
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
  # @return [Types::Image]
  #
  # @!attribute [rw] max_faces
@@ -1366,10 +1481,16 @@ module Aws::Rekognition
  # confidence in the match.
  # @return [Array<Types::FaceMatch>]
  #
+ # @!attribute [rw] face_model_version
+ # Version number of the face detection model associated with the input
+ # collection (`CollectionId`).
+ # @return [String]
+ #
  class SearchFacesByImageResponse < Struct.new(
  :searched_face_bounding_box,
  :searched_face_confidence,
- :face_matches
+ :face_matches,
+ :face_model_version)
  include Aws::Structure
  end

@@ -1419,9 +1540,15 @@ module Aws::Rekognition
  # confidence in the match.
  # @return [Array<Types::FaceMatch>]
  #
+ # @!attribute [rw] face_model_version
+ # Version number of the face detection model associated with the input
+ # collection (`CollectionId`).
+ # @return [String]
+ #
  class SearchFacesResponse < Struct.new(
  :searched_face_id,
- :face_matches
+ :face_matches,
+ :face_model_version)
  include Aws::Structure
  end

@@ -1460,5 +1587,58 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # Information about a word or line of text detected by .
+ #
+ # The `DetectedText` field contains the text that Amazon Rekognition
+ # detected in the image.
+ #
+ # Every word and line has an identifier (`Id`). Each word belongs to a
+ # line and has a parent identifier (`ParentId`) that identifies the line
+ # of text in which the word appears. The word `Id` is also an index for
+ # the word within a line of words.
+ #
+ # For more information, see text-detection.
+ #
+ # @!attribute [rw] detected_text
+ # The word or line of text recognized by Amazon Rekognition.
+ # @return [String]
+ #
+ # @!attribute [rw] type
+ # The type of text that was detected.
+ # @return [String]
+ #
+ # @!attribute [rw] id
+ # The identifier for the detected text. The identifier is only unique
+ # for a single call to `DetectText`.
+ # @return [Integer]
+ #
+ # @!attribute [rw] parent_id
+ # The Parent identifier for the detected text identified by the value
+ # of `ID`. If the type of detected text is `LINE`, the value of
+ # `ParentId` is `Null`.
+ # @return [Integer]
+ #
+ # @!attribute [rw] confidence
+ # The confidence that Amazon Rekognition has in the accuracy of the
+ # detected text and the accuracy of the geometry points around the
+ # detected text.
+ # @return [Float]
+ #
+ # @!attribute [rw] geometry
+ # The location of the detected text on the image. Includes an axis
+ # aligned coarse bounding box surrounding the text and a finer grain
+ # polygon for more accurate spatial information.
+ # @return [Types::Geometry]
+ #
+ class TextDetection < Struct.new(
+ :detected_text,
+ :type,
+ :id,
+ :parent_id,
+ :confidence,
+ :geometry)
+ include Aws::Structure
+ end
+
  end
  end
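Given the `TextDetection` semantics spelled out above (a `WORD` points back at its `LINE` through `ParentId`, while a `LINE` has no parent), regrouping words under their lines is a small exercise; a sketch with a hypothetical helper:

    def group_words_by_line(text_detections)
      lines = text_detections.select { |d| d.type == 'LINE' }
      words = text_detections.select { |d| d.type == 'WORD' }
      lines.map { |line|
        [line.detected_text, words.select { |w| w.parent_id == line.id }.map(&:detected_text)]
      }.to_h
    end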
metadata
CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: aws-sdk-rekognition
  version: !ruby/object:Gem::Version
- version: 1.
+ version: 1.1.0
  platform: ruby
  authors:
  - Amazon Web Services
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2017-
+ date: 2017-11-22 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: aws-sdk-core
@@ -56,7 +56,9 @@ files:
  homepage: http://github.com/aws/aws-sdk-ruby
  licenses:
  - Apache-2.0
- metadata:
+ metadata:
+ source_code_uri: https://github.com/aws/aws-sdk-ruby/tree/master/gems/aws-sdk-rekognition
+ changelog_uri: https://github.com/aws/aws-sdk-ruby/tree/master/gems/aws-sdk-rekognition/CHANGELOG.md
  post_install_message:
  rdoc_options: []
  require_paths:
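To pick up `DetectText` and the `FaceModelVersion` fields an application needs at least this release; a minimal Gemfile pin (the pessimistic constraint is one reasonable choice, not the only one):

    # Gemfile
    gem 'aws-sdk-rekognition', '~> 1.1'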