aws-sdk-rekognition 1.0.0.rc10 → 1.0.0.rc11

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: d530f5a46ac184014f42546dc6fa4d7fb44c9524
4
- data.tar.gz: a6f7cd4bc098e1846794f975b44e74f7446d379c
3
+ metadata.gz: 7d80d078bbdfb71e163d2ba617606596fdcb23cd
4
+ data.tar.gz: 786d65689c62b082c24d90daf44932d89b03d2f7
5
5
  SHA512:
6
- metadata.gz: 4ca0644eec96c84f77e46280557b52643a91f3a834a8419ed28402ddc0b7233f1272badb4beaaecb0e3cbfec1dfec06a8303cb61666fb6f678ac7f11f47e1c94
7
- data.tar.gz: e48374dcc8436f70f4f9884713ee92bc955b75e5a2329e70734d589c7c117b589c8dcc994dd843574f6a86b54dcb20a1139064b08e88edc744c2477aff1e401e
6
+ metadata.gz: 0bf25fd769ddf14f23d3caf8d3475f5d7d4d0e6455d3a8f1ce321fe3fd904b1e892f383ae83fac1563b33f7e2cb4a45f101f177d56ccc9fa15da373dcc63fcb6
7
+ data.tar.gz: 926d2f366bccd837a24ef562114986e3ee85edbf48498c9a8f6ac182bc81497fec2daa47a8b5422c0bbcb96366cf691fad58036f51c92d186db6b75d3265ff5a
@@ -42,6 +42,6 @@ require_relative 'aws-sdk-rekognition/customizations'
42
42
  # @service
43
43
  module Aws::Rekognition
44
44
 
45
- GEM_VERSION = '1.0.0.rc10'
45
+ GEM_VERSION = '1.0.0.rc11'
46
46
 
47
47
  end
@@ -216,6 +216,54 @@ module Aws::Rekognition
216
216
  # * {Types::CompareFacesResponse#source_image_orientation_correction #source_image_orientation_correction} => String
217
217
  # * {Types::CompareFacesResponse#target_image_orientation_correction #target_image_orientation_correction} => String
218
218
  #
219
+ #
220
+ # @example Example: To compare two images
221
+ #
222
+ # # This operation compares the largest face detected in the source image with each face detected in the target image.
223
+ #
224
+ # resp = client.compare_faces({
225
+ # similarity_threshold: 90,
226
+ # source_image: {
227
+ # s3_object: {
228
+ # bucket: "mybucket",
229
+ # name: "mysourceimage",
230
+ # },
231
+ # },
232
+ # target_image: {
233
+ # s3_object: {
234
+ # bucket: "mybucket",
235
+ # name: "mytargetimage",
236
+ # },
237
+ # },
238
+ # })
239
+ #
240
+ # resp.to_h outputs the following:
241
+ # {
242
+ # face_matches: [
243
+ # {
244
+ # face: {
245
+ # bounding_box: {
246
+ # height: 0.33481481671333313,
247
+ # left: 0.31888890266418457,
248
+ # top: 0.4933333396911621,
249
+ # width: 0.25,
250
+ # },
251
+ # confidence: 99.9991226196289,
252
+ # },
253
+ # similarity: 100,
254
+ # },
255
+ # ],
256
+ # source_image_face: {
257
+ # bounding_box: {
258
+ # height: 0.33481481671333313,
259
+ # left: 0.31888890266418457,
260
+ # top: 0.4933333396911621,
261
+ # width: 0.25,
262
+ # },
263
+ # confidence: 99.9991226196289,
264
+ # },
265
+ # }
266
+ #
219
267
  # @example Request syntax with placeholder values
220
268
  #
221
269
  # resp = client.compare_faces({
@@ -311,6 +359,21 @@ module Aws::Rekognition
311
359
  # * {Types::CreateCollectionResponse#status_code #status_code} => Integer
312
360
  # * {Types::CreateCollectionResponse#collection_arn #collection_arn} => String
313
361
  #
362
+ #
363
+ # @example Example: To create a collection
364
+ #
365
+ # # This operation creates a Rekognition collection for storing image data.
366
+ #
367
+ # resp = client.create_collection({
368
+ # collection_id: "myphotos",
369
+ # })
370
+ #
371
+ # resp.to_h outputs the following:
372
+ # {
373
+ # collection_arn: "aws:rekognition:us-west-2:123456789012:collection/myphotos",
374
+ # status_code: 200,
375
+ # }
376
+ #
314
377
  # @example Request syntax with placeholder values
315
378
  #
316
379
  # resp = client.create_collection({
@@ -342,6 +405,20 @@ module Aws::Rekognition
342
405
  #
343
406
  # * {Types::DeleteCollectionResponse#status_code #status_code} => Integer
344
407
  #
408
+ #
409
+ # @example Example: To delete a collection
410
+ #
411
+ # # This operation deletes a Rekognition collection.
412
+ #
413
+ # resp = client.delete_collection({
414
+ # collection_id: "myphotos",
415
+ # })
416
+ #
417
+ # resp.to_h outputs the following:
418
+ # {
419
+ # status_code: 200,
420
+ # }
421
+ #
345
422
  # @example Request syntax with placeholder values
346
423
  #
347
424
  # resp = client.delete_collection({
@@ -375,6 +452,25 @@ module Aws::Rekognition
375
452
  #
376
453
  # * {Types::DeleteFacesResponse#deleted_faces #deleted_faces} => Array<String>
377
454
  #
455
+ #
456
+ # @example Example: To delete a face
457
+ #
458
+ # # This operation deletes one or more faces from a Rekognition collection.
459
+ #
460
+ # resp = client.delete_faces({
461
+ # collection_id: "myphotos",
462
+ # face_ids: [
463
+ # "ff43d742-0c13-5d16-a3e8-03d3f58e980b",
464
+ # ],
465
+ # })
466
+ #
467
+ # resp.to_h outputs the following:
468
+ # {
469
+ # deleted_faces: [
470
+ # "ff43d742-0c13-5d16-a3e8-03d3f58e980b",
471
+ # ],
472
+ # }
473
+ #
378
474
  # @example Request syntax with placeholder values
379
475
  #
380
476
  # resp = client.delete_faces({
@@ -438,6 +534,72 @@ module Aws::Rekognition
438
534
  # * {Types::DetectFacesResponse#face_details #face_details} => Array<Types::FaceDetail>
439
535
  # * {Types::DetectFacesResponse#orientation_correction #orientation_correction} => String
440
536
  #
537
+ #
538
+ # @example Example: To detect faces in an image
539
+ #
540
+ # # This operation detects faces in an image stored in an AWS S3 bucket.
541
+ #
542
+ # resp = client.detect_faces({
543
+ # image: {
544
+ # s3_object: {
545
+ # bucket: "mybucket",
546
+ # name: "myphoto",
547
+ # },
548
+ # },
549
+ # })
550
+ #
551
+ # resp.to_h outputs the following:
552
+ # {
553
+ # face_details: [
554
+ # {
555
+ # bounding_box: {
556
+ # height: 0.18000000715255737,
557
+ # left: 0.5555555820465088,
558
+ # top: 0.33666667342185974,
559
+ # width: 0.23999999463558197,
560
+ # },
561
+ # confidence: 100,
562
+ # landmarks: [
563
+ # {
564
+ # type: "EYE_LEFT",
565
+ # x: 0.6394737362861633,
566
+ # y: 0.40819624066352844,
567
+ # },
568
+ # {
569
+ # type: "EYE_RIGHT",
570
+ # x: 0.7266660928726196,
571
+ # y: 0.41039225459098816,
572
+ # },
573
+ # {
574
+ # type: "NOSE_LEFT",
575
+ # x: 0.6912462115287781,
576
+ # y: 0.44240960478782654,
577
+ # },
578
+ # {
579
+ # type: "MOUTH_DOWN",
580
+ # x: 0.6306198239326477,
581
+ # y: 0.46700039505958557,
582
+ # },
583
+ # {
584
+ # type: "MOUTH_UP",
585
+ # x: 0.7215608954429626,
586
+ # y: 0.47114261984825134,
587
+ # },
588
+ # ],
589
+ # pose: {
590
+ # pitch: 4.050806522369385,
591
+ # roll: 0.9950747489929199,
592
+ # yaw: 13.693790435791016,
593
+ # },
594
+ # quality: {
595
+ # brightness: 37.60169982910156,
596
+ # sharpness: 80,
597
+ # },
598
+ # },
599
+ # ],
600
+ # orientation_correction: "ROTATE_0",
601
+ # }
602
+ #
441
603
  # @example Request syntax with placeholder values
442
604
  #
443
605
  # resp = client.detect_faces({
@@ -572,6 +734,36 @@ module Aws::Rekognition
572
734
  # * {Types::DetectLabelsResponse#labels #labels} => Array<Types::Label>
573
735
  # * {Types::DetectLabelsResponse#orientation_correction #orientation_correction} => String
574
736
  #
737
+ #
738
+ # @example Example: To detect labels
739
+ #
740
+ # # This operation detects labels in the supplied image.
741
+ #
742
+ # resp = client.detect_labels({
743
+ # image: {
744
+ # s3_object: {
745
+ # bucket: "mybucket",
746
+ # name: "myphoto",
747
+ # },
748
+ # },
749
+ # max_labels: 123,
750
+ # min_confidence: 70,
751
+ # })
752
+ #
753
+ # resp.to_h outputs the following:
754
+ # {
755
+ # labels: [
756
+ # {
757
+ # confidence: 99.25072479248047,
758
+ # name: "People",
759
+ # },
760
+ # {
761
+ # confidence: 99.25074005126953,
762
+ # name: "Person",
763
+ # },
764
+ # ],
765
+ # }
766
+ #
575
767
  # @example Request syntax with placeholder values
576
768
  #
577
769
  # resp = client.detect_labels({
@@ -753,6 +945,147 @@ module Aws::Rekognition
753
945
  # * {Types::IndexFacesResponse#face_records #face_records} => Array<Types::FaceRecord>
754
946
  # * {Types::IndexFacesResponse#orientation_correction #orientation_correction} => String
755
947
  #
948
+ #
949
+ # @example Example: To add a face to a collection
950
+ #
951
+ # # This operation detects faces in an image and adds them to the specified Rekognition collection.
952
+ #
953
+ # resp = client.index_faces({
954
+ # collection_id: "myphotos",
955
+ # detection_attributes: [
956
+ # ],
957
+ # external_image_id: "myphotoid",
958
+ # image: {
959
+ # s3_object: {
960
+ # bucket: "mybucket",
961
+ # name: "myphoto",
962
+ # },
963
+ # },
964
+ # })
965
+ #
966
+ # resp.to_h outputs the following:
967
+ # {
968
+ # face_records: [
969
+ # {
970
+ # face: {
971
+ # bounding_box: {
972
+ # height: 0.33481481671333313,
973
+ # left: 0.31888890266418457,
974
+ # top: 0.4933333396911621,
975
+ # width: 0.25,
976
+ # },
977
+ # confidence: 99.9991226196289,
978
+ # face_id: "ff43d742-0c13-5d16-a3e8-03d3f58e980b",
979
+ # image_id: "465f4e93-763e-51d0-b030-b9667a2d94b1",
980
+ # },
981
+ # face_detail: {
982
+ # bounding_box: {
983
+ # height: 0.33481481671333313,
984
+ # left: 0.31888890266418457,
985
+ # top: 0.4933333396911621,
986
+ # width: 0.25,
987
+ # },
988
+ # confidence: 99.9991226196289,
989
+ # landmarks: [
990
+ # {
991
+ # type: "EYE_LEFT",
992
+ # x: 0.3976764678955078,
993
+ # y: 0.6248345971107483,
994
+ # },
995
+ # {
996
+ # type: "EYE_RIGHT",
997
+ # x: 0.4810936450958252,
998
+ # y: 0.6317117214202881,
999
+ # },
1000
+ # {
1001
+ # type: "NOSE_LEFT",
1002
+ # x: 0.41986238956451416,
1003
+ # y: 0.7111940383911133,
1004
+ # },
1005
+ # {
1006
+ # type: "MOUTH_DOWN",
1007
+ # x: 0.40525302290916443,
1008
+ # y: 0.7497701048851013,
1009
+ # },
1010
+ # {
1011
+ # type: "MOUTH_UP",
1012
+ # x: 0.4753248989582062,
1013
+ # y: 0.7558549642562866,
1014
+ # },
1015
+ # ],
1016
+ # pose: {
1017
+ # pitch: -9.713645935058594,
1018
+ # roll: 4.707281112670898,
1019
+ # yaw: -24.438663482666016,
1020
+ # },
1021
+ # quality: {
1022
+ # brightness: 29.23358917236328,
1023
+ # sharpness: 80,
1024
+ # },
1025
+ # },
1026
+ # },
1027
+ # {
1028
+ # face: {
1029
+ # bounding_box: {
1030
+ # height: 0.32592591643333435,
1031
+ # left: 0.5144444704055786,
1032
+ # top: 0.15111111104488373,
1033
+ # width: 0.24444444477558136,
1034
+ # },
1035
+ # confidence: 99.99950408935547,
1036
+ # face_id: "8be04dba-4e58-520d-850e-9eae4af70eb2",
1037
+ # image_id: "465f4e93-763e-51d0-b030-b9667a2d94b1",
1038
+ # },
1039
+ # face_detail: {
1040
+ # bounding_box: {
1041
+ # height: 0.32592591643333435,
1042
+ # left: 0.5144444704055786,
1043
+ # top: 0.15111111104488373,
1044
+ # width: 0.24444444477558136,
1045
+ # },
1046
+ # confidence: 99.99950408935547,
1047
+ # landmarks: [
1048
+ # {
1049
+ # type: "EYE_LEFT",
1050
+ # x: 0.6006892323493958,
1051
+ # y: 0.290842205286026,
1052
+ # },
1053
+ # {
1054
+ # type: "EYE_RIGHT",
1055
+ # x: 0.6808141469955444,
1056
+ # y: 0.29609042406082153,
1057
+ # },
1058
+ # {
1059
+ # type: "NOSE_LEFT",
1060
+ # x: 0.6395332217216492,
1061
+ # y: 0.3522595763206482,
1062
+ # },
1063
+ # {
1064
+ # type: "MOUTH_DOWN",
1065
+ # x: 0.5892083048820496,
1066
+ # y: 0.38689887523651123,
1067
+ # },
1068
+ # {
1069
+ # type: "MOUTH_UP",
1070
+ # x: 0.674560010433197,
1071
+ # y: 0.394125759601593,
1072
+ # },
1073
+ # ],
1074
+ # pose: {
1075
+ # pitch: -4.683138370513916,
1076
+ # roll: 2.1029529571533203,
1077
+ # yaw: 6.716655254364014,
1078
+ # },
1079
+ # quality: {
1080
+ # brightness: 34.951698303222656,
1081
+ # sharpness: 160,
1082
+ # },
1083
+ # },
1084
+ # },
1085
+ # ],
1086
+ # orientation_correction: "ROTATE_0",
1087
+ # }
1088
+ #
756
1089
  # @example Request syntax with placeholder values
757
1090
  #
758
1091
  # resp = client.index_faces({
@@ -844,6 +1177,21 @@ module Aws::Rekognition
844
1177
  # * {Types::ListCollectionsResponse#collection_ids #collection_ids} => Array<String>
845
1178
  # * {Types::ListCollectionsResponse#next_token #next_token} => String
846
1179
  #
1180
+ #
1181
+ # @example Example: To list the collections
1182
+ #
1183
+ # # This operation returns a list of Rekognition collections.
1184
+ #
1185
+ # resp = client.list_collections({
1186
+ # })
1187
+ #
1188
+ # resp.to_h outputs the following:
1189
+ # {
1190
+ # collection_ids: [
1191
+ # "myphotos",
1192
+ # ],
1193
+ # }
1194
+ #
847
1195
  # @example Request syntax with placeholder values
848
1196
  #
849
1197
  # resp = client.list_collections({
@@ -889,6 +1237,143 @@ module Aws::Rekognition
889
1237
  # * {Types::ListFacesResponse#faces #faces} => Array<Types::Face>
890
1238
  # * {Types::ListFacesResponse#next_token #next_token} => String
891
1239
  #
1240
+ #
1241
+ # @example Example: To list the faces in a collection
1242
+ #
1243
+ # # This operation lists the faces in a Rekognition collection.
1244
+ #
1245
+ # resp = client.list_faces({
1246
+ # collection_id: "myphotos",
1247
+ # max_results: 20,
1248
+ # })
1249
+ #
1250
+ # resp.to_h outputs the following:
1251
+ # {
1252
+ # faces: [
1253
+ # {
1254
+ # bounding_box: {
1255
+ # height: 0.18000000715255737,
1256
+ # left: 0.5555559992790222,
1257
+ # top: 0.336667001247406,
1258
+ # width: 0.23999999463558197,
1259
+ # },
1260
+ # confidence: 100,
1261
+ # face_id: "1c62e8b5-69a7-5b7d-b3cd-db4338a8a7e7",
1262
+ # image_id: "147fdf82-7a71-52cf-819b-e786c7b9746e",
1263
+ # },
1264
+ # {
1265
+ # bounding_box: {
1266
+ # height: 0.16555599868297577,
1267
+ # left: 0.30963000655174255,
1268
+ # top: 0.7066670060157776,
1269
+ # width: 0.22074100375175476,
1270
+ # },
1271
+ # confidence: 100,
1272
+ # face_id: "29a75abe-397b-5101-ba4f-706783b2246c",
1273
+ # image_id: "147fdf82-7a71-52cf-819b-e786c7b9746e",
1274
+ # },
1275
+ # {
1276
+ # bounding_box: {
1277
+ # height: 0.3234420120716095,
1278
+ # left: 0.3233329951763153,
1279
+ # top: 0.5,
1280
+ # width: 0.24222199618816376,
1281
+ # },
1282
+ # confidence: 99.99829864501953,
1283
+ # face_id: "38271d79-7bc2-5efb-b752-398a8d575b85",
1284
+ # image_id: "d5631190-d039-54e4-b267-abd22c8647c5",
1285
+ # },
1286
+ # {
1287
+ # bounding_box: {
1288
+ # height: 0.03555560111999512,
1289
+ # left: 0.37388700246810913,
1290
+ # top: 0.2477779984474182,
1291
+ # width: 0.04747769981622696,
1292
+ # },
1293
+ # confidence: 99.99210357666016,
1294
+ # face_id: "3b01bef0-c883-5654-ba42-d5ad28b720b3",
1295
+ # image_id: "812d9f04-86f9-54fc-9275-8d0dcbcb6784",
1296
+ # },
1297
+ # {
1298
+ # bounding_box: {
1299
+ # height: 0.05333330109715462,
1300
+ # left: 0.2937690019607544,
1301
+ # top: 0.35666701197624207,
1302
+ # width: 0.07121659815311432,
1303
+ # },
1304
+ # confidence: 99.99919891357422,
1305
+ # face_id: "4839a608-49d0-566c-8301-509d71b534d1",
1306
+ # image_id: "812d9f04-86f9-54fc-9275-8d0dcbcb6784",
1307
+ # },
1308
+ # {
1309
+ # bounding_box: {
1310
+ # height: 0.3249259889125824,
1311
+ # left: 0.5155559778213501,
1312
+ # top: 0.1513350009918213,
1313
+ # width: 0.24333299696445465,
1314
+ # },
1315
+ # confidence: 99.99949645996094,
1316
+ # face_id: "70008e50-75e4-55d0-8e80-363fb73b3a14",
1317
+ # image_id: "d5631190-d039-54e4-b267-abd22c8647c5",
1318
+ # },
1319
+ # {
1320
+ # bounding_box: {
1321
+ # height: 0.03777780011296272,
1322
+ # left: 0.7002969980239868,
1323
+ # top: 0.18777799606323242,
1324
+ # width: 0.05044509842991829,
1325
+ # },
1326
+ # confidence: 99.92639923095703,
1327
+ # face_id: "7f5f88ed-d684-5a88-b0df-01e4a521552b",
1328
+ # image_id: "812d9f04-86f9-54fc-9275-8d0dcbcb6784",
1329
+ # },
1330
+ # {
1331
+ # bounding_box: {
1332
+ # height: 0.05555560067296028,
1333
+ # left: 0.13946600258350372,
1334
+ # top: 0.46333301067352295,
1335
+ # width: 0.07270029932260513,
1336
+ # },
1337
+ # confidence: 99.99469757080078,
1338
+ # face_id: "895b4e2c-81de-5902-a4bd-d1792bda00b2",
1339
+ # image_id: "812d9f04-86f9-54fc-9275-8d0dcbcb6784",
1340
+ # },
1341
+ # {
1342
+ # bounding_box: {
1343
+ # height: 0.3259260058403015,
1344
+ # left: 0.5144439935684204,
1345
+ # top: 0.15111100673675537,
1346
+ # width: 0.24444399774074554,
1347
+ # },
1348
+ # confidence: 99.99949645996094,
1349
+ # face_id: "8be04dba-4e58-520d-850e-9eae4af70eb2",
1350
+ # image_id: "465f4e93-763e-51d0-b030-b9667a2d94b1",
1351
+ # },
1352
+ # {
1353
+ # bounding_box: {
1354
+ # height: 0.18888899683952332,
1355
+ # left: 0.3783380091190338,
1356
+ # top: 0.2355560064315796,
1357
+ # width: 0.25222599506378174,
1358
+ # },
1359
+ # confidence: 99.9999008178711,
1360
+ # face_id: "908544ad-edc3-59df-8faf-6a87cc256cf5",
1361
+ # image_id: "3c731605-d772-541a-a5e7-0375dbc68a07",
1362
+ # },
1363
+ # {
1364
+ # bounding_box: {
1365
+ # height: 0.33481499552726746,
1366
+ # left: 0.31888899207115173,
1367
+ # top: 0.49333301186561584,
1368
+ # width: 0.25,
1369
+ # },
1370
+ # confidence: 99.99909973144531,
1371
+ # face_id: "ff43d742-0c13-5d16-a3e8-03d3f58e980b",
1372
+ # image_id: "465f4e93-763e-51d0-b030-b9667a2d94b1",
1373
+ # },
1374
+ # ],
1375
+ # }
1376
+ #
892
1377
  # @example Request syntax with placeholder values
893
1378
  #
894
1379
  # resp = client.list_faces({
@@ -1056,6 +1541,67 @@ module Aws::Rekognition
1056
1541
  # * {Types::SearchFacesResponse#searched_face_id #searched_face_id} => String
1057
1542
  # * {Types::SearchFacesResponse#face_matches #face_matches} => Array<Types::FaceMatch>
1058
1543
  #
1544
+ #
1545
+ # @example Example: To search for faces using a face ID
1546
+ #
1547
+ # # This operation searches for matching faces in the collection the supplied face belongs to.
1548
+ #
1549
+ # resp = client.search_faces({
1550
+ # collection_id: "myphotos",
1551
+ # face_id: "70008e50-75e4-55d0-8e80-363fb73b3a14",
1552
+ # face_match_threshold: 90,
1553
+ # max_faces: 10,
1554
+ # })
1555
+ #
1556
+ # resp.to_h outputs the following:
1557
+ # {
1558
+ # face_matches: [
1559
+ # {
1560
+ # face: {
1561
+ # bounding_box: {
1562
+ # height: 0.3259260058403015,
1563
+ # left: 0.5144439935684204,
1564
+ # top: 0.15111100673675537,
1565
+ # width: 0.24444399774074554,
1566
+ # },
1567
+ # confidence: 99.99949645996094,
1568
+ # face_id: "8be04dba-4e58-520d-850e-9eae4af70eb2",
1569
+ # image_id: "465f4e93-763e-51d0-b030-b9667a2d94b1",
1570
+ # },
1571
+ # similarity: 99.97222137451172,
1572
+ # },
1573
+ # {
1574
+ # face: {
1575
+ # bounding_box: {
1576
+ # height: 0.16555599868297577,
1577
+ # left: 0.30963000655174255,
1578
+ # top: 0.7066670060157776,
1579
+ # width: 0.22074100375175476,
1580
+ # },
1581
+ # confidence: 100,
1582
+ # face_id: "29a75abe-397b-5101-ba4f-706783b2246c",
1583
+ # image_id: "147fdf82-7a71-52cf-819b-e786c7b9746e",
1584
+ # },
1585
+ # similarity: 97.04154968261719,
1586
+ # },
1587
+ # {
1588
+ # face: {
1589
+ # bounding_box: {
1590
+ # height: 0.18888899683952332,
1591
+ # left: 0.3783380091190338,
1592
+ # top: 0.2355560064315796,
1593
+ # width: 0.25222599506378174,
1594
+ # },
1595
+ # confidence: 99.9999008178711,
1596
+ # face_id: "908544ad-edc3-59df-8faf-6a87cc256cf5",
1597
+ # image_id: "3c731605-d772-541a-a5e7-0375dbc68a07",
1598
+ # },
1599
+ # similarity: 95.94520568847656,
1600
+ # },
1601
+ # ],
1602
+ # searched_face_id: "70008e50-75e4-55d0-8e80-363fb73b3a14",
1603
+ # }
1604
+ #
1059
1605
  # @example Request syntax with placeholder values
1060
1606
  #
1061
1607
  # resp = client.search_faces({
@@ -1136,6 +1682,50 @@ module Aws::Rekognition
1136
1682
  # * {Types::SearchFacesByImageResponse#searched_face_confidence #searched_face_confidence} => Float
1137
1683
  # * {Types::SearchFacesByImageResponse#face_matches #face_matches} => Array<Types::FaceMatch>
1138
1684
  #
1685
+ #
1686
+ # @example Example: To search for faces matching a supplied image
1687
+ #
1688
+ # # This operation searches for faces in a Rekognition collection that match the largest face in an S3 bucket stored image.
1689
+ #
1690
+ # resp = client.search_faces_by_image({
1691
+ # collection_id: "myphotos",
1692
+ # face_match_threshold: 95,
1693
+ # image: {
1694
+ # s3_object: {
1695
+ # bucket: "mybucket",
1696
+ # name: "myphoto",
1697
+ # },
1698
+ # },
1699
+ # max_faces: 5,
1700
+ # })
1701
+ #
1702
+ # resp.to_h outputs the following:
1703
+ # {
1704
+ # face_matches: [
1705
+ # {
1706
+ # face: {
1707
+ # bounding_box: {
1708
+ # height: 0.3234420120716095,
1709
+ # left: 0.3233329951763153,
1710
+ # top: 0.5,
1711
+ # width: 0.24222199618816376,
1712
+ # },
1713
+ # confidence: 99.99829864501953,
1714
+ # face_id: "38271d79-7bc2-5efb-b752-398a8d575b85",
1715
+ # image_id: "d5631190-d039-54e4-b267-abd22c8647c5",
1716
+ # },
1717
+ # similarity: 99.97036743164062,
1718
+ # },
1719
+ # ],
1720
+ # searched_face_bounding_box: {
1721
+ # height: 0.33481481671333313,
1722
+ # left: 0.31888890266418457,
1723
+ # top: 0.4933333396911621,
1724
+ # width: 0.25,
1725
+ # },
1726
+ # searched_face_confidence: 99.9991226196289,
1727
+ # }
1728
+ #
1139
1729
  # @example Request syntax with placeholder values
1140
1730
  #
1141
1731
  # resp = client.search_faces_by_image({
@@ -1190,7 +1780,7 @@ module Aws::Rekognition
1190
1780
  params: params,
1191
1781
  config: config)
1192
1782
  context[:gem_name] = 'aws-sdk-rekognition'
1193
- context[:gem_version] = '1.0.0.rc10'
1783
+ context[:gem_version] = '1.0.0.rc11'
1194
1784
  Seahorse::Client::Request.new(handlers, context)
1195
1785
  end
1196
1786
 
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: aws-sdk-rekognition
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.0.0.rc10
4
+ version: 1.0.0.rc11
5
5
  platform: ruby
6
6
  authors:
7
7
  - Amazon Web Services
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2017-06-26 00:00:00.000000000 Z
11
+ date: 2017-06-29 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: aws-sdk-core
@@ -16,14 +16,14 @@ dependencies:
16
16
  requirements:
17
17
  - - "~>"
18
18
  - !ruby/object:Gem::Version
19
- version: 3.0.0.rc13
19
+ version: 3.0.0.rc14
20
20
  type: :runtime
21
21
  prerelease: false
22
22
  version_requirements: !ruby/object:Gem::Requirement
23
23
  requirements:
24
24
  - - "~>"
25
25
  - !ruby/object:Gem::Version
26
- version: 3.0.0.rc13
26
+ version: 3.0.0.rc14
27
27
  - !ruby/object:Gem::Dependency
28
28
  name: aws-sigv4
29
29
  requirement: !ruby/object:Gem::Requirement