aws-sdk-rekognition 1.16.0 → 1.17.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 31fc3baa314f96f7e71daa41e992169fb7d8fdd4
4
- data.tar.gz: c4de58b98f35bd95c22c3d3ae4dc8dcb1bbc8158
3
+ metadata.gz: bb4d2c32162a1218486c97b8b9cd0b69416d2f83
4
+ data.tar.gz: 98906ce7278a97292d947ae339e1aa31ec34dbeb
5
5
  SHA512:
6
- metadata.gz: 0413f61a60dbdbbfbe10cd571c6437d392797e773e83abbf872d4c58718b83881dd4def06ce8fe6d2c14d28b762eafed5ec610003c3c2a13f3dfc33d836dcd3b
7
- data.tar.gz: 23b91398429ffd8a3ecf00ba27b931fa67b4ca1ee3b529204de3510105d17bd8b757b39e491afb778426a3c2c54cee1ad1c0b5fdcad929f180a868e71a2f7702
6
+ metadata.gz: e141b4a26cf096429e56365ce66d721dd9b06c1378731279011185cadaa50ebfed8d5af67b58378d994df4acd35a0f669b316c07b30d5b91279bd8637b4e40dd
7
+ data.tar.gz: 3df162b8063857211c31c83731a605d22cc45ca5880e5af948da2c2916f74f4b5cb164328e987c0a406d5d93938910fda9a2e70993eca825f7f982bb80683fe2
@@ -42,6 +42,6 @@ require_relative 'aws-sdk-rekognition/customizations'
42
42
  # @service
43
43
  module Aws::Rekognition
44
44
 
45
- GEM_VERSION = '1.16.0'
45
+ GEM_VERSION = '1.17.0'
46
46
 
47
47
  end
@@ -408,7 +408,7 @@ module Aws::Rekognition
408
408
  end
409
409
 
410
410
  # Creates a collection in an AWS Region. You can add faces to the
411
- # collection using the operation.
411
+ # collection using the IndexFaces operation.
412
412
  #
413
413
  # For example, you might create collections, one for each of your
414
414
  # application users. A user can then index faces using the `IndexFaces`
@@ -480,11 +480,12 @@ module Aws::Rekognition
480
480
  # criteria in `Settings`. For example, the collection containing faces
481
481
  # that you want to recognize. Use `Name` to assign an identifier for the
482
482
  # stream processor. You use `Name` to manage the stream processor. For
483
- # example, you can start processing the source video by calling with the
484
- # `Name` field.
483
+ # example, you can start processing the source video by calling
484
+ # StartStreamProcessor with the `Name` field.
485
485
  #
486
- # After you have finished analyzing a streaming video, use to stop
487
- # processing. You can delete the stream processor by calling .
486
+ # After you have finished analyzing a streaming video, use
487
+ # StopStreamProcessor to stop processing. You can delete the stream
488
+ # processor by calling DeleteStreamProcessor.
488
489
  #
489
490
  # @option params [required, Types::StreamProcessorInput] :input
490
491
  # Kinesis video stream that provides the source streaming video.
@@ -499,7 +500,8 @@ module Aws::Rekognition
499
500
  # @option params [required, String] :name
500
501
  # An identifier you assign to the stream processor. You can use `Name`
501
502
  # to manage the stream processor. For example, you can get the current
502
- # status of the stream processor by calling . `Name` is idempotent.
503
+ # status of the stream processor by calling DescribeStreamProcessor.
504
+ # `Name` is idempotent.
503
505
  #
504
506
  # @option params [required, Types::StreamProcessorSettings] :settings
505
507
  # Face recognition input parameters to be used by the stream processor.
@@ -647,9 +649,10 @@ module Aws::Rekognition
647
649
  end
648
650
 
649
651
  # Deletes the stream processor identified by `Name`. You assign the
650
- # value for `Name` when you create the stream processor with . You might
651
- # not be able to use the same name for a stream processor for a few
652
- # seconds after calling `DeleteStreamProcessor`.
652
+ # value for `Name` when you create the stream processor with
653
+ # CreateStreamProcessor. You might not be able to use the same name for
654
+ # a stream processor for a few seconds after calling
655
+ # `DeleteStreamProcessor`.
653
656
  #
654
657
  # @option params [required, String] :name
655
658
  # The name of the stream processor you want to delete.
@@ -707,10 +710,10 @@ module Aws::Rekognition
707
710
  req.send_request(options)
708
711
  end
709
712
 
710
- # Provides information about a stream processor created by . You can get
711
- # information about the input and output streams, the input parameters
712
- # for the face recognition being performed, and the current status of
713
- # the stream processor.
713
+ # Provides information about a stream processor created by
714
+ # CreateStreamProcessor. You can get information about the input and
715
+ # output streams, the input parameters for the face recognition being
716
+ # performed, and the current status of the stream processor.
714
717
  #
715
718
  # @option params [required, String] :name
716
719
  # Name of the stream processor for which you want information.
@@ -981,7 +984,7 @@ module Aws::Rekognition
981
984
  # In response, the API returns an array of labels. In addition, the
982
985
  # response also includes the orientation correction. Optionally, you can
983
986
  # specify `MinConfidence` to control the confidence threshold for the
984
- # labels returned. The default is 50%. You can also add the `MaxLabels`
987
+ # labels returned. The default is 55%. You can also add the `MaxLabels`
985
988
  # parameter to limit the number of labels returned.
986
989
  #
987
990
  # <note markdown="1"> If the object detected is a person, the operation doesn't provide the
@@ -990,9 +993,9 @@ module Aws::Rekognition
990
993
  # </note>
991
994
  #
992
995
  # `DetectLabels` returns bounding boxes for instances of common object
993
- # labels in an array of objects. An `Instance` object contains a object,
994
- # for the location of the label on the image. It also includes the
995
- # confidence by which the bounding box was detected.
996
+ # labels in an array of Instance objects. An `Instance` object contains
997
+ # a BoundingBox object, for the location of the label on the image. It
998
+ # also includes the confidence by which the bounding box was detected.
996
999
  #
997
1000
  # `DetectLabels` also returns a hierarchical taxonomy of detected
998
1001
  # labels. For example, a detected car might be assigned the label *car*.
@@ -1024,7 +1027,7 @@ module Aws::Rekognition
1024
1027
  # than this specified value.
1025
1028
  #
1026
1029
  # If `MinConfidence` is not specified, the operation returns labels with
1027
- # a confidence values greater than or equal to 50 percent.
1030
+ # a confidence value greater than or equal to 55 percent.
1028
1031
  #
1029
1032
  # @return [Types::DetectLabelsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1030
1033
  #
@@ -1133,6 +1136,7 @@ module Aws::Rekognition
1133
1136
  # @return [Types::DetectModerationLabelsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1134
1137
  #
1135
1138
  # * {Types::DetectModerationLabelsResponse#moderation_labels #moderation_labels} => Array&lt;Types::ModerationLabel&gt;
1139
+ # * {Types::DetectModerationLabelsResponse#moderation_model_version #moderation_model_version} => String
1136
1140
  #
1137
1141
  # @example Request syntax with placeholder values
1138
1142
  #
@@ -1154,6 +1158,7 @@ module Aws::Rekognition
1154
1158
  # resp.moderation_labels[0].confidence #=> Float
1155
1159
  # resp.moderation_labels[0].name #=> String
1156
1160
  # resp.moderation_labels[0].parent_name #=> String
1161
+ # resp.moderation_model_version #=> String
1157
1162
  #
1158
1163
  # @overload detect_moderation_labels(params = {})
1159
1164
  # @param [Hash] params ({})
@@ -1172,9 +1177,10 @@ module Aws::Rekognition
1172
1177
  # not supported. The image must be either a .png or .jpeg formatted
1173
1178
  # file.
1174
1179
  #
1175
- # The `DetectText` operation returns text in an array of elements,
1176
- # `TextDetections`. Each `TextDetection` element provides information
1177
- # about a single word or line of text that was detected in the image.
1180
+ # The `DetectText` operation returns text in an array of TextDetection
1181
+ # elements, `TextDetections`. Each `TextDetection` element provides
1182
+ # information about a single word or line of text that was detected in
1183
+ # the image.
1178
1184
  #
1179
1185
  # A word is one or more ISO basic latin script characters that are not
1180
1186
  # separated by spaces. `DetectText` can detect up to 50 words in an
@@ -1257,7 +1263,8 @@ module Aws::Rekognition
1257
1263
  #
1258
1264
  # @option params [required, String] :id
1259
1265
  # The ID for the celebrity. You get the celebrity ID from a call to the
1260
- # operation, which recognizes celebrities in an image.
1266
+ # RecognizeCelebrities operation, which recognizes celebrities in an
1267
+ # image.
1261
1268
  #
1262
1269
  # @return [Types::GetCelebrityInfoResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1263
1270
  #
@@ -1284,16 +1291,16 @@ module Aws::Rekognition
1284
1291
  end
1285
1292
 
1286
1293
  # Gets the celebrity recognition results for a Amazon Rekognition Video
1287
- # analysis started by .
1294
+ # analysis started by StartCelebrityRecognition.
1288
1295
  #
1289
1296
  # Celebrity recognition in a video is an asynchronous operation.
1290
- # Analysis is started by a call to which returns a job identifier
1291
- # (`JobId`). When the celebrity recognition operation finishes, Amazon
1292
- # Rekognition Video publishes a completion status to the Amazon Simple
1293
- # Notification Service topic registered in the initial call to
1294
- # `StartCelebrityRecognition`. To get the results of the celebrity
1295
- # recognition analysis, first check that the status value published to
1296
- # the Amazon SNS topic is `SUCCEEDED`. If so, call
1297
+ # Analysis is started by a call to StartCelebrityRecognition which
1298
+ # returns a job identifier (`JobId`). When the celebrity recognition
1299
+ # operation finishes, Amazon Rekognition Video publishes a completion
1300
+ # status to the Amazon Simple Notification Service topic registered in
1301
+ # the initial call to `StartCelebrityRecognition`. To get the results of
1302
+ # the celebrity recognition analysis, first check that the status value
1303
+ # published to the Amazon SNS topic is `SUCCEEDED`. If so, call
1297
1304
  # `GetCelebrityDetection` and pass the job identifier (`JobId`) from the
1298
1305
  # initial call to `StartCelebrityDetection`.
1299
1306
  #
@@ -1301,9 +1308,10 @@ module Aws::Rekognition
1301
1308
  # Rekognition Developer Guide.
1302
1309
  #
1303
1310
  # `GetCelebrityRecognition` returns detected celebrities and the time(s)
1304
- # they are detected in an array (`Celebrities`) of objects. Each
1305
- # `CelebrityRecognition` contains information about the celebrity in a
1306
- # object and the time, `Timestamp`, the celebrity was detected.
1311
+ # they are detected in an array (`Celebrities`) of CelebrityRecognition
1312
+ # objects. Each `CelebrityRecognition` contains information about the
1313
+ # celebrity in a CelebrityDetail object and the time, `Timestamp`, the
1314
+ # celebrity was detected.
1307
1315
  #
1308
1316
  # <note markdown="1"> `GetCelebrityRecognition` only returns the default facial attributes
1309
1317
  # (`BoundingBox`, `Confidence`, `Landmarks`, `Pose`, and `Quality`). The
@@ -1319,8 +1327,8 @@ module Aws::Rekognition
1319
1327
  #
1320
1328
  # The `CelebrityDetail` object includes the celebrity identifier and
1321
1329
  # additional information urls. If you don't store the additional
1322
- # information urls, you can get them later by calling with the celebrity
1323
- # identifer.
1330
+ # information urls, you can get them later by calling GetCelebrityInfo
1331
+ # with the celebrity identifier.
1324
1332
  #
1325
1333
  # No information is returned for faces not recognized as celebrities.
1326
1334
  #
@@ -1436,24 +1444,25 @@ module Aws::Rekognition
1436
1444
  end
1437
1445
 
1438
1446
  # Gets the content moderation analysis results for a Amazon Rekognition
1439
- # Video analysis started by .
1447
+ # Video analysis started by StartContentModeration.
1440
1448
  #
1441
1449
  # Content moderation analysis of a video is an asynchronous operation.
1442
- # You start analysis by calling . which returns a job identifier
1443
- # (`JobId`). When analysis finishes, Amazon Rekognition Video publishes
1444
- # a completion status to the Amazon Simple Notification Service topic
1445
- # registered in the initial call to `StartContentModeration`. To get the
1446
- # results of the content moderation analysis, first check that the
1447
- # status value published to the Amazon SNS topic is `SUCCEEDED`. If so,
1448
- # call `GetCelebrityDetection` and pass the job identifier (`JobId`)
1449
- # from the initial call to `StartCelebrityDetection`.
1450
+ # You start analysis by calling StartContentModeration, which returns a
1451
+ # job identifier (`JobId`). When analysis finishes, Amazon Rekognition
1452
+ # Video publishes a completion status to the Amazon Simple Notification
1453
+ # Service topic registered in the initial call to
1454
+ # `StartContentModeration`. To get the results of the content moderation
1455
+ # analysis, first check that the status value published to the Amazon
1456
+ # SNS topic is `SUCCEEDED`. If so, call `GetCelebrityDetection` and pass
1457
+ # the job identifier (`JobId`) from the initial call to
1458
+ # `StartCelebrityDetection`.
1450
1459
  #
1451
1460
  # For more information, see Working with Stored Videos in the Amazon
1452
1461
  # Rekognition Developer Guide.
1453
1462
  #
1454
1463
  # `GetContentModeration` returns detected content moderation labels, and
1455
1464
  # the time they are detected, in an array, `ModerationLabels`, of
1456
- # objects.
1465
+ # ContentModerationDetection objects.
1457
1466
  #
1458
1467
  # By default, the moderated labels are returned sorted by time, in
1459
1468
  # milliseconds from the start of the video. You can also sort them by
@@ -1536,17 +1545,18 @@ module Aws::Rekognition
1536
1545
  end
1537
1546
 
1538
1547
  # Gets face detection results for a Amazon Rekognition Video analysis
1539
- # started by .
1548
+ # started by StartFaceDetection.
1540
1549
  #
1541
1550
  # Face detection with Amazon Rekognition Video is an asynchronous
1542
- # operation. You start face detection by calling which returns a job
1543
- # identifier (`JobId`). When the face detection operation finishes,
1544
- # Amazon Rekognition Video publishes a completion status to the Amazon
1545
- # Simple Notification Service topic registered in the initial call to
1546
- # `StartFaceDetection`. To get the results of the face detection
1547
- # operation, first check that the status value published to the Amazon
1548
- # SNS topic is `SUCCEEDED`. If so, call and pass the job identifier
1549
- # (`JobId`) from the initial call to `StartFaceDetection`.
1551
+ # operation. You start face detection by calling StartFaceDetection
1552
+ # which returns a job identifier (`JobId`). When the face detection
1553
+ # operation finishes, Amazon Rekognition Video publishes a completion
1554
+ # status to the Amazon Simple Notification Service topic registered in
1555
+ # the initial call to `StartFaceDetection`. To get the results of the
1556
+ # face detection operation, first check that the status value published
1557
+ # to the Amazon SNS topic is `SUCCEEDED`. If so, call GetFaceDetection
1558
+ # and pass the job identifier (`JobId`) from the initial call to
1559
+ # `StartFaceDetection`.
1550
1560
  #
1551
1561
  # `GetFaceDetection` returns an array of detected faces (`Faces`) sorted
1552
1562
  # by the time the faces were detected.
@@ -1647,27 +1657,28 @@ module Aws::Rekognition
1647
1657
  end
1648
1658
 
1649
1659
  # Gets the face search results for Amazon Rekognition Video face search
1650
- # started by . The search returns faces in a collection that match the
1651
- # faces of persons detected in a video. It also includes the time(s)
1652
- # that faces are matched in the video.
1660
+ # started by StartFaceSearch. The search returns faces in a collection
1661
+ # that match the faces of persons detected in a video. It also includes
1662
+ # the time(s) that faces are matched in the video.
1653
1663
  #
1654
1664
  # Face search in a video is an asynchronous operation. You start face
1655
- # search by calling to which returns a job identifier (`JobId`). When
1656
- # the search operation finishes, Amazon Rekognition Video publishes a
1657
- # completion status to the Amazon Simple Notification Service topic
1658
- # registered in the initial call to `StartFaceSearch`. To get the search
1659
- # results, first check that the status value published to the Amazon SNS
1660
- # topic is `SUCCEEDED`. If so, call `GetFaceSearch` and pass the job
1661
- # identifier (`JobId`) from the initial call to `StartFaceSearch`.
1665
+ # search by calling StartFaceSearch which returns a job identifier
1666
+ # (`JobId`). When the search operation finishes, Amazon Rekognition
1667
+ # Video publishes a completion status to the Amazon Simple Notification
1668
+ # Service topic registered in the initial call to `StartFaceSearch`. To
1669
+ # get the search results, first check that the status value published to
1670
+ # the Amazon SNS topic is `SUCCEEDED`. If so, call `GetFaceSearch` and
1671
+ # pass the job identifier (`JobId`) from the initial call to
1672
+ # `StartFaceSearch`.
1662
1673
  #
1663
1674
  # For more information, see Searching Faces in a Collection in the
1664
1675
  # Amazon Rekognition Developer Guide.
1665
1676
  #
1666
- # The search results are retured in an array, `Persons`, of objects.
1667
- # Each`PersonMatch` element contains details about the matching faces in
1668
- # the input collection, person information (facial attributes, bounding
1669
- # boxes, and person identifer) for the matched person, and the time the
1670
- # person was matched in the video.
1677
+ # The search results are returned in an array, `Persons`, of PersonMatch
1678
+ # objects. Each `PersonMatch` element contains details about the matching
1679
+ # faces in the input collection, person information (facial attributes,
1680
+ # bounding boxes, and person identifier) for the matched person, and the
1681
+ # time the person was matched in the video.
1671
1682
  #
1672
1683
  # <note markdown="1"> `GetFaceSearch` only returns the default facial attributes
1673
1684
  # (`BoundingBox`, `Confidence`, `Landmarks`, `Pose`, and `Quality`). The
@@ -1791,16 +1802,17 @@ module Aws::Rekognition
1791
1802
  end
1792
1803
 
1793
1804
  # Gets the label detection results of a Amazon Rekognition Video
1794
- # analysis started by .
1805
+ # analysis started by StartLabelDetection.
1795
1806
  #
1796
- # The label detection operation is started by a call to which returns a
1797
- # job identifier (`JobId`). When the label detection operation finishes,
1798
- # Amazon Rekognition publishes a completion status to the Amazon Simple
1799
- # Notification Service topic registered in the initial call to
1800
- # `StartlabelDetection`. To get the results of the label detection
1801
- # operation, first check that the status value published to the Amazon
1802
- # SNS topic is `SUCCEEDED`. If so, call and pass the job identifier
1803
- # (`JobId`) from the initial call to `StartLabelDetection`.
1807
+ # The label detection operation is started by a call to
1808
+ # StartLabelDetection which returns a job identifier (`JobId`). When the
1809
+ # label detection operation finishes, Amazon Rekognition publishes a
1810
+ # completion status to the Amazon Simple Notification Service topic
1811
+ # registered in the initial call to `StartLabelDetection`. To get the
1812
+ # results of the label detection operation, first check that the status
1813
+ # value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
1814
+ # GetLabelDetection and pass the job identifier (`JobId`) from the
1815
+ # initial call to `StartLabelDetection`.
1804
1816
  #
1805
1817
  # `GetLabelDetection` returns an array of detected labels (`Labels`)
1806
1818
  # sorted by the time the labels were detected. You can also sort by the
@@ -1810,6 +1822,10 @@ module Aws::Rekognition
1810
1822
  # in the accuracy of the detected label, and the time the label was
1811
1823
  # detected in the video.
1812
1824
  #
1825
+ # The returned labels also include bounding box information for common
1826
+ # objects, a hierarchical taxonomy of detected labels, and the version
1827
+ # of the label model used for detection.
1828
+ #
1813
1829
  # Use MaxResults parameter to limit the number of labels returned. If
1814
1830
  # there are more results than specified in `MaxResults`, the value of
1815
1831
  # `NextToken` in the operation response contains a pagination token for
@@ -1818,13 +1834,6 @@ module Aws::Rekognition
1818
1834
  # with the token value returned from the previous call to
1819
1835
  # `GetLabelDetection`.
1820
1836
  #
1821
- # <note markdown="1"> `GetLabelDetection` doesn't return a hierarchical taxonomy, or
1822
- # bounding box information, for detected labels. `GetLabelDetection`
1823
- # returns `null` for the `Parents` and `Instances` attributes of the
1824
- # object which is returned in the `Labels` array.
1825
- #
1826
- # </note>
1827
- #
1828
1837
  # @option params [required, String] :job_id
1829
1838
  # Job identifier for the label detection operation for which you want
1830
1839
  # results returned. You get the job identifer from an initial call to
@@ -1856,6 +1865,7 @@ module Aws::Rekognition
1856
1865
  # * {Types::GetLabelDetectionResponse#video_metadata #video_metadata} => Types::VideoMetadata
1857
1866
  # * {Types::GetLabelDetectionResponse#next_token #next_token} => String
1858
1867
  # * {Types::GetLabelDetectionResponse#labels #labels} => Array&lt;Types::LabelDetection&gt;
1868
+ # * {Types::GetLabelDetectionResponse#label_model_version #label_model_version} => String
1859
1869
  #
1860
1870
  # @example Request syntax with placeholder values
1861
1871
  #
@@ -1889,6 +1899,7 @@ module Aws::Rekognition
1889
1899
  # resp.labels[0].label.instances[0].confidence #=> Float
1890
1900
  # resp.labels[0].label.parents #=> Array
1891
1901
  # resp.labels[0].label.parents[0].name #=> String
1902
+ # resp.label_model_version #=> String
1892
1903
  #
1893
1904
  # @overload get_label_detection(params = {})
1894
1905
  # @param [Hash] params ({})
@@ -1898,7 +1909,7 @@ module Aws::Rekognition
1898
1909
  end
1899
1910
 
1900
1911
  # Gets the path tracking results of a Amazon Rekognition Video analysis
1901
- # started by .
1912
+ # started by StartPersonTracking.
1902
1913
  #
1903
1914
  # The person path tracking operation is started by a call to
1904
1915
  # `StartPersonTracking` which returns a job identifier (`JobId`). When
@@ -1908,8 +1919,8 @@ module Aws::Rekognition
1908
1919
  #
1909
1920
  # To get the results of the person path tracking operation, first check
1910
1921
  # that the status value published to the Amazon SNS topic is
1911
- # `SUCCEEDED`. If so, call and pass the job identifier (`JobId`) from
1912
- # the initial call to `StartPersonTracking`.
1922
+ # `SUCCEEDED`. If so, call GetPersonTracking and pass the job identifier
1923
+ # (`JobId`) from the initial call to `StartPersonTracking`.
1913
1924
  #
1914
1925
  # `GetPersonTracking` returns an array, `Persons`, of tracked persons
1915
1926
  # and the time(s) their paths were tracked in the video.
@@ -2045,12 +2056,13 @@ module Aws::Rekognition
2045
2056
  # the input image. For each face, the algorithm extracts facial features
2046
2057
  # into a feature vector, and stores it in the backend database. Amazon
2047
2058
  # Rekognition uses feature vectors when it performs face match and
2048
- # search operations using the and operations.
2059
+ # search operations using the SearchFaces and SearchFacesByImage
2060
+ # operations.
2049
2061
  #
2050
2062
  # For more information, see Adding Faces to a Collection in the Amazon
2051
2063
  # Rekognition Developer Guide.
2052
2064
  #
2053
- # To get the number of faces in a collection, call .
2065
+ # To get the number of faces in a collection, call DescribeCollection.
2054
2066
  #
2055
2067
  # If you're using version 1.0 of the face detection model, `IndexFaces`
2056
2068
  # indexes the 15 largest faces in the input image. Later versions of the
@@ -2060,17 +2072,18 @@ module Aws::Rekognition
2060
2072
  # orientation information is not returned in the `OrientationCorrection`
2061
2073
  # field.
2062
2074
  #
2063
- # To determine which version of the model you're using, call and supply
2064
- # the collection ID. You can also get the model version from the value
2065
- # of `FaceModelVersion` in the response from `IndexFaces`
2075
+ # To determine which version of the model you're using, call
2076
+ # DescribeCollection and supply the collection ID. You can also get the
2077
+ # model version from the value of `FaceModelVersion` in the response
2078
+ # from `IndexFaces`.
2066
2079
  #
2067
2080
  # For more information, see Model Versioning in the Amazon Rekognition
2068
2081
  # Developer Guide.
2069
2082
  #
2070
2083
  # If you provide the optional `ExternalImageID` for the input image you
2071
2084
  # provided, Amazon Rekognition associates this ID with all faces that it
2072
- # detects. When you call the operation, the response returns the
2073
- # external ID. You can use this external image ID to create a
2085
+ # detects. When you call the ListFaces operation, the response returns
2086
+ # the external ID. You can use this external image ID to create a
2074
2087
  # client-side index to associate the faces with each image. You can then
2075
2088
  # use the index to find all faces in an image.
2076
2089
  #
@@ -2089,13 +2102,13 @@ module Aws::Rekognition
2089
2102
  #
2090
2103
  # <note markdown="1"> To use quality filtering, you need a collection associated with
2091
2104
  # version 3 of the face model. To get the version of the face model
2092
- # associated with a collection, call .
2105
+ # associated with a collection, call DescribeCollection.
2093
2106
  #
2094
2107
  # </note>
2095
2108
  #
2096
2109
  # Information about faces detected in an image, but not indexed, is
2097
- # returned in an array of objects, `UnindexedFaces`. Faces aren't
2098
- # indexed for reasons such as:
2110
+ # returned in an array of UnindexedFace objects, `UnindexedFaces`. Faces
2111
+ # aren't indexed for reasons such as:
2099
2112
  #
2100
2113
  # * The number of faces detected exceeds the value of the `MaxFaces`
2101
2114
  # request parameter.
@@ -2709,7 +2722,8 @@ module Aws::Rekognition
2709
2722
  req.send_request(options)
2710
2723
  end
2711
2724
 
2712
- # Gets a list of stream processors that you have created with .
2725
+ # Gets a list of stream processors that you have created with
2726
+ # CreateStreamProcessor.
2713
2727
  #
2714
2728
  # @option params [String] :next_token
2715
2729
  # If the previous response was incomplete (because there are more stream
@@ -2768,7 +2782,8 @@ module Aws::Rekognition
2768
2782
  # information and use the `Celebrity` ID property as a unique identifier
2769
2783
  # for the celebrity. If you don't store the celebrity name or
2770
2784
  # additional information URLs returned by `RecognizeCelebrities`, you
2771
- # will need the ID to identify the celebrity in a call to the operation.
2785
+ # will need the ID to identify the celebrity in a call to the
2786
+ # GetCelebrityInfo operation.
2772
2787
  #
2773
2788
  # You pass the input image either as base64-encoded image bytes or as a
2774
2789
  # reference to an image in an Amazon S3 bucket. If you use the AWS CLI
@@ -2994,8 +3009,8 @@ module Aws::Rekognition
2994
3009
  # specified collection.
2995
3010
  #
2996
3011
  # <note markdown="1"> To search for all faces in an input image, you might first call the
2997
- # operation, and then use the face IDs returned in subsequent calls to
2998
- # the operation.
3012
+ # IndexFaces operation, and then use the face IDs returned in subsequent
3013
+ # calls to the SearchFaces operation.
2999
3014
  #
3000
3015
  # You can also call the `DetectFaces` operation and use the bounding
3001
3016
  # boxes in the response to make face crops, which then you can pass in
@@ -3144,8 +3159,9 @@ module Aws::Rekognition
3144
3159
  # Notification Service topic that you specify in `NotificationChannel`.
3145
3160
  # To get the results of the celebrity recognition analysis, first check
3146
3161
  # that the status value published to the Amazon SNS topic is
3147
- # `SUCCEEDED`. If so, call and pass the job identifier (`JobId`) from
3148
- # the initial call to `StartCelebrityRecognition`.
3162
+ # `SUCCEEDED`. If so, call GetCelebrityRecognition and pass the job
3163
+ # identifier (`JobId`) from the initial call to
3164
+ # `StartCelebrityRecognition`.
3149
3165
  #
3150
3166
  # For more information, see Recognizing Celebrities in the Amazon
3151
3167
  # Rekognition Developer Guide.
@@ -3215,8 +3231,9 @@ module Aws::Rekognition
3215
3231
  #
3216
3232
  # To get the results of the content moderation analysis, first check
3217
3233
  # that the status value published to the Amazon SNS topic is
3218
- # `SUCCEEDED`. If so, call and pass the job identifier (`JobId`) from
3219
- # the initial call to `StartContentModeration`.
3234
+ # `SUCCEEDED`. If so, call GetContentModeration and pass the job
3235
+ # identifier (`JobId`) from the initial call to
3236
+ # `StartContentModeration`.
3220
3237
  #
3221
3238
  # For more information, see Detecting Unsafe Content in the Amazon
3222
3239
  # Rekognition Developer Guide.
@@ -3293,8 +3310,9 @@ module Aws::Rekognition
3293
3310
  # status to the Amazon Simple Notification Service topic that you
3294
3311
  # specify in `NotificationChannel`. To get the results of the face
3295
3312
  # detection operation, first check that the status value published to
3296
- # the Amazon SNS topic is `SUCCEEDED`. If so, call and pass the job
3297
- # identifier (`JobId`) from the initial call to `StartFaceDetection`.
3313
+ # the Amazon SNS topic is `SUCCEEDED`. If so, call GetFaceDetection and
3314
+ # pass the job identifier (`JobId`) from the initial call to
3315
+ # `StartFaceDetection`.
3298
3316
  #
3299
3317
  # For more information, see Detecting Faces in a Stored Video in the
3300
3318
  # Amazon Rekognition Developer Guide.
@@ -3371,8 +3389,8 @@ module Aws::Rekognition
3371
3389
  # Simple Notification Service topic that you specify in
3372
3390
  # `NotificationChannel`. To get the search results, first check that the
3373
3391
  # status value published to the Amazon SNS topic is `SUCCEEDED`. If so,
3374
- # call and pass the job identifier (`JobId`) from the initial call to
3375
- # `StartFaceSearch`. For more information, see
3392
+ # call GetFaceSearch and pass the job identifier (`JobId`) from the
3393
+ # initial call to `StartFaceSearch`. For more information, see
3376
3394
  # procedure-person-search-videos.
3377
3395
  #
3378
3396
  # @option params [required, Types::Video] :video
@@ -3453,8 +3471,8 @@ module Aws::Rekognition
3453
3471
  #
3454
3472
  # To get the results of the label detection operation, first check that
3455
3473
  # the status value published to the Amazon SNS topic is `SUCCEEDED`. If
3456
- # so, call and pass the job identifier (`JobId`) from the initial call
3457
- # to `StartLabelDetection`.
3474
+ # so, call GetLabelDetection and pass the job identifier (`JobId`) from
3475
+ # the initial call to `StartLabelDetection`.
3458
3476
  #
3459
3477
  # @option params [required, Types::Video] :video
3460
3478
  # The video in which you want to detect labels. The video must be stored
@@ -3532,8 +3550,8 @@ module Aws::Rekognition
3532
3550
  #
3533
3551
  # To get the results of the person detection operation, first check that
3534
3552
  # the status value published to the Amazon SNS topic is `SUCCEEDED`. If
3535
- # so, call and pass the job identifier (`JobId`) from the initial call
3536
- # to `StartPersonTracking`.
3553
+ # so, call GetPersonTracking and pass the job identifier (`JobId`) from
3554
+ # the initial call to `StartPersonTracking`.
3537
3555
  #
3538
3556
  # @option params [required, Types::Video] :video
3539
3557
  # The video in which you want to detect people. The video must be stored
@@ -3587,9 +3605,9 @@ module Aws::Rekognition
3587
3605
  end
3588
3606
 
3589
3607
  # Starts processing a stream processor. You create a stream processor by
3590
- # calling . To tell `StartStreamProcessor` which stream processor to
3591
- # start, use the value of the `Name` field specified in the call to
3592
- # `CreateStreamProcessor`.
3608
+ # calling CreateStreamProcessor. To tell `StartStreamProcessor` which
3609
+ # stream processor to start, use the value of the `Name` field specified
3610
+ # in the call to `CreateStreamProcessor`.
3593
3611
  #
3594
3612
  # @option params [required, String] :name
3595
3613
  # The name of the stream processor to start processing.
@@ -3609,10 +3627,11 @@ module Aws::Rekognition
3609
3627
  req.send_request(options)
3610
3628
  end
3611
3629
 
3612
- # Stops a running stream processor that was created by .
3630
+ # Stops a running stream processor that was created by
3631
+ # CreateStreamProcessor.
3613
3632
  #
3614
3633
  # @option params [required, String] :name
3615
- # The name of a stream processor created by .
3634
+ # The name of a stream processor created by CreateStreamProcessor.
3616
3635
  #
3617
3636
  # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
3618
3637
  #
@@ -3642,7 +3661,7 @@ module Aws::Rekognition
3642
3661
  params: params,
3643
3662
  config: config)
3644
3663
  context[:gem_name] = 'aws-sdk-rekognition'
3645
- context[:gem_version] = '1.16.0'
3664
+ context[:gem_version] = '1.17.0'
3646
3665
  Seahorse::Client::Request.new(handlers, context)
3647
3666
  end
3648
3667
 
@@ -389,6 +389,7 @@ module Aws::Rekognition
389
389
  DetectModerationLabelsRequest.struct_class = Types::DetectModerationLabelsRequest
390
390
 
391
391
  DetectModerationLabelsResponse.add_member(:moderation_labels, Shapes::ShapeRef.new(shape: ModerationLabels, location_name: "ModerationLabels"))
392
+ DetectModerationLabelsResponse.add_member(:moderation_model_version, Shapes::ShapeRef.new(shape: String, location_name: "ModerationModelVersion"))
392
393
  DetectModerationLabelsResponse.struct_class = Types::DetectModerationLabelsResponse
393
394
 
394
395
  DetectTextRequest.add_member(:image, Shapes::ShapeRef.new(shape: Image, required: true, location_name: "Image"))
@@ -542,6 +543,7 @@ module Aws::Rekognition
542
543
  GetLabelDetectionResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
543
544
  GetLabelDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
544
545
  GetLabelDetectionResponse.add_member(:labels, Shapes::ShapeRef.new(shape: LabelDetections, location_name: "Labels"))
546
+ GetLabelDetectionResponse.add_member(:label_model_version, Shapes::ShapeRef.new(shape: String, location_name: "LabelModelVersion"))
545
547
  GetLabelDetectionResponse.struct_class = Types::GetLabelDetectionResponse
546
548
 
547
549
  GetPersonTrackingRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
@@ -97,7 +97,8 @@ module Aws::Rekognition
97
97
  include Aws::Structure
98
98
  end
99
99
 
100
- # Provides information about a celebrity recognized by the operation.
100
+ # Provides information about a celebrity recognized by the
101
+ # RecognizeCelebrities operation.
101
102
  #
102
103
  # @!attribute [rw] urls
103
104
  # An array of URLs pointing to additional information about the
@@ -462,7 +463,8 @@ module Aws::Rekognition
462
463
  # @!attribute [rw] name
463
464
  # An identifier you assign to the stream processor. You can use `Name`
464
465
  # to manage the stream processor. For example, you can get the current
465
- # status of the stream processor by calling . `Name` is idempotent.
466
+ # status of the stream processor by calling DescribeStreamProcessor.
467
+ # `Name` is idempotent.
466
468
  # @return [String]
467
469
  #
468
470
  # @!attribute [rw] settings
@@ -585,7 +587,7 @@ module Aws::Rekognition
585
587
 
586
588
  # @!attribute [rw] face_count
587
589
  # The number of faces that are indexed into the collection. To index
588
- # faces into a collection, use .
590
+ # faces into a collection, use IndexFaces.
589
591
  # @return [Integer]
590
592
  #
591
593
  # @!attribute [rw] face_model_version
@@ -791,7 +793,7 @@ module Aws::Rekognition
791
793
  # than this specified value.
792
794
  #
793
795
  # If `MinConfidence` is not specified, the operation returns labels
794
- # with a confidence values greater than or equal to 50 percent.
796
+ # with a confidence values greater than or equal to 55 percent.
795
797
  # @return [Float]
796
798
  #
797
799
  class DetectLabelsRequest < Struct.new(
@@ -875,8 +877,14 @@ module Aws::Rekognition
875
877
  # from the start of the video, they were detected.
876
878
  # @return [Array<Types::ModerationLabel>]
877
879
  #
880
+ # @!attribute [rw] moderation_model_version
881
+ # Version number of the moderation detection model that was used to
882
+ # detect unsafe content.
883
+ # @return [String]
884
+ #
878
885
  class DetectModerationLabelsResponse < Struct.new(
879
- :moderation_labels)
886
+ :moderation_labels,
887
+ :moderation_model_version)
880
888
  include Aws::Structure
881
889
  end
882
890
 
@@ -1007,12 +1015,12 @@ module Aws::Rekognition
1007
1015
  # all facial attributes. The default attributes are `BoundingBox`,
1008
1016
  # `Confidence`, `Landmarks`, `Pose`, and `Quality`.
1009
1017
  #
1010
- # is the only Amazon Rekognition Video stored video operation that can
1011
- # return a `FaceDetail` object with all attributes. To specify which
1012
- # attributes to return, use the `FaceAttributes` input parameter for .
1013
- # The following Amazon Rekognition Video operations return only the
1014
- # default attributes. The corresponding Start operations don't have a
1015
- # `FaceAttributes` input parameter.
1018
+ # GetFaceDetection is the only Amazon Rekognition Video stored video
1019
+ # operation that can return a `FaceDetail` object with all attributes.
1020
+ # To specify which attributes to return, use the `FaceAttributes` input
1021
+ # parameter for StartFaceDetection. The following Amazon Rekognition
1022
+ # Video operations return only the default attributes. The corresponding
1023
+ # Start operations don't have a `FaceAttributes` input parameter.
1016
1024
  #
1017
1025
  # * GetCelebrityRecognition
1018
1026
  #
@@ -1020,10 +1028,10 @@ module Aws::Rekognition
1020
1028
  #
1021
1029
  # * GetFaceSearch
1022
1030
  #
1023
- # The Amazon Rekognition Image and operations can return all facial
1024
- # attributes. To specify which attributes to return, use the
1025
- # `Attributes` input parameter for `DetectFaces`. For `IndexFaces`, use
1026
- # the `DetectAttributes` input parameter.
1031
+ # The Amazon Rekognition Image DetectFaces and IndexFaces operations can
1032
+ # return all facial attributes. To specify which attributes to return,
1033
+ # use the `Attributes` input parameter for `DetectFaces`. For
1034
+ # `IndexFaces`, use the `DetectAttributes` input parameter.
1027
1035
  #
1028
1036
  # @!attribute [rw] bounding_box
1029
1037
  # Bounding box of the face. Default attribute.
@@ -1174,7 +1182,8 @@ module Aws::Rekognition
1174
1182
  end
1175
1183
 
1176
1184
  # Input face recognition parameters for an Amazon Rekognition stream
1177
- # processor. `FaceRecognitionSettings` is a request parameter for .
1185
+ # processor. `FaceRecognitionSettings` is a request parameter for
1186
+ # CreateStreamProcessor.
1178
1187
  #
1179
1188
  # @note When making an API call, you may pass FaceSearchSettings
1180
1189
  # data as a hash:
@@ -1217,7 +1226,8 @@ module Aws::Rekognition
1217
1226
  include Aws::Structure
1218
1227
  end
1219
1228
 
1220
- # Information about where the text detected by is located on an image.
1229
+ # Information about where the text detected by DetectText is located on
1230
+ # an image.
1221
1231
  #
1222
1232
  # @!attribute [rw] bounding_box
1223
1233
  # An axis-aligned coarse representation of the detected text's
@@ -1244,7 +1254,8 @@ module Aws::Rekognition
1244
1254
  #
1245
1255
  # @!attribute [rw] id
1246
1256
  # The ID for the celebrity. You get the celebrity ID from a call to
1247
- # the operation, which recognizes celebrities in an image.
1257
+ # the RecognizeCelebrities operation, which recognizes celebrities in
1258
+ # an image.
1248
1259
  # @return [String]
1249
1260
  #
1250
1261
  class GetCelebrityInfoRequest < Struct.new(
@@ -1559,13 +1570,14 @@ module Aws::Rekognition
1559
1570
  # @return [Types::VideoMetadata]
1560
1571
  #
1561
1572
  # @!attribute [rw] persons
1562
- # An array of persons, , in the video whose face(s) match the face(s)
1563
- # in an Amazon Rekognition collection. It also includes time
1564
- # information for when persons are matched in the video. You specify
1565
- # the input collection in an initial call to `StartFaceSearch`. Each
1566
- # `Persons` element includes a time the person was matched, face match
1567
- # details (`FaceMatches`) for matching faces in the collection, and
1568
- # person information (`Person`) for the matched person.
1573
+ # An array of persons, PersonMatch, in the video whose face(s) match
1574
+ # the face(s) in an Amazon Rekognition collection. It also includes
1575
+ # time information for when persons are matched in the video. You
1576
+ # specify the input collection in an initial call to
1577
+ # `StartFaceSearch`. Each `Persons` element includes a time the person
1578
+ # was matched, face match details (`FaceMatches`) for matching faces
1579
+ # in the collection, and person information (`Person`) for the matched
1580
+ # person.
1569
1581
  # @return [Array<Types::PersonMatch>]
1570
1582
  #
1571
1583
  class GetFaceSearchResponse < Struct.new(
@@ -1650,12 +1662,18 @@ module Aws::Rekognition
1650
1662
  # video, that the label was detected.
1651
1663
  # @return [Array<Types::LabelDetection>]
1652
1664
  #
1665
+ # @!attribute [rw] label_model_version
1666
+ # Version number of the label detection model that was used to detect
1667
+ # labels.
1668
+ # @return [String]
1669
+ #
1653
1670
  class GetLabelDetectionResponse < Struct.new(
1654
1671
  :job_status,
1655
1672
  :status_message,
1656
1673
  :video_metadata,
1657
1674
  :next_token,
1658
- :labels)
1675
+ :labels,
1676
+ :label_model_version)
1659
1677
  include Aws::Structure
1660
1678
  end
1661
1679
 
@@ -1940,7 +1958,8 @@ module Aws::Rekognition
1940
1958
  # object locations before the image is rotated.
1941
1959
  #
1942
1960
  # Bounding box information is returned in the `FaceRecords` array. You
1943
- # can get the version of the face detection model by calling .
1961
+ # can get the version of the face detection model by calling
1962
+ # DescribeCollection.
1944
1963
  # @return [String]
1945
1964
  #
1946
1965
  # @!attribute [rw] face_model_version
@@ -1964,15 +1983,16 @@ module Aws::Rekognition
1964
1983
  include Aws::Structure
1965
1984
  end
1966
1985
 
1967
- # An instance of a label detected by .
1986
+ # An instance of a label returned by Amazon Rekognition Image
1987
+ # (DetectLabels) or by Amazon Rekognition Video (GetLabelDetection).
1968
1988
  #
1969
1989
  # @!attribute [rw] bounding_box
1970
1990
  # The position of the label instance on the image.
1971
1991
  # @return [Types::BoundingBox]
1972
1992
  #
1973
1993
  # @!attribute [rw] confidence
1974
- # The confidence that Amazon Rekognition Image has in the accuracy of
1975
- # the bounding box.
1994
+ # The confidence that Amazon Rekognition has in the accuracy of the
1995
+ # bounding box.
1976
1996
  # @return [Float]
1977
1997
  #
1978
1998
  class Instance < Struct.new(
@@ -2024,13 +2044,7 @@ module Aws::Rekognition
2024
2044
  end
2025
2045
 
2026
2046
  # Structure containing details about the detected label, including the
2027
- # name, and level of confidence.
2028
- #
2029
- # The Amazon Rekognition Image operation operation returns a
2030
- # hierarchical taxonomy (`Parents`) for detected labels and also
2031
- # bounding box information (`Instances`) for detected labels. Amazon
2032
- # Rekognition Video doesn't return this information and returns `null`
2033
- # for the `Parents` and `Instances` attributes.
2047
+ # name, detected instances, parent labels, and level of confidence.
2034
2048
  #
2035
2049
  # @!attribute [rw] name
2036
2050
  # The name (label) of the object or scene.
@@ -2045,23 +2059,11 @@ module Aws::Rekognition
2045
2059
  # boxes for each instance of the detected object. Bounding boxes are
2046
2060
  # returned for common object labels such as people, cars, furniture,
2047
2061
  # apparel or pets.
2048
- #
2049
- # <note markdown="1"> Amazon Rekognition Video does not support bounding box information
2050
- # for detected labels. The value of `Instances` is returned as `null`
2051
- # by `GetLabelDetection`.
2052
- #
2053
- # </note>
2054
2062
  # @return [Array<Types::Instance>]
2055
2063
  #
2056
2064
  # @!attribute [rw] parents
2057
2065
  # The parent labels for a label. The response includes all ancestor
2058
2066
  # labels.
2059
- #
2060
- # <note markdown="1"> Amazon Rekognition Video does not support a hierarchical taxonomy of
2061
- # detected labels. The value of `Parents` is returned as `null` by
2062
- # `GetLabelDetection`.
2063
- #
2064
- # </note>
2065
2067
  # @return [Array<Types::Parent>]
2066
2068
  #
2067
2069
  class Label < Struct.new(
@@ -2390,8 +2392,8 @@ module Aws::Rekognition
2390
2392
  # people's paths return an array of `PersonDetection` objects with
2391
2393
  # elements for each time a person's path is tracked in a video.
2392
2394
  #
2393
- # For more information, see API\_GetPersonTracking in the Amazon
2394
- # Rekognition Developer Guide.
2395
+ # For more information, see GetPersonTracking in the Amazon Rekognition
2396
+ # Developer Guide.
2395
2397
  #
2396
2398
  # @!attribute [rw] timestamp
2397
2399
  # The time, in milliseconds from the start of the video, that the
@@ -2410,9 +2412,10 @@ module Aws::Rekognition
2410
2412
 
2411
2413
  # Information about a person whose face matches a face(s) in an Amazon
2412
2414
  # Rekognition collection. Includes information about the faces in the
2413
- # Amazon Rekognition collection (), information about the person
2414
- # (PersonDetail), and the time stamp for when the person was detected in
2415
- # a video. An array of `PersonMatch` objects is returned by .
2415
+ # Amazon Rekognition collection (FaceMatch), information about the
2416
+ # person (PersonDetail), and the time stamp for when the person was
2417
+ # detected in a video. An array of `PersonMatch` objects is returned by
2418
+ # GetFaceSearch.
2416
2419
  #
2417
2420
  # @!attribute [rw] timestamp
2418
2421
  # The time, in milliseconds from the beginning of the video, that the
@@ -2440,9 +2443,10 @@ module Aws::Rekognition
2440
2443
  # input image is 700x200 and the operation returns X=0.5 and Y=0.25,
2441
2444
  # then the point is at the (350,50) pixel coordinate on the image.
2442
2445
  #
2443
- # An array of `Point` objects, `Polygon`, is returned by . `Polygon`
2444
- # represents a fine-grained polygon around detected text. For more
2445
- # information, see Geometry in the Amazon Rekognition Developer Guide.
2446
+ # An array of `Point` objects, `Polygon`, is returned by DetectText.
2447
+ # `Polygon` represents a fine-grained polygon around detected text. For
2448
+ # more information, see Geometry in the Amazon Rekognition Developer
2449
+ # Guide.
2446
2450
  #
2447
2451
  # @!attribute [rw] x
2448
2452
  # The value of the X coordinate for a point on a `Polygon`.
@@ -3162,7 +3166,7 @@ module Aws::Rekognition
3162
3166
  # }
3163
3167
  #
3164
3168
  # @!attribute [rw] name
3165
- # The name of a stream processor created by .
3169
+ # The name of a stream processor created by CreateStreamProcessor.
3166
3170
  # @return [String]
3167
3171
  #
3168
3172
  class StopStreamProcessorRequest < Struct.new(
@@ -3173,10 +3177,11 @@ module Aws::Rekognition
3173
3177
  class StopStreamProcessorResponse < Aws::EmptyStructure; end
3174
3178
 
3175
3179
  # An object that recognizes faces in a streaming video. An Amazon
3176
- # Rekognition stream processor is created by a call to . The request
3177
- # parameters for `CreateStreamProcessor` describe the Kinesis video
3178
- # stream source for the streaming video, face recognition parameters,
3179
- # and where to stream the analysis resullts.
3180
+ # Rekognition stream processor is created by a call to
3181
+ # CreateStreamProcessor. The request parameters for
3182
+ # `CreateStreamProcessor` describe the Kinesis video stream source for
3183
+ # the streaming video, face recognition parameters, and where to stream
3184
+ # the analysis resullts.
3180
3185
  #
3181
3186
  # @!attribute [rw] name
3182
3187
  # Name of the Amazon Rekognition stream processor.
@@ -3277,7 +3282,7 @@ module Aws::Rekognition
3277
3282
  include Aws::Structure
3278
3283
  end
3279
3284
 
3280
- # Information about a word or line of text detected by .
3285
+ # Information about a word or line of text detected by DetectText.
3281
3286
  #
3282
3287
  # The `DetectedText` field contains the text that Amazon Rekognition
3283
3288
  # detected in the image.
@@ -3331,8 +3336,8 @@ module Aws::Rekognition
3331
3336
  include Aws::Structure
3332
3337
  end
3333
3338
 
3334
- # A face that detected, but didn't index. Use the `Reasons` response
3335
- # attribute to determine why a face wasn't indexed.
3339
+ # A face that IndexFaces detected, but didn't index. Use the `Reasons`
3340
+ # response attribute to determine why a face wasn't indexed.
3336
3341
  #
3337
3342
  # @!attribute [rw] reasons
3338
3343
  # An array of reasons that specify why a face wasn't indexed.
@@ -3366,8 +3371,9 @@ module Aws::Rekognition
3366
3371
  end
3367
3372
 
3368
3373
  # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
3369
- # start operations such as use `Video` to specify a video for analysis.
3370
- # The supported file formats are .mp4, .mov and .avi.
3374
+ # start operations such as StartLabelDetection use `Video` to specify a
3375
+ # video for analysis. The supported file formats are .mp4, .mov and
3376
+ # .avi.
3371
3377
  #
3372
3378
  # @note When making an API call, you may pass Video
3373
3379
  # data as a hash:
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: aws-sdk-rekognition
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.16.0
4
+ version: 1.17.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Amazon Web Services
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2018-11-21 00:00:00.000000000 Z
11
+ date: 2019-01-17 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: aws-sdk-core