aws-sdk-rekognition 1.33.0 → 1.34.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: '0073779d5db2875ab000e60fa739f8731b4fccb8'
4
- data.tar.gz: 7427f1652ab2e61ae91127e38a47b03bbb18eec2
3
+ metadata.gz: be8aeee40c517d2db7bdb3d1f4cc5e8ac57981c5
4
+ data.tar.gz: cde89b0c775fd9a9fd709753ec4ebff46e78078e
5
5
  SHA512:
6
- metadata.gz: 386fcd38b965c535dd8d2286633f826b77b8e30f9fb4e1b72e784b892aa62c817283aa4066778d133e0542baa295652833328a32b19283ac3cc2147ec85a132d
7
- data.tar.gz: 2f3b0a9813affe582a4129d7de1ccaaa91b2f6c95fa49445664a3d3fa289c7c65c1c79ce15e9012106cec4f5e3b53f883a3f6cbf408f0656eef39973abbf130d
6
+ metadata.gz: 592a0b5cc96f6bf397006b23138f5d6fab76ab2b7ae4d13de4b859f00b6c1d2ca5857d030c4960e8105995b202a2bd28b62ffcca9c3b3ad7d29e79168159da63
7
+ data.tar.gz: 00b37a0363372228ad146c5381e1f047f8b5719b0b3d123f8a776cda6b69d0bbeb54600024bf301377098036aa6bb7780b4bf82735872818e09b471bacce0292
@@ -43,6 +43,6 @@ require_relative 'aws-sdk-rekognition/customizations'
43
43
  # @service
44
44
  module Aws::Rekognition
45
45
 
46
- GEM_VERSION = '1.33.0'
46
+ GEM_VERSION = '1.34.0'
47
47
 
48
48
  end
@@ -1230,9 +1230,9 @@ module Aws::Rekognition
1230
1230
  # faces or might detect faces with lower confidence.
1231
1231
  #
1232
1232
  # You pass the input image either as base64-encoded image bytes or as a
1233
- # reference to an image in an Amazon S3 bucket. If you use the to call
1234
- # Amazon Rekognition operations, passing image bytes is not supported.
1235
- # The image must be either a PNG or JPEG formatted file.
1233
+ # reference to an image in an Amazon S3 bucket. If you use the AWS CLI
1234
+ # to call Amazon Rekognition operations, passing image bytes is not
1235
+ # supported. The image must be either a PNG or JPEG formatted file.
1236
1236
  #
1237
1237
  # <note markdown="1"> This is a stateless API operation. That is, the operation does not
1238
1238
  # persist any data.
@@ -1704,9 +1704,14 @@ module Aws::Rekognition
1704
1704
  # more information, see Images in the Amazon Rekognition developer
1705
1705
  # guide.
1706
1706
  #
1707
+ # @option params [Types::DetectTextFilters] :filters
1708
+ # Optional parameters that let you set the criteria that the text must
1709
+ # meet to be included in your response.
1710
+ #
1707
1711
  # @return [Types::DetectTextResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1708
1712
  #
1709
1713
  # * {Types::DetectTextResponse#text_detections #text_detections} => Array&lt;Types::TextDetection&gt;
1714
+ # * {Types::DetectTextResponse#text_model_version #text_model_version} => String
1710
1715
  #
1711
1716
  # @example Request syntax with placeholder values
1712
1717
  #
@@ -1719,6 +1724,23 @@ module Aws::Rekognition
1719
1724
  # version: "S3ObjectVersion",
1720
1725
  # },
1721
1726
  # },
1727
+ # filters: {
1728
+ # word_filter: {
1729
+ # min_confidence: 1.0,
1730
+ # min_bounding_box_height: 1.0,
1731
+ # min_bounding_box_width: 1.0,
1732
+ # },
1733
+ # regions_of_interest: [
1734
+ # {
1735
+ # bounding_box: {
1736
+ # width: 1.0,
1737
+ # height: 1.0,
1738
+ # left: 1.0,
1739
+ # top: 1.0,
1740
+ # },
1741
+ # },
1742
+ # ],
1743
+ # },
1722
1744
  # })
1723
1745
  #
1724
1746
  # @example Response structure
@@ -1736,6 +1758,7 @@ module Aws::Rekognition
1736
1758
  # resp.text_detections[0].geometry.polygon #=> Array
1737
1759
  # resp.text_detections[0].geometry.polygon[0].x #=> Float
1738
1760
  # resp.text_detections[0].geometry.polygon[0].y #=> Float
1761
+ # resp.text_model_version #=> String
1739
1762
  #
1740
1763
  # @overload detect_text(params = {})
1741
1764
  # @param [Hash] params ({})
@@ -2544,6 +2567,103 @@ module Aws::Rekognition
2544
2567
  req.send_request(options)
2545
2568
  end
2546
2569
 
2570
+ # Gets the text detection results of an Amazon Rekognition Video analysis
2571
+ # started by StartTextDetection.
2572
+ #
2573
+ # Text detection with Amazon Rekognition Video is an asynchronous
2574
+ # operation. You start text detection by calling StartTextDetection
2575
+ # which returns a job identifier (`JobId`). When the text detection
2576
+ # operation finishes, Amazon Rekognition publishes a completion status
2577
+ # to the Amazon Simple Notification Service topic registered in the
2578
+ # initial call to `StartTextDetection`. To get the results of the text
2579
+ # detection operation, first check that the status value published to
2580
+ # the Amazon SNS topic is `SUCCEEDED`. If so, call `GetTextDetection`
2581
+ # and pass the job identifier (`JobId`) from the initial call of
2582
+ # `StartTextDetection`.
2583
+ #
2584
+ # `GetTextDetection` returns an array of detected text
2585
+ # (`TextDetections`) sorted by the time the text was detected, up to 50
2586
+ # words per frame of video.
2587
+ #
2588
+ # Each element of the array includes the detected text, the percentage
2589
+ # confidence in the accuracy of the detected text, the time the text was
2590
+ # detected, bounding box information for where the text was located, and
2591
+ # unique identifiers for words and their lines.
2592
+ #
2593
+ # Use the MaxResults parameter to limit the number of text detections
2594
+ # returned. If there are more results than specified in `MaxResults`,
2595
+ # the value of `NextToken` in the operation response contains a
2596
+ # pagination token for getting the next set of results. To get the next
2597
+ # page of results, call `GetTextDetection` and populate the `NextToken`
2598
+ # request parameter with the token value returned from the previous call
2599
+ # to `GetTextDetection`.
2600
+ #
2601
+ # @option params [required, String] :job_id
2602
+ # Job identifier for the text detection operation for which you want
2603
+ # results returned. You get the job identifier from an initial call to
2604
+ # `StartTextDetection`.
2605
+ #
2606
+ # @option params [Integer] :max_results
2607
+ # Maximum number of results to return per paginated call. The largest
2608
+ # value you can specify is 1000.
2609
+ #
2610
+ # @option params [String] :next_token
2611
+ # If the previous response was incomplete (because there is more text
2612
+ # to retrieve), Amazon Rekognition Video returns a pagination token in
2613
+ # the response. You can use this pagination token to retrieve the next
2614
+ # set of text.
2615
+ #
2616
+ # @return [Types::GetTextDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2617
+ #
2618
+ # * {Types::GetTextDetectionResponse#job_status #job_status} => String
2619
+ # * {Types::GetTextDetectionResponse#status_message #status_message} => String
2620
+ # * {Types::GetTextDetectionResponse#video_metadata #video_metadata} => Types::VideoMetadata
2621
+ # * {Types::GetTextDetectionResponse#text_detections #text_detections} => Array&lt;Types::TextDetectionResult&gt;
2622
+ # * {Types::GetTextDetectionResponse#next_token #next_token} => String
2623
+ # * {Types::GetTextDetectionResponse#text_model_version #text_model_version} => String
2624
+ #
2625
+ # @example Request syntax with placeholder values
2626
+ #
2627
+ # resp = client.get_text_detection({
2628
+ # job_id: "JobId", # required
2629
+ # max_results: 1,
2630
+ # next_token: "PaginationToken",
2631
+ # })
2632
+ #
2633
+ # @example Response structure
2634
+ #
2635
+ # resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
2636
+ # resp.status_message #=> String
2637
+ # resp.video_metadata.codec #=> String
2638
+ # resp.video_metadata.duration_millis #=> Integer
2639
+ # resp.video_metadata.format #=> String
2640
+ # resp.video_metadata.frame_rate #=> Float
2641
+ # resp.video_metadata.frame_height #=> Integer
2642
+ # resp.video_metadata.frame_width #=> Integer
2643
+ # resp.text_detections #=> Array
2644
+ # resp.text_detections[0].timestamp #=> Integer
2645
+ # resp.text_detections[0].text_detection.detected_text #=> String
2646
+ # resp.text_detections[0].text_detection.type #=> String, one of "LINE", "WORD"
2647
+ # resp.text_detections[0].text_detection.id #=> Integer
2648
+ # resp.text_detections[0].text_detection.parent_id #=> Integer
2649
+ # resp.text_detections[0].text_detection.confidence #=> Float
2650
+ # resp.text_detections[0].text_detection.geometry.bounding_box.width #=> Float
2651
+ # resp.text_detections[0].text_detection.geometry.bounding_box.height #=> Float
2652
+ # resp.text_detections[0].text_detection.geometry.bounding_box.left #=> Float
2653
+ # resp.text_detections[0].text_detection.geometry.bounding_box.top #=> Float
2654
+ # resp.text_detections[0].text_detection.geometry.polygon #=> Array
2655
+ # resp.text_detections[0].text_detection.geometry.polygon[0].x #=> Float
2656
+ # resp.text_detections[0].text_detection.geometry.polygon[0].y #=> Float
2657
+ # resp.next_token #=> String
2658
+ # resp.text_model_version #=> String
2659
+ #
2660
+ # @overload get_text_detection(params = {})
2661
+ # @param [Hash] params ({})
2662
+ def get_text_detection(params = {}, options = {})
2663
+ req = build_request(:get_text_detection, params)
2664
+ req.send_request(options)
2665
+ end
2666
+
2547
2667
  # Detects faces in the input image and adds them to the specified
2548
2668
  # collection.
2549
2669
  #
@@ -4228,6 +4348,98 @@ module Aws::Rekognition
4228
4348
  req.send_request(options)
4229
4349
  end
4230
4350
 
4351
+ # Starts asynchronous detection of text in a stored video.
4352
+ #
4353
+ # Amazon Rekognition Video can detect text in a video stored in an
4354
+ # Amazon S3 bucket. Use Video to specify the bucket name and the
4355
+ # filename of the video. `StartTextDetection` returns a job identifier
4356
+ # (`JobId`) which you use to get the results of the operation. When text
4357
+ # detection is finished, Amazon Rekognition Video publishes a completion
4358
+ # status to the Amazon Simple Notification Service topic that you
4359
+ # specify in `NotificationChannel`.
4360
+ #
4361
+ # To get the results of the text detection operation, first check that
4362
+ # the status value published to the Amazon SNS topic is `SUCCEEDED`. if
4363
+ # so, call GetTextDetection and pass the job identifier (`JobId`) from
4364
+ # the initial call to `StartTextDetection`.
4365
+ #
4366
+ # @option params [required, Types::Video] :video
4367
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
4368
+ # start operations such as StartLabelDetection use `Video` to specify a
4369
+ # video for analysis. The supported file formats are .mp4, .mov and
4370
+ # .avi.
4371
+ #
4372
+ # @option params [String] :client_request_token
4373
+ # Idempotent token used to identify the start request. If you use the
4374
+ # same token with multiple `StartTextDetection` requests, the same
4375
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same job
4376
+ # from being accidentaly started more than once.
4377
+ #
4378
+ # @option params [Types::NotificationChannel] :notification_channel
4379
+ # The Amazon Simple Notification Service topic to which Amazon
4380
+ # Rekognition publishes the completion status of a video analysis
4381
+ # operation. For more information, see api-video.
4382
+ #
4383
+ # @option params [String] :job_tag
4384
+ # An identifier returned in the completion status published by your
4385
+ # Amazon Simple Notification Service topic. For example, you can use
4386
+ # `JobTag` to group related jobs and identify them in the completion
4387
+ # notification.
4388
+ #
4389
+ # @option params [Types::StartTextDetectionFilters] :filters
4390
+ # Optional parameters that let you set criteria the text must meet to be
4391
+ # included in your response.
4392
+ #
4393
+ # @return [Types::StartTextDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
4394
+ #
4395
+ # * {Types::StartTextDetectionResponse#job_id #job_id} => String
4396
+ #
4397
+ # @example Request syntax with placeholder values
4398
+ #
4399
+ # resp = client.start_text_detection({
4400
+ # video: { # required
4401
+ # s3_object: {
4402
+ # bucket: "S3Bucket",
4403
+ # name: "S3ObjectName",
4404
+ # version: "S3ObjectVersion",
4405
+ # },
4406
+ # },
4407
+ # client_request_token: "ClientRequestToken",
4408
+ # notification_channel: {
4409
+ # sns_topic_arn: "SNSTopicArn", # required
4410
+ # role_arn: "RoleArn", # required
4411
+ # },
4412
+ # job_tag: "JobTag",
4413
+ # filters: {
4414
+ # word_filter: {
4415
+ # min_confidence: 1.0,
4416
+ # min_bounding_box_height: 1.0,
4417
+ # min_bounding_box_width: 1.0,
4418
+ # },
4419
+ # regions_of_interest: [
4420
+ # {
4421
+ # bounding_box: {
4422
+ # width: 1.0,
4423
+ # height: 1.0,
4424
+ # left: 1.0,
4425
+ # top: 1.0,
4426
+ # },
4427
+ # },
4428
+ # ],
4429
+ # },
4430
+ # })
4431
+ #
4432
+ # @example Response structure
4433
+ #
4434
+ # resp.job_id #=> String
4435
+ #
4436
+ # @overload start_text_detection(params = {})
4437
+ # @param [Hash] params ({})
4438
+ def start_text_detection(params = {}, options = {})
4439
+ req = build_request(:start_text_detection, params)
4440
+ req.send_request(options)
4441
+ end
4442
+
4231
4443
  # Stops a running model. The operation might take a while to complete.
4232
4444
  # To check the current status, call DescribeProjectVersions.
4233
4445
  #
@@ -4293,7 +4505,7 @@ module Aws::Rekognition
4293
4505
  params: params,
4294
4506
  config: config)
4295
4507
  context[:gem_name] = 'aws-sdk-rekognition'
4296
- context[:gem_version] = '1.33.0'
4508
+ context[:gem_version] = '1.34.0'
4297
4509
  Seahorse::Client::Request.new(handlers, context)
4298
4510
  end
4299
4511
 
@@ -20,6 +20,8 @@ module Aws::Rekognition
20
20
  Beard = Shapes::StructureShape.new(name: 'Beard')
21
21
  Boolean = Shapes::BooleanShape.new(name: 'Boolean')
22
22
  BoundingBox = Shapes::StructureShape.new(name: 'BoundingBox')
23
+ BoundingBoxHeight = Shapes::FloatShape.new(name: 'BoundingBoxHeight')
24
+ BoundingBoxWidth = Shapes::FloatShape.new(name: 'BoundingBoxWidth')
23
25
  Celebrity = Shapes::StructureShape.new(name: 'Celebrity')
24
26
  CelebrityDetail = Shapes::StructureShape.new(name: 'CelebrityDetail')
25
27
  CelebrityList = Shapes::ListShape.new(name: 'CelebrityList')
@@ -76,8 +78,10 @@ module Aws::Rekognition
76
78
  DetectLabelsResponse = Shapes::StructureShape.new(name: 'DetectLabelsResponse')
77
79
  DetectModerationLabelsRequest = Shapes::StructureShape.new(name: 'DetectModerationLabelsRequest')
78
80
  DetectModerationLabelsResponse = Shapes::StructureShape.new(name: 'DetectModerationLabelsResponse')
81
+ DetectTextFilters = Shapes::StructureShape.new(name: 'DetectTextFilters')
79
82
  DetectTextRequest = Shapes::StructureShape.new(name: 'DetectTextRequest')
80
83
  DetectTextResponse = Shapes::StructureShape.new(name: 'DetectTextResponse')
84
+ DetectionFilter = Shapes::StructureShape.new(name: 'DetectionFilter')
81
85
  Emotion = Shapes::StructureShape.new(name: 'Emotion')
82
86
  EmotionName = Shapes::StringShape.new(name: 'EmotionName')
83
87
  Emotions = Shapes::ListShape.new(name: 'Emotions')
@@ -121,6 +125,8 @@ module Aws::Rekognition
121
125
  GetLabelDetectionResponse = Shapes::StructureShape.new(name: 'GetLabelDetectionResponse')
122
126
  GetPersonTrackingRequest = Shapes::StructureShape.new(name: 'GetPersonTrackingRequest')
123
127
  GetPersonTrackingResponse = Shapes::StructureShape.new(name: 'GetPersonTrackingResponse')
128
+ GetTextDetectionRequest = Shapes::StructureShape.new(name: 'GetTextDetectionRequest')
129
+ GetTextDetectionResponse = Shapes::StructureShape.new(name: 'GetTextDetectionResponse')
124
130
  GroundTruthManifest = Shapes::StructureShape.new(name: 'GroundTruthManifest')
125
131
  HumanLoopActivationConditionsEvaluationResults = Shapes::StringShape.new(name: 'HumanLoopActivationConditionsEvaluationResults')
126
132
  HumanLoopActivationOutput = Shapes::StructureShape.new(name: 'HumanLoopActivationOutput')
@@ -210,6 +216,8 @@ module Aws::Rekognition
210
216
  Reasons = Shapes::ListShape.new(name: 'Reasons')
211
217
  RecognizeCelebritiesRequest = Shapes::StructureShape.new(name: 'RecognizeCelebritiesRequest')
212
218
  RecognizeCelebritiesResponse = Shapes::StructureShape.new(name: 'RecognizeCelebritiesResponse')
219
+ RegionOfInterest = Shapes::StructureShape.new(name: 'RegionOfInterest')
220
+ RegionsOfInterest = Shapes::ListShape.new(name: 'RegionsOfInterest')
213
221
  RekognitionUniqueId = Shapes::StringShape.new(name: 'RekognitionUniqueId')
214
222
  ResourceAlreadyExistsException = Shapes::StructureShape.new(name: 'ResourceAlreadyExistsException')
215
223
  ResourceInUseException = Shapes::StructureShape.new(name: 'ResourceInUseException')
@@ -243,6 +251,9 @@ module Aws::Rekognition
243
251
  StartProjectVersionResponse = Shapes::StructureShape.new(name: 'StartProjectVersionResponse')
244
252
  StartStreamProcessorRequest = Shapes::StructureShape.new(name: 'StartStreamProcessorRequest')
245
253
  StartStreamProcessorResponse = Shapes::StructureShape.new(name: 'StartStreamProcessorResponse')
254
+ StartTextDetectionFilters = Shapes::StructureShape.new(name: 'StartTextDetectionFilters')
255
+ StartTextDetectionRequest = Shapes::StructureShape.new(name: 'StartTextDetectionRequest')
256
+ StartTextDetectionResponse = Shapes::StructureShape.new(name: 'StartTextDetectionResponse')
246
257
  StatusMessage = Shapes::StringShape.new(name: 'StatusMessage')
247
258
  StopProjectVersionRequest = Shapes::StructureShape.new(name: 'StopProjectVersionRequest')
248
259
  StopProjectVersionResponse = Shapes::StructureShape.new(name: 'StopProjectVersionResponse')
@@ -263,6 +274,8 @@ module Aws::Rekognition
263
274
  TestingDataResult = Shapes::StructureShape.new(name: 'TestingDataResult')
264
275
  TextDetection = Shapes::StructureShape.new(name: 'TextDetection')
265
276
  TextDetectionList = Shapes::ListShape.new(name: 'TextDetectionList')
277
+ TextDetectionResult = Shapes::StructureShape.new(name: 'TextDetectionResult')
278
+ TextDetectionResults = Shapes::ListShape.new(name: 'TextDetectionResults')
266
279
  TextTypes = Shapes::StringShape.new(name: 'TextTypes')
267
280
  ThrottlingException = Shapes::StructureShape.new(name: 'ThrottlingException')
268
281
  Timestamp = Shapes::IntegerShape.new(name: 'Timestamp')
@@ -507,12 +520,23 @@ module Aws::Rekognition
507
520
  DetectModerationLabelsResponse.add_member(:human_loop_activation_output, Shapes::ShapeRef.new(shape: HumanLoopActivationOutput, location_name: "HumanLoopActivationOutput"))
508
521
  DetectModerationLabelsResponse.struct_class = Types::DetectModerationLabelsResponse
509
522
 
523
+ DetectTextFilters.add_member(:word_filter, Shapes::ShapeRef.new(shape: DetectionFilter, location_name: "WordFilter"))
524
+ DetectTextFilters.add_member(:regions_of_interest, Shapes::ShapeRef.new(shape: RegionsOfInterest, location_name: "RegionsOfInterest"))
525
+ DetectTextFilters.struct_class = Types::DetectTextFilters
526
+
510
527
  DetectTextRequest.add_member(:image, Shapes::ShapeRef.new(shape: Image, required: true, location_name: "Image"))
528
+ DetectTextRequest.add_member(:filters, Shapes::ShapeRef.new(shape: DetectTextFilters, location_name: "Filters"))
511
529
  DetectTextRequest.struct_class = Types::DetectTextRequest
512
530
 
513
531
  DetectTextResponse.add_member(:text_detections, Shapes::ShapeRef.new(shape: TextDetectionList, location_name: "TextDetections"))
532
+ DetectTextResponse.add_member(:text_model_version, Shapes::ShapeRef.new(shape: String, location_name: "TextModelVersion"))
514
533
  DetectTextResponse.struct_class = Types::DetectTextResponse
515
534
 
535
+ DetectionFilter.add_member(:min_confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "MinConfidence"))
536
+ DetectionFilter.add_member(:min_bounding_box_height, Shapes::ShapeRef.new(shape: BoundingBoxHeight, location_name: "MinBoundingBoxHeight"))
537
+ DetectionFilter.add_member(:min_bounding_box_width, Shapes::ShapeRef.new(shape: BoundingBoxWidth, location_name: "MinBoundingBoxWidth"))
538
+ DetectionFilter.struct_class = Types::DetectionFilter
539
+
516
540
  Emotion.add_member(:type, Shapes::ShapeRef.new(shape: EmotionName, location_name: "Type"))
517
541
  Emotion.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
518
542
  Emotion.struct_class = Types::Emotion
@@ -679,6 +703,19 @@ module Aws::Rekognition
679
703
  GetPersonTrackingResponse.add_member(:persons, Shapes::ShapeRef.new(shape: PersonDetections, location_name: "Persons"))
680
704
  GetPersonTrackingResponse.struct_class = Types::GetPersonTrackingResponse
681
705
 
706
+ GetTextDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
707
+ GetTextDetectionRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
708
+ GetTextDetectionRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
709
+ GetTextDetectionRequest.struct_class = Types::GetTextDetectionRequest
710
+
711
+ GetTextDetectionResponse.add_member(:job_status, Shapes::ShapeRef.new(shape: VideoJobStatus, location_name: "JobStatus"))
712
+ GetTextDetectionResponse.add_member(:status_message, Shapes::ShapeRef.new(shape: StatusMessage, location_name: "StatusMessage"))
713
+ GetTextDetectionResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
714
+ GetTextDetectionResponse.add_member(:text_detections, Shapes::ShapeRef.new(shape: TextDetectionResults, location_name: "TextDetections"))
715
+ GetTextDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
716
+ GetTextDetectionResponse.add_member(:text_model_version, Shapes::ShapeRef.new(shape: String, location_name: "TextModelVersion"))
717
+ GetTextDetectionResponse.struct_class = Types::GetTextDetectionResponse
718
+
682
719
  GroundTruthManifest.add_member(:s3_object, Shapes::ShapeRef.new(shape: S3Object, location_name: "S3Object"))
683
720
  GroundTruthManifest.struct_class = Types::GroundTruthManifest
684
721
 
@@ -873,6 +910,11 @@ module Aws::Rekognition
873
910
  RecognizeCelebritiesResponse.add_member(:orientation_correction, Shapes::ShapeRef.new(shape: OrientationCorrection, location_name: "OrientationCorrection"))
874
911
  RecognizeCelebritiesResponse.struct_class = Types::RecognizeCelebritiesResponse
875
912
 
913
+ RegionOfInterest.add_member(:bounding_box, Shapes::ShapeRef.new(shape: BoundingBox, location_name: "BoundingBox"))
914
+ RegionOfInterest.struct_class = Types::RegionOfInterest
915
+
916
+ RegionsOfInterest.member = Shapes::ShapeRef.new(shape: RegionOfInterest)
917
+
876
918
  S3Object.add_member(:bucket, Shapes::ShapeRef.new(shape: S3Bucket, location_name: "Bucket"))
877
919
  S3Object.add_member(:name, Shapes::ShapeRef.new(shape: S3ObjectName, location_name: "Name"))
878
920
  S3Object.add_member(:version, Shapes::ShapeRef.new(shape: S3ObjectVersion, location_name: "Version"))
@@ -977,6 +1019,20 @@ module Aws::Rekognition
977
1019
 
978
1020
  StartStreamProcessorResponse.struct_class = Types::StartStreamProcessorResponse
979
1021
 
1022
+ StartTextDetectionFilters.add_member(:word_filter, Shapes::ShapeRef.new(shape: DetectionFilter, location_name: "WordFilter"))
1023
+ StartTextDetectionFilters.add_member(:regions_of_interest, Shapes::ShapeRef.new(shape: RegionsOfInterest, location_name: "RegionsOfInterest"))
1024
+ StartTextDetectionFilters.struct_class = Types::StartTextDetectionFilters
1025
+
1026
+ StartTextDetectionRequest.add_member(:video, Shapes::ShapeRef.new(shape: Video, required: true, location_name: "Video"))
1027
+ StartTextDetectionRequest.add_member(:client_request_token, Shapes::ShapeRef.new(shape: ClientRequestToken, location_name: "ClientRequestToken"))
1028
+ StartTextDetectionRequest.add_member(:notification_channel, Shapes::ShapeRef.new(shape: NotificationChannel, location_name: "NotificationChannel"))
1029
+ StartTextDetectionRequest.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
1030
+ StartTextDetectionRequest.add_member(:filters, Shapes::ShapeRef.new(shape: StartTextDetectionFilters, location_name: "Filters"))
1031
+ StartTextDetectionRequest.struct_class = Types::StartTextDetectionRequest
1032
+
1033
+ StartTextDetectionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
1034
+ StartTextDetectionResponse.struct_class = Types::StartTextDetectionResponse
1035
+
980
1036
  StopProjectVersionRequest.add_member(:project_version_arn, Shapes::ShapeRef.new(shape: ProjectVersionArn, required: true, location_name: "ProjectVersionArn"))
981
1037
  StopProjectVersionRequest.struct_class = Types::StopProjectVersionRequest
982
1038
 
@@ -1028,6 +1084,12 @@ module Aws::Rekognition
1028
1084
 
1029
1085
  TextDetectionList.member = Shapes::ShapeRef.new(shape: TextDetection)
1030
1086
 
1087
+ TextDetectionResult.add_member(:timestamp, Shapes::ShapeRef.new(shape: Timestamp, location_name: "Timestamp"))
1088
+ TextDetectionResult.add_member(:text_detection, Shapes::ShapeRef.new(shape: TextDetection, location_name: "TextDetection"))
1089
+ TextDetectionResult.struct_class = Types::TextDetectionResult
1090
+
1091
+ TextDetectionResults.member = Shapes::ShapeRef.new(shape: TextDetectionResult)
1092
+
1031
1093
  TrainingData.add_member(:assets, Shapes::ShapeRef.new(shape: Assets, location_name: "Assets"))
1032
1094
  TrainingData.struct_class = Types::TrainingData
1033
1095
 
@@ -1486,6 +1548,27 @@ module Aws::Rekognition
1486
1548
  )
1487
1549
  end)
1488
1550
 
1551
+ api.add_operation(:get_text_detection, Seahorse::Model::Operation.new.tap do |o|
1552
+ o.name = "GetTextDetection"
1553
+ o.http_method = "POST"
1554
+ o.http_request_uri = "/"
1555
+ o.input = Shapes::ShapeRef.new(shape: GetTextDetectionRequest)
1556
+ o.output = Shapes::ShapeRef.new(shape: GetTextDetectionResponse)
1557
+ o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
1558
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerError)
1559
+ o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
1560
+ o.errors << Shapes::ShapeRef.new(shape: InvalidPaginationTokenException)
1561
+ o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
1562
+ o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
1563
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
1564
+ o[:pager] = Aws::Pager.new(
1565
+ limit_key: "max_results",
1566
+ tokens: {
1567
+ "next_token" => "next_token"
1568
+ }
1569
+ )
1570
+ end)
1571
+
1489
1572
  api.add_operation(:index_faces, Seahorse::Model::Operation.new.tap do |o|
1490
1573
  o.name = "IndexFaces"
1491
1574
  o.http_method = "POST"
@@ -1747,6 +1830,23 @@ module Aws::Rekognition
1747
1830
  o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
1748
1831
  end)
1749
1832
 
1833
+ api.add_operation(:start_text_detection, Seahorse::Model::Operation.new.tap do |o|
1834
+ o.name = "StartTextDetection"
1835
+ o.http_method = "POST"
1836
+ o.http_request_uri = "/"
1837
+ o.input = Shapes::ShapeRef.new(shape: StartTextDetectionRequest)
1838
+ o.output = Shapes::ShapeRef.new(shape: StartTextDetectionResponse)
1839
+ o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
1840
+ o.errors << Shapes::ShapeRef.new(shape: IdempotentParameterMismatchException)
1841
+ o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
1842
+ o.errors << Shapes::ShapeRef.new(shape: InvalidS3ObjectException)
1843
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerError)
1844
+ o.errors << Shapes::ShapeRef.new(shape: VideoTooLargeException)
1845
+ o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
1846
+ o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
1847
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
1848
+ end)
1849
+
1750
1850
  api.add_operation(:stop_project_version, Seahorse::Model::Operation.new.tap do |o|
1751
1851
  o.name = "StopProjectVersion"
1752
1852
  o.http_method = "POST"
@@ -97,6 +97,16 @@ module Aws::Rekognition
97
97
  #
98
98
  # </note>
99
99
  #
100
+ # @note When making an API call, you may pass BoundingBox
101
+ # data as a hash:
102
+ #
103
+ # {
104
+ # width: 1.0,
105
+ # height: 1.0,
106
+ # left: 1.0,
107
+ # top: 1.0,
108
+ # }
109
+ #
100
110
  # @!attribute [rw] width
101
111
  # Width of the bounding box as a ratio of the overall image width.
102
112
  # @return [Float]
@@ -1297,6 +1307,48 @@ module Aws::Rekognition
1297
1307
  include Aws::Structure
1298
1308
  end
1299
1309
 
1310
+ # A set of optional parameters that you can use to set the criteria that
1311
+ # the text must meet to be included in your response. `WordFilter` looks
1312
+ # at a word’s height, width, and minimum confidence. `RegionOfInterest`
1313
+ # lets you set a specific region of the image to look for text in.
1314
+ #
1315
+ # @note When making an API call, you may pass DetectTextFilters
1316
+ # data as a hash:
1317
+ #
1318
+ # {
1319
+ # word_filter: {
1320
+ # min_confidence: 1.0,
1321
+ # min_bounding_box_height: 1.0,
1322
+ # min_bounding_box_width: 1.0,
1323
+ # },
1324
+ # regions_of_interest: [
1325
+ # {
1326
+ # bounding_box: {
1327
+ # width: 1.0,
1328
+ # height: 1.0,
1329
+ # left: 1.0,
1330
+ # top: 1.0,
1331
+ # },
1332
+ # },
1333
+ # ],
1334
+ # }
1335
+ #
1336
+ # @!attribute [rw] word_filter
1337
+ # A set of parameters that allow you to filter out certain results
1338
+ # from your returned results.
1339
+ # @return [Types::DetectionFilter]
1340
+ #
1341
+ # @!attribute [rw] regions_of_interest
1342
+ # A Filter focusing on a certain area of the image. Uses a
1343
+ # `BoundingBox` object to set the region of the image.
1344
+ # @return [Array<Types::RegionOfInterest>]
1345
+ #
1346
+ class DetectTextFilters < Struct.new(
1347
+ :word_filter,
1348
+ :regions_of_interest)
1349
+ include Aws::Structure
1350
+ end
1351
+
1300
1352
  # @note When making an API call, you may pass DetectTextRequest
1301
1353
  # data as a hash:
1302
1354
  #
@@ -1309,6 +1361,23 @@ module Aws::Rekognition
1309
1361
  # version: "S3ObjectVersion",
1310
1362
  # },
1311
1363
  # },
1364
+ # filters: {
1365
+ # word_filter: {
1366
+ # min_confidence: 1.0,
1367
+ # min_bounding_box_height: 1.0,
1368
+ # min_bounding_box_width: 1.0,
1369
+ # },
1370
+ # regions_of_interest: [
1371
+ # {
1372
+ # bounding_box: {
1373
+ # width: 1.0,
1374
+ # height: 1.0,
1375
+ # left: 1.0,
1376
+ # top: 1.0,
1377
+ # },
1378
+ # },
1379
+ # ],
1380
+ # },
1312
1381
  # }
1313
1382
  #
1314
1383
  # @!attribute [rw] image
@@ -1322,8 +1391,14 @@ module Aws::Rekognition
1322
1391
  # developer guide.
1323
1392
  # @return [Types::Image]
1324
1393
  #
1394
+ # @!attribute [rw] filters
1395
+ # Optional parameters that let you set the criteria that the text must
1396
+ # meet to be included in your response.
1397
+ # @return [Types::DetectTextFilters]
1398
+ #
1325
1399
  class DetectTextRequest < Struct.new(
1326
- :image)
1400
+ :image,
1401
+ :filters)
1327
1402
  include Aws::Structure
1328
1403
  end
1329
1404
 
@@ -1331,8 +1406,51 @@ module Aws::Rekognition
1331
1406
  # An array of text that was detected in the input image.
1332
1407
  # @return [Array<Types::TextDetection>]
1333
1408
  #
1409
+ # @!attribute [rw] text_model_version
1410
+ # The model version used to detect text.
1411
+ # @return [String]
1412
+ #
1334
1413
  class DetectTextResponse < Struct.new(
1335
- :text_detections)
1414
+ :text_detections,
1415
+ :text_model_version)
1416
+ include Aws::Structure
1417
+ end
1418
+
1419
+ # A set of parameters that allow you to filter out certain results from
1420
+ # your returned results.
1421
+ #
1422
+ # @note When making an API call, you may pass DetectionFilter
1423
+ # data as a hash:
1424
+ #
1425
+ # {
1426
+ # min_confidence: 1.0,
1427
+ # min_bounding_box_height: 1.0,
1428
+ # min_bounding_box_width: 1.0,
1429
+ # }
1430
+ #
1431
+ # @!attribute [rw] min_confidence
1432
+ # Sets confidence of word detection. Words with detection confidence
1433
+ # below this will be excluded from the result. Values should be
1434
+ # between 0.5 and 1 as Text in Video will not return any result below
1435
+ # 0.5.
1436
+ # @return [Float]
1437
+ #
1438
+ # @!attribute [rw] min_bounding_box_height
1439
+ # Sets the minimum height of the word bounding box. Words with
1440
+ # bounding box heights less than this value will be excluded from
1441
+ # the result. Value is relative to the video frame height.
1442
+ # @return [Float]
1443
+ #
1444
+ # @!attribute [rw] min_bounding_box_width
1445
+ # Sets the minimum width of the word bounding box. Words with bounding
1446
+ # box widths less than this value will be excluded from the
1447
+ # result. Value is relative to the video frame width.
1448
+ # @return [Float]
1449
+ #
1450
+ class DetectionFilter < Struct.new(
1451
+ :min_confidence,
1452
+ :min_bounding_box_height,
1453
+ :min_bounding_box_width)
1336
1454
  include Aws::Structure
1337
1455
  end
1338
1456
 
@@ -1642,7 +1760,7 @@ module Aws::Rekognition
1642
1760
  #
1643
1761
  # @!attribute [rw] face_match_threshold
1644
1762
  # Minimum face match confidence score that must be met to return a
1645
- # result for a recognized face. Default is 70. 0 is the lowest
1763
+ # result for a recognized face. Default is 80. 0 is the lowest
1646
1764
  # confidence. 100 is the highest confidence.
1647
1765
  # @return [Float]
1648
1766
  #
@@ -2223,6 +2341,82 @@ module Aws::Rekognition
2223
2341
  include Aws::Structure
2224
2342
  end
2225
2343
 
2344
+ # @note When making an API call, you may pass GetTextDetectionRequest
2345
+ # data as a hash:
2346
+ #
2347
+ # {
2348
+ # job_id: "JobId", # required
2349
+ # max_results: 1,
2350
+ # next_token: "PaginationToken",
2351
+ # }
2352
+ #
2353
+ # @!attribute [rw] job_id
2354
+ # Job identifier for the text detection operation for which you want
2355
+ # results returned. You get the job identifier from an initial call to
2356
+ # `StartTextDetection`.
2357
+ # @return [String]
2358
+ #
2359
+ # @!attribute [rw] max_results
2360
+ # Maximum number of results to return per paginated call. The largest
2361
+ # value you can specify is 1000.
2362
+ # @return [Integer]
2363
+ #
2364
+ # @!attribute [rw] next_token
2365
+ # If the previous response was incomplete (because there is more
2366
+ # text to retrieve), Amazon Rekognition Video returns a pagination
2367
+ # token in the response. You can use this pagination token to retrieve
2368
+ # the next set of text.
2369
+ # @return [String]
2370
+ #
2371
+ class GetTextDetectionRequest < Struct.new(
2372
+ :job_id,
2373
+ :max_results,
2374
+ :next_token)
2375
+ include Aws::Structure
2376
+ end
2377
+
2378
+ # @!attribute [rw] job_status
2379
+ # Current status of the text detection job.
2380
+ # @return [String]
2381
+ #
2382
+ # @!attribute [rw] status_message
2383
+ # If the job fails, `StatusMessage` provides a descriptive error
2384
+ # message.
2385
+ # @return [String]
2386
+ #
2387
+ # @!attribute [rw] video_metadata
2388
+ # Information about a video that Amazon Rekognition analyzed.
2389
+ # `Videometadata` is returned in every page of paginated responses
2390
+ # from an Amazon Rekognition video operation.
2391
+ # @return [Types::VideoMetadata]
2392
+ #
2393
+ # @!attribute [rw] text_detections
2394
+ # An array of text detected in the video. Each element contains the
2395
+ # detected text, the time in milliseconds from the start of the video
2396
+ # that the text was detected, and where it was detected on the screen.
2397
+ # @return [Array<Types::TextDetectionResult>]
2398
+ #
2399
+ # @!attribute [rw] next_token
2400
+ # If the response is truncated, Amazon Rekognition Video returns this
2401
+ # token that you can use in the subsequent request to retrieve the
2402
+ # next set of text.
2403
+ # @return [String]
2404
+ #
2405
+ # @!attribute [rw] text_model_version
2406
+ # Version number of the text detection model that was used to detect
2407
+ # text.
2408
+ # @return [String]
2409
+ #
2410
+ class GetTextDetectionResponse < Struct.new(
2411
+ :job_status,
2412
+ :status_message,
2413
+ :video_metadata,
2414
+ :text_detections,
2415
+ :next_token,
2416
+ :text_model_version)
2417
+ include Aws::Structure
2418
+ end
2419
+
2226
2420
  # The S3 bucket that contains the Ground Truth manifest file.
2227
2421
  #
2228
2422
  # @note When making an API call, you may pass GroundTruthManifest
@@ -3256,6 +3450,35 @@ module Aws::Rekognition
3256
3450
  include Aws::Structure
3257
3451
  end
3258
3452
 
3453
+ # Specifies a location within the frame that Rekognition checks for
3454
+ # text. Uses a `BoundingBox` object to set a region of the screen.
3455
+ #
3456
+ # A word is included in the region if the word is more than half in that
3457
+ # region. If there is more than one region, the word will be compared
3458
+ # with all regions of the screen. Any word more than half in a region is
3459
+ # kept in the results.
3460
+ #
3461
+ # @note When making an API call, you may pass RegionOfInterest
3462
+ # data as a hash:
3463
+ #
3464
+ # {
3465
+ # bounding_box: {
3466
+ # width: 1.0,
3467
+ # height: 1.0,
3468
+ # left: 1.0,
3469
+ # top: 1.0,
3470
+ # },
3471
+ # }
3472
+ #
3473
+ # @!attribute [rw] bounding_box
3474
+ # The box representing a region of interest on screen.
3475
+ # @return [Types::BoundingBox]
3476
+ #
3477
+ class RegionOfInterest < Struct.new(
3478
+ :bounding_box)
3479
+ include Aws::Structure
3480
+ end
3481
+
3259
3482
  # Provides the S3 bucket name and object name.
3260
3483
  #
3261
3484
  # The region for the S3 bucket containing the S3 object must match the
@@ -3941,6 +4164,135 @@ module Aws::Rekognition
3941
4164
 
3942
4165
  class StartStreamProcessorResponse < Aws::EmptyStructure; end
3943
4166
 
4167
+ # Set of optional parameters that let you set the criteria text must
4168
+ # meet to be included in your response. `WordFilter` looks at a word's
4169
+ # height, width and minimum confidence. `RegionOfInterest` lets you set
4170
+ # a specific region of the screen to look for text in.
4171
+ #
4172
+ # @note When making an API call, you may pass StartTextDetectionFilters
4173
+ # data as a hash:
4174
+ #
4175
+ # {
4176
+ # word_filter: {
4177
+ # min_confidence: 1.0,
4178
+ # min_bounding_box_height: 1.0,
4179
+ # min_bounding_box_width: 1.0,
4180
+ # },
4181
+ # regions_of_interest: [
4182
+ # {
4183
+ # bounding_box: {
4184
+ # width: 1.0,
4185
+ # height: 1.0,
4186
+ # left: 1.0,
4187
+ # top: 1.0,
4188
+ # },
4189
+ # },
4190
+ # ],
4191
+ # }
4192
+ #
4193
+ # @!attribute [rw] word_filter
4194
+ # Filters focusing on qualities of the text, such as confidence or
4195
+ # size.
4196
+ # @return [Types::DetectionFilter]
4197
+ #
4198
+ # @!attribute [rw] regions_of_interest
4199
+ # Filter focusing on a certain area of the frame. Uses a `BoundingBox`
4200
+ # object to set the region of the screen.
4201
+ # @return [Array<Types::RegionOfInterest>]
4202
+ #
4203
+ class StartTextDetectionFilters < Struct.new(
4204
+ :word_filter,
4205
+ :regions_of_interest)
4206
+ include Aws::Structure
4207
+ end
4208
+
4209
+ # @note When making an API call, you may pass StartTextDetectionRequest
4210
+ # data as a hash:
4211
+ #
4212
+ # {
4213
+ # video: { # required
4214
+ # s3_object: {
4215
+ # bucket: "S3Bucket",
4216
+ # name: "S3ObjectName",
4217
+ # version: "S3ObjectVersion",
4218
+ # },
4219
+ # },
4220
+ # client_request_token: "ClientRequestToken",
4221
+ # notification_channel: {
4222
+ # sns_topic_arn: "SNSTopicArn", # required
4223
+ # role_arn: "RoleArn", # required
4224
+ # },
4225
+ # job_tag: "JobTag",
4226
+ # filters: {
4227
+ # word_filter: {
4228
+ # min_confidence: 1.0,
4229
+ # min_bounding_box_height: 1.0,
4230
+ # min_bounding_box_width: 1.0,
4231
+ # },
4232
+ # regions_of_interest: [
4233
+ # {
4234
+ # bounding_box: {
4235
+ # width: 1.0,
4236
+ # height: 1.0,
4237
+ # left: 1.0,
4238
+ # top: 1.0,
4239
+ # },
4240
+ # },
4241
+ # ],
4242
+ # },
4243
+ # }
4244
+ #
4245
+ # @!attribute [rw] video
4246
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
4247
+ # start operations such as StartLabelDetection use `Video` to specify
4248
+ # a video for analysis. The supported file formats are .mp4, .mov and
4249
+ # .avi.
4250
+ # @return [Types::Video]
4251
+ #
4252
+ # @!attribute [rw] client_request_token
4253
+ # Idempotent token used to identify the start request. If you use the
4254
+ # same token with multiple `StartTextDetection` requests, the same
4255
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same
4256
+ # job from being accidentally started more than once.
4257
+ # @return [String]
4258
+ #
4259
+ # @!attribute [rw] notification_channel
4260
+ # The Amazon Simple Notification Service topic to which Amazon
4261
+ # Rekognition publishes the completion status of a video analysis
4262
+ # operation. For more information, see api-video.
4263
+ # @return [Types::NotificationChannel]
4264
+ #
4265
+ # @!attribute [rw] job_tag
4266
+ # An identifier returned in the completion status published by your
4267
+ # Amazon Simple Notification Service topic. For example, you can use
4268
+ # `JobTag` to group related jobs and identify them in the completion
4269
+ # notification.
4270
+ # @return [String]
4271
+ #
4272
+ # @!attribute [rw] filters
4273
+ # Optional parameters that let you set criteria the text must meet to
4274
+ # be included in your response.
4275
+ # @return [Types::StartTextDetectionFilters]
4276
+ #
4277
+ class StartTextDetectionRequest < Struct.new(
4278
+ :video,
4279
+ :client_request_token,
4280
+ :notification_channel,
4281
+ :job_tag,
4282
+ :filters)
4283
+ include Aws::Structure
4284
+ end
4285
+
4286
+ # @!attribute [rw] job_id
4287
+ # Identifier for the text detection job. Use `JobId` to identify the
4288
+ # job in a subsequent call to `GetTextDetection`.
4289
+ # @return [String]
4290
+ #
4291
+ class StartTextDetectionResponse < Struct.new(
4292
+ :job_id)
4293
+ include Aws::Structure
4294
+ end
4295
+
3944
4296
  # @note When making an API call, you may pass StopProjectVersionRequest
3945
4297
  # data as a hash:
3946
4298
  #
@@ -4227,6 +4579,25 @@ module Aws::Rekognition
4227
4579
  include Aws::Structure
4228
4580
  end
4229
4581
 
4582
+ # Information about text detected in a video. Incudes the detected text,
4583
+ # the time in milliseconds from the start of the video that the text was
4584
+ # detected, and where it was detected on the screen.
4585
+ #
4586
+ # @!attribute [rw] timestamp
4587
+ # The time, in milliseconds from the start of the video, that the text
4588
+ # was detected.
4589
+ # @return [Integer]
4590
+ #
4591
+ # @!attribute [rw] text_detection
4592
+ # Details about text detected in a video.
4593
+ # @return [Types::TextDetection]
4594
+ #
4595
+ class TextDetectionResult < Struct.new(
4596
+ :timestamp,
4597
+ :text_detection)
4598
+ include Aws::Structure
4599
+ end
4600
+
4230
4601
  # The dataset used for training.
4231
4602
  #
4232
4603
  # @note When making an API call, you may pass TrainingData
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: aws-sdk-rekognition
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.33.0
4
+ version: 1.34.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Amazon Web Services
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2019-12-04 00:00:00.000000000 Z
11
+ date: 2020-02-17 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: aws-sdk-core