google-cloud-vision 0.22.1 → 0.23.0

@@ -101,13 +101,21 @@ module Google
  # Practices
  #
  # @param [String, IO, StringIO, Tempfile, Google::Cloud::Storage::File]
- # source A string file path or Cloud Storage URI of the form
+ # source A string file path, publicly-accessible image HTTP/HTTPS URL,
+ # or Cloud Storage URI of the form
  # `"gs://bucketname/path/to/image_filename"`; or a File, IO, StringIO,
  # or Tempfile instance; or an instance of
  # Google::Cloud::Storage::File.
  #
  # @return [Image] An image for the Vision service.
  #
+ # @example With a publicly-accessible image HTTP/HTTPS URL:
+ # require "google/cloud/vision"
+ #
+ # vision = Google::Cloud::Vision.new
+ #
+ # image = vision.image "https://www.example.com/images/landmark.jpg"
+ #
  # @example With a Google Cloud Storage URI:
  # require "google/cloud/vision"
  #
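
Taken together, the updated #image documentation above describes three accepted source forms. A minimal sketch combining them, where the file path, URL, and bucket names are placeholders:

    require "google/cloud/vision"

    vision = Google::Cloud::Vision.new

    # Local file path
    image = vision.image "path/to/landmark.jpg"

    # Publicly-accessible HTTP/HTTPS URL (new in 0.23.0)
    image = vision.image "https://www.example.com/images/landmark.jpg"

    # Google Cloud Storage URI
    image = vision.image "gs://bucketname/path/to/image_filename"
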
@@ -153,7 +161,11 @@ module Google
  #
  # @param [Image, Object] images The image or images to annotate. This
  # can be an {Image} instance, or any other type that converts to an
- # {Image}. See {#image} for details.
+ # {Image}: A string file path, publicly-accessible image HTTP/HTTPS
+ # URL, or Cloud Storage URI of the form
+ # `"gs://bucketname/path/to/image_filename"`; or a File, IO, StringIO,
+ # or Tempfile instance; or an instance of
+ # Google::Cloud::Storage::File.
  # @param [Boolean, Integer] faces Whether to perform the facial
  # detection feature. The maximum number of results is configured in
  # {Google::Cloud::Vision.default_max_faces}, or may be provided here.
@@ -170,12 +182,19 @@ module Google
  # detection feature. The maximum number of results is configured in
  # {Google::Cloud::Vision.default_max_labels}, or may be provided here.
  # Optional.
- # @param [Boolean] text Whether to perform the text (OCR) feature.
+ # @param [Boolean] text Whether to perform the text detection feature
+ # (OCR for shorter documents with sparse text). Optional.
+ # @param [Boolean] document Whether to perform the document text
+ # detection feature (OCR for longer documents with dense text).
  # Optional.
  # @param [Boolean] safe_search Whether to perform the safe search
  # feature. Optional.
  # @param [Boolean] properties Whether to perform the image properties
  # feature (currently, the image's dominant colors.) Optional.
+ # @param [Boolean, Integer] crop_hints Whether to perform the crop hints
+ # feature. Optional.
+ # @param [Boolean, Integer] web Whether to perform the web annotation
+ # feature. Optional.
  #
  # @yield [annotate] A block for requests that involve multiple feature
  # configurations. See {Annotate#annotate}.
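
The hunk above adds document:, crop_hints:, and web: keywords to #annotate in this release. A minimal sketch of a single call and of the @yield block form, assuming (as in the gem's other examples) that a single image returns a single annotation and that requests queued in the block are executed when the block returns; the file paths are placeholders:

    require "google/cloud/vision"

    vision = Google::Cloud::Vision.new

    # Single call mixing existing and newly added feature keywords.
    annotation = vision.annotate "path/to/text.png",
                                 document: true, crop_hints: true, web: true
    annotation.text.pages.count #=> 1  # result accessor as in the example further down

    # Block form for per-image feature configurations (see Annotate#annotate).
    annotations = vision.annotate do |annotate|
      annotate.annotate "path/to/face.jpg", faces: true, labels: true
      annotate.annotate "path/to/receipt.png", document: true
    end
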
@@ -230,7 +249,7 @@ module Google
  # annotations[0].faces.count #=> 1
  # annotations[0].labels.count #=> 4
  # annotations[1].landmarks.count #=> 1
- # annotations[2].text.words.count #=> 28
+ # annotations[2].text.pages.count #=> 1
  #
  # @example Maximum result values can also be provided:
  # require "google/cloud/vision"
@@ -245,12 +264,14 @@ module Google
  # # ["stone carving", "ancient history", "statue"]
  #
  def annotate *images, faces: false, landmarks: false, logos: false,
- labels: false, text: false, safe_search: false,
- properties: false
+ labels: false, text: false, document: false,
+ safe_search: false, properties: false, crop_hints: false,
+ web: false
  a = Annotate.new self
  a.annotate(*images, faces: faces, landmarks: landmarks, logos: logos,
- labels: labels, text: text,
- safe_search: safe_search, properties: properties)
+ labels: labels, text: text, document: document,
+ safe_search: safe_search, properties: properties,
+ crop_hints: crop_hints, web: web)

  yield a if block_given?

@@ -16,9 +16,9 @@ module Google
  module Cloud
  module Vision
  module V1
- # The <em>Feature</em> indicates what type of image detection task to perform.
  # Users describe the type of Google Cloud Vision API tasks to perform over
- # images by using <em>Feature</em>s. Features encode the Cloud Vision API
+ # images by using *Feature*s. Each Feature indicates a type of image
+ # detection task to perform. Features encode the Cloud Vision API
  # vertical to operate on and the number of top-scoring results to return.
  # @!attribute [rw] type
  # @return [Google::Cloud::Vision::V1::Feature::Type]
@@ -47,21 +47,45 @@ module Google
  # Run OCR.
  TEXT_DETECTION = 5

- # Run various computer vision models to compute image safe-search properties.
+ # Run dense text document OCR. Takes precedence when both
+ # DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present.
+ DOCUMENT_TEXT_DETECTION = 11
+
+ # Run computer vision models to compute image safe-search properties.
  SAFE_SEARCH_DETECTION = 6

- # Compute a set of properties about the image (such as the image's dominant colors).
+ # Compute a set of image properties, such as the image's dominant colors.
  IMAGE_PROPERTIES = 7
+
+ # Run crop hints.
+ CROP_HINTS = 9
+
+ # Run web detection.
+ WEB_DETECTION = 10
  end
  end

  # External image source (Google Cloud Storage image location).
  # @!attribute [rw] gcs_image_uri
  # @return [String]
- # Google Cloud Storage image URI. It must be in the following form:
- # +gs://bucket_name/object_name+. For more
- # details, please see: https://cloud.google.com/storage/docs/reference-uris.
- # NOTE: Cloud Storage object versioning is not supported!
+ # NOTE: For new code +image_uri+ below is preferred.
+ # Google Cloud Storage image URI, which must be in the following form:
+ # +gs://bucket_name/object_name+ (for details, see
+ # {Google Cloud Storage Request
+ # URIs}[https://cloud.google.com/storage/docs/reference-uris]).
+ # NOTE: Cloud Storage object versioning is not supported.
+ # @!attribute [rw] image_uri
+ # @return [String]
+ # Image URI which supports:
+ # 1) Google Cloud Storage image URI, which must be in the following form:
+ # +gs://bucket_name/object_name+ (for details, see
+ # {Google Cloud Storage Request
+ # URIs}[https://cloud.google.com/storage/docs/reference-uris]).
+ # NOTE: Cloud Storage object versioning is not supported.
+ # 2) Publicly accessible image HTTP/HTTPS URL.
+ # This is preferred over the legacy +gcs_image_uri+ above. When both
+ # +gcs_image_uri+ and +image_uri+ are specified, +image_uri+ takes
+ # precedence.
  class ImageSource; end

  # Client image to perform Google Cloud Vision API tasks over.
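
For reference, an illustrative plain-Ruby hash (not an API call) showing the two ImageSource fields documented in the hunk above and their documented precedence; the values are placeholders:

    image_source = {
      gcs_image_uri: "gs://bucket_name/object_name",               # legacy field
      image_uri:     "https://www.example.com/images/landmark.jpg" # preferred; wins when both are set
    }
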
@@ -72,27 +96,26 @@ module Google
  # representation, whereas JSON representations use base64.
  # @!attribute [rw] source
  # @return [Google::Cloud::Vision::V1::ImageSource]
- # Google Cloud Storage image location. If both 'content' and 'source'
- # are filled for an image, 'content' takes precedence and it will be
- # used for performing the image annotation request.
+ # Google Cloud Storage image location. If both +content+ and +source+
+ # are provided for an image, +content+ takes precedence and is
+ # used to perform the image annotation request.
  class Image; end

  # A face annotation object contains the results of face detection.
  # @!attribute [rw] bounding_poly
  # @return [Google::Cloud::Vision::V1::BoundingPoly]
  # The bounding polygon around the face. The coordinates of the bounding box
- # are in the original image's scale, as returned in ImageParams.
+ # are in the original image's scale, as returned in +ImageParams+.
  # The bounding box is computed to "frame" the face in accordance with human
  # expectations. It is based on the landmarker results.
  # Note that one or more x and/or y coordinates may not be generated in the
- # BoundingPoly (the polygon will be unbounded) if only a partial face appears in
- # the image to be annotated.
+ # +BoundingPoly+ (the polygon will be unbounded) if only a partial face
+ # appears in the image to be annotated.
  # @!attribute [rw] fd_bounding_poly
  # @return [Google::Cloud::Vision::V1::BoundingPoly]
- # This bounding polygon is tighter than the previous
- # <code>boundingPoly</code>, and
- # encloses only the skin part of the face. Typically, it is used to
- # eliminate the face from any image analysis that detects the
+ # The +fd_bounding_poly+ bounding polygon is tighter than the
+ # +boundingPoly+, and encloses only the skin part of the face. Typically, it
+ # is used to eliminate the face from any image analysis that detects the
  # "amount of skin" visible in an image. It is not based on the
  # landmarker results, only on the initial face detection, hence
  # the <code>fd</code> (face detection) prefix.
@@ -101,20 +124,18 @@ module Google
  # Detected face landmarks.
  # @!attribute [rw] roll_angle
  # @return [Float]
- # Roll angle. Indicates the amount of clockwise/anti-clockwise rotation of
- # the
- # face relative to the image vertical, about the axis perpendicular to the
- # face. Range [-180,180].
+ # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+ # of the face relative to the image vertical about the axis perpendicular to
+ # the face. Range [-180,180].
  # @!attribute [rw] pan_angle
  # @return [Float]
- # Yaw angle. Indicates the leftward/rightward angle that the face is
- # pointing, relative to the vertical plane perpendicular to the image. Range
+ # Yaw angle, which indicates the leftward/rightward angle that the face is
+ # pointing relative to the vertical plane perpendicular to the image. Range
  # [-180,180].
  # @!attribute [rw] tilt_angle
  # @return [Float]
- # Pitch angle. Indicates the upwards/downwards angle that the face is
- # pointing
- # relative to the image's horizontal plane. Range [-180,180].
+ # Pitch angle, which indicates the upwards/downwards angle that the face is
+ # pointing relative to the image's horizontal plane. Range [-180,180].
  # @!attribute [rw] detection_confidence
  # @return [Float]
  # Detection confidence. Range [0, 1].
@@ -145,8 +166,9 @@ module Google
  class FaceAnnotation
  # A face-specific landmark (for example, a face feature).
  # Landmark positions may fall outside the bounds of the image
- # when the face is near one or more edges of the image.
- # Therefore it is NOT guaranteed that 0 <= x < width or 0 <= y < height.
+ # if the face is near one or more edges of the image.
+ # Therefore it is NOT guaranteed that +0 <= x < width+ or
+ # +0 <= y < height+.
  # @!attribute [rw] type
  # @return [Google::Cloud::Vision::V1::FaceAnnotation::Landmark::Type]
  # Face landmark type.
@@ -155,9 +177,9 @@ module Google
  # Face landmark position.
  class Landmark
  # Face landmark (feature) type.
- # Left and right are defined from the vantage of the viewer of the image,
- # without considering mirror projections typical of photos. So, LEFT_EYE,
- # typically is the person's right eye.
+ # Left and right are defined from the vantage of the viewer of the image
+ # without considering mirror projections typical of photos. So, +LEFT_EYE+,
+ # typically, is the person's right eye.
  module Type
  # Unknown face landmark detected. Should not be filled.
  UNKNOWN_LANDMARK = 0
@@ -270,10 +292,10 @@ module Google
  # Detected entity location information.
  # @!attribute [rw] lat_lng
  # @return [Google::Type::LatLng]
- # Lat - long location coordinates.
+ # lat/long location coordinates.
  class LocationInfo; end

- # Arbitrary name/value pair.
+ # A +Property+ consists of a user-supplied name/value pair.
  # @!attribute [rw] name
  # @return [String]
  # Name of the property.
@@ -285,70 +307,70 @@ module Google
  # Set of detected entity features.
  # @!attribute [rw] mid
  # @return [String]
- # Opaque entity ID. Some IDs might be available in Knowledge Graph(KG).
- # For more details on KG please see:
- # https://developers.google.com/knowledge-graph/
+ # Opaque entity ID. Some IDs may be available in
+ # {Google Knowledge Graph Search API}[https://developers.google.com/knowledge-graph/].
  # @!attribute [rw] locale
  # @return [String]
  # The language code for the locale in which the entity textual
- # <code>description</code> (next field) is expressed.
+ # +description+ is expressed.
  # @!attribute [rw] description
  # @return [String]
- # Entity textual description, expressed in its <code>locale</code> language.
+ # Entity textual description, expressed in its +locale+ language.
  # @!attribute [rw] score
  # @return [Float]
  # Overall score of the result. Range [0, 1].
  # @!attribute [rw] confidence
  # @return [Float]
  # The accuracy of the entity detection in an image.
- # For example, for an image containing 'Eiffel Tower,' this field represents
- # the confidence that there is a tower in the query image. Range [0, 1].
+ # For example, for an image in which the "Eiffel Tower" entity is detected,
+ # this field represents the confidence that there is a tower in the query
+ # image. Range [0, 1].
  # @!attribute [rw] topicality
  # @return [Float]
  # The relevancy of the ICA (Image Content Annotation) label to the
- # image. For example, the relevancy of 'tower' to an image containing
- # 'Eiffel Tower' is likely higher than an image containing a distant towering
- # building, though the confidence that there is a tower may be the same.
- # Range [0, 1].
+ # image. For example, the relevancy of "tower" is likely higher to an image
+ # containing the detected "Eiffel Tower" than to an image containing a
+ # detected distant towering building, even though the confidence that
+ # there is a tower in each image may be the same. Range [0, 1].
  # @!attribute [rw] bounding_poly
  # @return [Google::Cloud::Vision::V1::BoundingPoly]
- # Image region to which this entity belongs. Not filled currently
+ # Image region to which this entity belongs. Currently not produced
  # for +LABEL_DETECTION+ features. For +TEXT_DETECTION+ (OCR), +boundingPoly+s
  # are produced for the entire text detected in an image region, followed by
  # +boundingPoly+s for each word within the detected text.
  # @!attribute [rw] locations
  # @return [Array<Google::Cloud::Vision::V1::LocationInfo>]
  # The location information for the detected entity. Multiple
- # <code>LocationInfo</code> elements can be present since one location may
- # indicate the location of the scene in the query image, and another the
- # location of the place where the query image was taken. Location information
- # is usually present for landmarks.
+ # +LocationInfo+ elements can be present because one location may
+ # indicate the location of the scene in the image, and another location
+ # may indicate the location of the place where the image was taken.
+ # Location information is usually present for landmarks.
  # @!attribute [rw] properties
  # @return [Array<Google::Cloud::Vision::V1::Property>]
- # Some entities can have additional optional <code>Property</code> fields.
- # For example a different kind of score or string that qualifies the entity.
+ # Some entities may have optional user-supplied +Property+ (name/value)
+ # fields, such as a score or string that qualifies the entity.
  class EntityAnnotation; end

- # Set of features pertaining to the image, computed by various computer vision
+ # Set of features pertaining to the image, computed by computer vision
  # methods over safe-search verticals (for example, adult, spoof, medical,
  # violence).
  # @!attribute [rw] adult
  # @return [Google::Cloud::Vision::V1::Likelihood]
- # Represents the adult contents likelihood for the image.
+ # Represents the adult content likelihood for the image.
  # @!attribute [rw] spoof
  # @return [Google::Cloud::Vision::V1::Likelihood]
- # Spoof likelihood. The likelihood that an obvious modification
+ # Spoof likelihood. The likelihood that a modification
  # was made to the image's canonical version to make it appear
  # funny or offensive.
  # @!attribute [rw] medical
  # @return [Google::Cloud::Vision::V1::Likelihood]
- # Likelihood this is a medical image.
+ # Likelihood that this is a medical image.
  # @!attribute [rw] violence
  # @return [Google::Cloud::Vision::V1::Likelihood]
  # Violence likelihood.
  class SafeSearchAnnotation; end

- # Rectangle determined by min and max LatLng pairs.
+ # Rectangle determined by min and max +LatLng+ pairs.
  # @!attribute [rw] min_lat_lng
  # @return [Google::Type::LatLng]
  # Min lat/long pair.
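
Because score, confidence, and topicality above all fall in [0, 1], entity results can be filtered client-side. A hypothetical sketch; annotation.labels appears in this gem's examples, but the per-entity #score and #description readers used here are assumptions that mirror the EntityAnnotation fields documented above:

    annotation = vision.annotate "path/to/landmark.jpg", labels: true

    # Keep only labels the service scored above 0.75 (assumed accessors).
    strong_labels = annotation.labels.select { |label| label.score > 0.75 }
    strong_labels.map(&:description)
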
@@ -357,8 +379,8 @@ module Google
  # Max lat/long pair.
  class LatLongRect; end

- # Color information consists of RGB channels, score and fraction of
- # image the color occupies in the image.
+ # Color information consists of RGB channels, score, and the fraction of
+ # the image that the color occupies.
  # @!attribute [rw] color
  # @return [Google::Type::Color]
  # RGB components of the color.
@@ -367,26 +389,56 @@ module Google
  # Image-specific score for this color. Value in range [0, 1].
  # @!attribute [rw] pixel_fraction
  # @return [Float]
- # Stores the fraction of pixels the color occupies in the image.
+ # The fraction of pixels the color occupies in the image.
  # Value in range [0, 1].
  class ColorInfo; end

  # Set of dominant colors and their corresponding scores.
  # @!attribute [rw] colors
  # @return [Array<Google::Cloud::Vision::V1::ColorInfo>]
- # RGB color values, with their score and pixel fraction.
+ # RGB color values with their score and pixel fraction.
  class DominantColorsAnnotation; end

- # Stores image properties (e.g. dominant colors).
+ # Stores image properties, such as dominant colors.
  # @!attribute [rw] dominant_colors
  # @return [Google::Cloud::Vision::V1::DominantColorsAnnotation]
  # If present, dominant colors completed successfully.
  class ImageProperties; end

- # Image context.
+ # Single crop hint that is used to generate a new crop when serving an image.
+ # @!attribute [rw] bounding_poly
+ # @return [Google::Cloud::Vision::V1::BoundingPoly]
+ # The bounding polygon for the crop region. The coordinates of the bounding
+ # box are in the original image's scale, as returned in +ImageParams+.
+ # @!attribute [rw] confidence
+ # @return [Float]
+ # Confidence of this being a salient region. Range [0, 1].
+ # @!attribute [rw] importance_fraction
+ # @return [Float]
+ # Fraction of importance of this salient region with respect to the original
+ # image.
+ class CropHint; end
+
+ # Set of crop hints that are used to generate new crops when serving images.
+ # @!attribute [rw] crop_hints
+ # @return [Array<Google::Cloud::Vision::V1::CropHint>]
+ class CropHintsAnnotation; end
+
+ # Parameters for crop hints annotation request.
+ # @!attribute [rw] aspect_ratios
+ # @return [Array<Float>]
+ # Aspect ratios in floats, representing the ratio of the width to the height
+ # of the image. For example, if the desired aspect ratio is 4/3, the
+ # corresponding float value should be 1.33333. If not specified, the
+ # best possible crop is returned. The number of provided aspect ratios is
+ # limited to a maximum of 16; any aspect ratios provided after the 16th are
+ # ignored.
+ class CropHintsParams; end
+
+ # Image context and/or feature-specific parameters.
  # @!attribute [rw] lat_long_rect
  # @return [Google::Cloud::Vision::V1::LatLongRect]
- # Lat/long rectangle that specifies the location of the image.
+ # lat/long rectangle that specifies the location of the image.
  # @!attribute [rw] language_hints
  # @return [Array<String>]
  # List of languages to use for TEXT_DETECTION. In most cases, an empty value
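
Illustrative only: a plain-Ruby hash mirroring the ImageContext/CropHintsParams fields documented above. A 4:3 target aspect ratio is expressed as the float 1.33333 and 16:9 as 1.77778; at most 16 ratios are honored:

    image_context = {
      crop_hints_params: {
        aspect_ratios: [1.33333, 1.77778] # preferred crop shapes: 4:3, then 16:9
      }
    }
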
@@ -396,8 +448,10 @@ module Google
  # setting a hint will help get better results (although it will be a
  # significant hindrance if the hint is wrong). Text detection returns an
  # error if one or more of the specified languages is not one of the
- # {supported
- # languages}[https://cloud.google.com/translate/v2/translate-reference#supported_languages].
+ # {supported languages}[https://cloud.google.com/vision/docs/languages].
+ # @!attribute [rw] crop_hints_params
+ # @return [Google::Cloud::Vision::V1::CropHintsParams]
+ # Parameters for crop hints annotation request.
  class ImageContext; end

  # Request for performing Google Cloud Vision API tasks over a user-provided
@@ -416,30 +470,43 @@ module Google
  # Response to an image annotation request.
  # @!attribute [rw] face_annotations
  # @return [Array<Google::Cloud::Vision::V1::FaceAnnotation>]
- # If present, face detection completed successfully.
+ # If present, face detection has completed successfully.
  # @!attribute [rw] landmark_annotations
  # @return [Array<Google::Cloud::Vision::V1::EntityAnnotation>]
- # If present, landmark detection completed successfully.
+ # If present, landmark detection has completed successfully.
  # @!attribute [rw] logo_annotations
  # @return [Array<Google::Cloud::Vision::V1::EntityAnnotation>]
- # If present, logo detection completed successfully.
+ # If present, logo detection has completed successfully.
  # @!attribute [rw] label_annotations
  # @return [Array<Google::Cloud::Vision::V1::EntityAnnotation>]
- # If present, label detection completed successfully.
+ # If present, label detection has completed successfully.
  # @!attribute [rw] text_annotations
  # @return [Array<Google::Cloud::Vision::V1::EntityAnnotation>]
- # If present, text (OCR) detection completed successfully.
+ # If present, text (OCR) detection or document (OCR) text detection has
+ # completed successfully.
+ # @!attribute [rw] full_text_annotation
+ # @return [Google::Cloud::Vision::V1::TextAnnotation]
+ # If present, text (OCR) detection or document (OCR) text detection has
+ # completed successfully.
+ # This annotation provides the structural hierarchy for the OCR detected
+ # text.
  # @!attribute [rw] safe_search_annotation
  # @return [Google::Cloud::Vision::V1::SafeSearchAnnotation]
- # If present, safe-search annotation completed successfully.
+ # If present, safe-search annotation has completed successfully.
  # @!attribute [rw] image_properties_annotation
  # @return [Google::Cloud::Vision::V1::ImageProperties]
  # If present, image properties were extracted successfully.
+ # @!attribute [rw] crop_hints_annotation
+ # @return [Google::Cloud::Vision::V1::CropHintsAnnotation]
+ # If present, crop hints have completed successfully.
+ # @!attribute [rw] web_detection
+ # @return [Google::Cloud::Vision::V1::WebDetection]
+ # If present, web detection has completed successfully.
  # @!attribute [rw] error
  # @return [Google::Rpc::Status]
  # If set, represents the error message for the operation.
- # Note that filled-in mage annotations are guaranteed to be
- # correct, even when <code>error</code> is non-empty.
+ # Note that filled-in image annotations are guaranteed to be
+ # correct, even when +error+ is set.
  class AnnotateImageResponse; end

  # Multiple image annotation requests are batched into a single service call.
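
A hypothetical sketch of reading a single response using only the fields documented above; how the response object is obtained is outside this diff, and treating Google::Rpc::Status#message as the error's message text is an assumption:

    # Per the note above, filled-in annotations remain valid even when error is set.
    warn "annotation error: #{response.error.message}" if response.error

    response.label_annotations.each do |label|
      puts "#{label.description} (score #{label.score})"
    end
    puts "document text detected" if response.full_text_annotation
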
@@ -454,25 +521,25 @@ module Google
  # Individual responses to image annotation requests within the batch.
  class BatchAnnotateImagesResponse; end

- # A bucketized representation of likelihood meant to give our clients highly
- # stable results across model upgrades.
+ # A bucketized representation of likelihood, which is intended to give clients
+ # highly stable results across model upgrades.
  module Likelihood
  # Unknown likelihood.
  UNKNOWN = 0

- # The image very unlikely belongs to the vertical specified.
+ # It is very unlikely that the image belongs to the specified vertical.
  VERY_UNLIKELY = 1

- # The image unlikely belongs to the vertical specified.
+ # It is unlikely that the image belongs to the specified vertical.
  UNLIKELY = 2

- # The image possibly belongs to the vertical specified.
+ # It is possible that the image belongs to the specified vertical.
  POSSIBLE = 3

- # The image likely belongs to the vertical specified.
+ # It is likely that the image belongs to the specified vertical.
  LIKELY = 4

- # The image very likely belongs to the vertical specified.
+ # It is very likely that the image belongs to the specified vertical.
  VERY_LIKELY = 5
  end
  end
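
A hypothetical helper built on the bucketized values above, treating LIKELY and VERY_LIKELY as a positive signal; that a likelihood is surfaced as a symbol matching the constant name is an assumption:

    POSITIVE_LIKELIHOODS = [:LIKELY, :VERY_LIKELY].freeze

    # safe_search is a SafeSearchAnnotation with the adult/spoof/medical/violence
    # fields documented earlier in this diff.
    def flagged? safe_search, vertical = :adult
      POSITIVE_LIKELIHOODS.include? safe_search.public_send(vertical)
    end
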