google-cloud-vision 0.21.1 → 0.22.0

@@ -112,8 +112,8 @@ module Google
  # color.blue #=> 20.0
  # color.rgb #=> "f7ec14"
  # color.alpha #=> 1.0
- # color.score #=> 0.20301804
- # color.pixel_fraction #=> 0.0072649573
+ # color.score #=> 0.20301803946495056
+ # color.pixel_fraction #=> 0.007264957297593355
  #
  class Color
  ##
@@ -35,7 +35,7 @@ module Google
  #
  # safe_search = image.safe_search
  # safe_search.spoof? #=> false
- # safe_search.spoof #=> "VERY_UNLIKELY"
+ # safe_search.spoof #=> :VERY_UNLIKELY
  #
  class SafeSearch
  POSITIVE_RATINGS = %i(POSSIBLE LIKELY VERY_LIKELY)
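
The SafeSearch likelihood readers now return symbols such as :VERY_UNLIKELY rather than strings. A minimal sketch of how calling code might adapt, using only the accessors shown above (the image path is illustrative):

    require "google/cloud/vision"

    vision = Google::Cloud::Vision.new
    image = vision.image "path/to/face.jpg" # illustrative path

    safe_search = image.safe_search

    # Likelihoods are now symbols (:VERY_UNLIKELY, :LIKELY, ...), so compare
    # against symbols rather than the strings returned in 0.21.x.
    positive = %i(POSSIBLE LIKELY VERY_LIKELY) # mirrors POSITIVE_RATINGS above
    puts "possible spoof" if positive.include? safe_search.spoof
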
@@ -35,7 +35,7 @@ module Google
  # text.locale #=> "en"
  # text.words.count #=> 28
  # text.text
- # #=> "Google Cloud Client for Ruby an idiomatic, intuitive... "
+ # # "Google Cloud Client for Ruby an idiomatic, intuitive... "
  #
  class Text
  ##
@@ -148,7 +148,9 @@ module Google
  # word = words.first
  # word.text #=> "Google"
  # word.bounds.count #=> 4
- # word.bounds.first #=> #<Vertex (x: 13, y: 8)>
+ # vertex = word.bounds.first
+ # vertex.x #=> 13
+ # vertex.y #=> 8
  #
  class Word
  ##
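
Word bounds are now read through the Vertex accessors rather than the inspect string. A short sketch, assuming the text and word accessors shown in the example above (the image path is illustrative):

    require "google/cloud/vision"

    vision = Google::Cloud::Vision.new
    image = vision.image "path/to/text.png" # illustrative path

    text = image.text

    # Each word carries its own text and four bounding vertices; read the
    # coordinates via Vertex#x and Vertex#y instead of parsing #inspect output.
    text.words.each do |word|
      corner = word.bounds.first
      puts "#{word.text} @ (#{corner.x}, #{corner.y})"
    end
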
@@ -37,8 +37,8 @@ module Google
  #
  # text.bounds.count #=> 4
  # vertex = text.bounds.first
- # vertex.x #=> 13
- # vertex.y #=> 8
+ # vertex.x #=> 1
+ # vertex.y #=> 0
  #
  class Vertex
  attr_reader :x, :y
@@ -103,7 +103,9 @@ module Google
  #
  # face = faces.first
  # face.bounds.face.count #=> 4
- # face.bounds.face.first #=> #<Vertex (x: 153, y: 34)>
+ # vertex = face.bounds.face.first
+ # vertex.x #=> 28
+ # vertex.y #=> 40
  #
  def faces max_results = Google::Cloud::Vision.default_max_faces
  ensure_vision!
@@ -140,7 +142,7 @@ module Google
  # landmarks = image.landmarks
  #
  # landmark = landmarks.first
- # landmark.score #=> 0.91912264
+ # landmark.score #=> 0.9191226363182068
  # landmark.description #=> "Mount Rushmore"
  # landmark.mid #=> "/m/019dvv"
  #
@@ -179,7 +181,7 @@ module Google
  # logos = image.logos
  #
  # logo = logos.first
- # logo.score #=> 0.70057315
+ # logo.score #=> 0.7005731463432312
  # logo.description #=> "Google"
  # logo.mid #=> "/m/0b34hf"
  #
@@ -213,15 +215,15 @@ module Google
  # require "google/cloud/vision"
  #
  # vision = Google::Cloud::Vision.new
- # image = vision.image "path/to/face.jpg"
+ # image = vision.image "path/to/landmark.jpg"
  #
  # labels = image.labels
  #
  # labels.count #=> 4
  # label = labels.first
- # label.score #=> 0.9481349
- # label.description #=> "person"
- # label.mid #=> "/m/01g317"
+ # label.score #=> 0.9481348991394043
+ # label.description #=> "stone carving"
+ # label.mid #=> "/m/02wtjj"
  #
  def labels max_results = Google::Cloud::Vision.default_max_labels
  ensure_vision!
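
The labels example now uses the landmark image and reports full-precision scores. A brief sketch filtering labels by score, using only the accessors shown in the example above (the threshold value is arbitrary):

    require "google/cloud/vision"

    vision = Google::Cloud::Vision.new
    image = vision.image "path/to/landmark.jpg"

    # Scores are plain Floats in [0, 1]; the extra digits in the new examples
    # only reflect full Float precision, not a behavior change.
    confident = image.labels.select { |label| label.score > 0.75 }
    confident.each { |label| puts "#{label.description} (#{label.mid})" }
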
@@ -254,11 +256,10 @@ module Google
  #
  # text = image.text
  #
- # text = image.text
  # text.locale #=> "en"
  # text.words.count #=> 28
  # text.text
- # #=> "Google Cloud Client for Ruby an idiomatic, intuitive... "
+ # # "Google Cloud Client for Ruby an idiomatic, intuitive... "
  #
  def text
  ensure_vision!
@@ -282,7 +283,7 @@ module Google
  # safe_search = image.safe_search
  #
  # safe_search.spoof? #=> false
- # safe_search.spoof #=> "VERY_UNLIKELY"
+ # safe_search.spoof #=> :VERY_UNLIKELY
  #
  def safe_search
  ensure_vision!
@@ -42,8 +42,8 @@ module Google
  # annotation = vision.annotate image, labels: true
  #
  # annotation.labels.map &:description
- # #=> ["stone carving", "ancient history", "statue", "sculpture",
- # #=> "monument", "landmark"]
+ # # ["stone carving", "ancient history", "statue", "sculpture",
+ # # "monument", "landmark"]
  #
  # See Google::Cloud#vision
  class Project
@@ -196,8 +196,8 @@ module Google
  # annotation = vision.annotate image, labels: true
  #
  # annotation.labels.map &:description
- # #=> ["stone carving", "ancient history", "statue", "sculpture",
- # #=> "monument", "landmark"]
+ # # ["stone carving", "ancient history", "statue", "sculpture",
+ # # "monument", "landmark"]
  #
  # @example With multiple images:
  # require "google/cloud/vision"
@@ -242,7 +242,7 @@ module Google
  # annotation = vision.annotate image, labels: 3
  #
  # annotation.labels.map &:description
- # #=> ["stone carving", "ancient history", "statue"]
+ # # ["stone carving", "ancient history", "statue"]
  #
  def annotate *images, faces: false, landmarks: false, logos: false,
  labels: false, text: false, safe_search: false,
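
As the signature above shows, annotate accepts either true or an Integer per feature, the Integer capping how many results that feature returns. A hedged sketch combining a count with another feature flag (the exact combination is illustrative):

    require "google/cloud/vision"

    vision = Google::Cloud::Vision.new
    image = vision.image "path/to/landmark.jpg"

    # Labels are capped at 3 results; landmark detection uses its default limit.
    annotation = vision.annotate image, labels: 3, landmarks: true

    annotation.labels.map(&:description)     # at most 3 descriptions
    annotation.landmarks.map(&:description)
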
@@ -33,7 +33,7 @@ module Google
  client_config: nil
  @project = project
  @credentials = credentials
- @host = host || V1::ImageAnnotatorApi::SERVICE_ADDRESS
+ @host = host || V1::ImageAnnotatorClient::SERVICE_ADDRESS
  @timeout = timeout
  @client_config = client_config || {}
  end
@@ -53,7 +53,7 @@ module Google
  def service
  return mocked_service if mocked_service
  @service ||= \
- V1::ImageAnnotatorApi.new(
+ V1::ImageAnnotatorClient.new(
  service_path: host,
  channel: channel,
  timeout: timeout,
@@ -12,4 +12,4 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- require "google/cloud/vision/v1/image_annotator_api"
+ require "google/cloud/vision/v1/image_annotator_client"
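
The generated GAPIC class is now ImageAnnotatorClient (previously ImageAnnotatorApi), and the require path changes accordingly. The high-level API is unaffected; the sketch below only matters if you load the generated V1 client yourself, and it assumes the constructor keywords match the ones the Service wrapper passes above:

    require "google/cloud/vision/v1/image_annotator_client"

    # service_path: and timeout: mirror the Service wrapper above; the default
    # endpoint is exposed as SERVICE_ADDRESS on the renamed class.
    client = Google::Cloud::Vision::V1::ImageAnnotatorClient.new(
      service_path: Google::Cloud::Vision::V1::ImageAnnotatorClient::SERVICE_ADDRESS,
      timeout: 30
    )
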
@@ -0,0 +1,51 @@
+ # Copyright 2016 Google Inc. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ module Google
+ module Cloud
+ module Vision
+ module V1
+ # A vertex represents a 2D point in the image.
+ # NOTE: the vertex coordinates are in the same scale as the original image.
+ # @!attribute [rw] x
+ # @return [Integer]
+ # X coordinate.
+ # @!attribute [rw] y
+ # @return [Integer]
+ # Y coordinate.
+ class Vertex; end
+
+ # A bounding polygon for the detected image annotation.
+ # @!attribute [rw] vertices
+ # @return [Array<Google::Cloud::Vision::V1::Vertex>]
+ # The bounding polygon vertices.
+ class BoundingPoly; end
+
+ # A 3D position in the image, used primarily for Face detection landmarks.
+ # A valid Position must have both x and y coordinates.
+ # The position coordinates are in the same scale as the original image.
+ # @!attribute [rw] x
+ # @return [Float]
+ # X coordinate.
+ # @!attribute [rw] y
+ # @return [Float]
+ # Y coordinate.
+ # @!attribute [rw] z
+ # @return [Float]
+ # Z coordinate (or depth).
+ class Position; end
+ end
+ end
+ end
+ end
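
These V1 geometry types back the vertex objects used in the bounds examples above. A short sketch deriving an axis-aligned box from any set of bounds vertices; the enclosing_box helper is hypothetical and relies only on the #x and #y readers:

    # Given annotation bounds (an Array of Vertex-like objects responding to
    # #x and #y), compute the box that encloses all of them.
    def enclosing_box vertices
      xs = vertices.map(&:x)
      ys = vertices.map(&:y)
      { left: xs.min, top: ys.min, width: xs.max - xs.min, height: ys.max - ys.min }
    end

    # e.g. enclosing_box word.bounds  #=> { left: 13, top: 8, ... } (illustrative)
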
@@ -0,0 +1,481 @@
+ # Copyright 2016 Google Inc. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ module Google
+ module Cloud
+ module Vision
+ module V1
+ # The <em>Feature</em> indicates what type of image detection task to perform.
+ # Users describe the type of Google Cloud Vision API tasks to perform over
+ # images by using <em>Feature</em>s. Features encode the Cloud Vision API
+ # vertical to operate on and the number of top-scoring results to return.
+ # @!attribute [rw] type
+ # @return [Google::Cloud::Vision::V1::Feature::Type]
+ # The feature type.
+ # @!attribute [rw] max_results
+ # @return [Integer]
+ # Maximum number of results of this type.
+ class Feature
+ # Type of image feature.
+ module Type
+ # Unspecified feature type.
+ TYPE_UNSPECIFIED = 0
+
+ # Run face detection.
+ FACE_DETECTION = 1
+
+ # Run landmark detection.
+ LANDMARK_DETECTION = 2
+
+ # Run logo detection.
+ LOGO_DETECTION = 3
+
+ # Run label detection.
+ LABEL_DETECTION = 4
+
+ # Run OCR.
+ TEXT_DETECTION = 5
+
+ # Run various computer vision models to compute image safe-search properties.
+ SAFE_SEARCH_DETECTION = 6
+
+ # Compute a set of properties about the image (such as the image's dominant colors).
+ IMAGE_PROPERTIES = 7
+ end
+ end
+
+ # External image source (Google Cloud Storage image location).
+ # @!attribute [rw] gcs_image_uri
+ # @return [String]
+ # Google Cloud Storage image URI. It must be in the following form:
+ # +gs://bucket_name/object_name+. For more
+ # details, please see: https://cloud.google.com/storage/docs/reference-uris.
+ # NOTE: Cloud Storage object versioning is not supported!
+ class ImageSource; end
+
+ # Client image to perform Google Cloud Vision API tasks over.
+ # @!attribute [rw] content
+ # @return [String]
+ # Image content, represented as a stream of bytes.
+ # Note: as with all +bytes+ fields, protobuffers use a pure binary
+ # representation, whereas JSON representations use base64.
+ # @!attribute [rw] source
+ # @return [Google::Cloud::Vision::V1::ImageSource]
+ # Google Cloud Storage image location. If both 'content' and 'source'
+ # are filled for an image, 'content' takes precedence and it will be
+ # used for performing the image annotation request.
+ class Image; end
+
+ # A face annotation object contains the results of face detection.
+ # @!attribute [rw] bounding_poly
+ # @return [Google::Cloud::Vision::V1::BoundingPoly]
+ # The bounding polygon around the face. The coordinates of the bounding box
+ # are in the original image's scale, as returned in ImageParams.
+ # The bounding box is computed to "frame" the face in accordance with human
+ # expectations. It is based on the landmarker results.
+ # Note that one or more x and/or y coordinates may not be generated in the
+ # BoundingPoly (the polygon will be unbounded) if only a partial face appears in
+ # the image to be annotated.
+ # @!attribute [rw] fd_bounding_poly
+ # @return [Google::Cloud::Vision::V1::BoundingPoly]
+ # This bounding polygon is tighter than the previous
+ # <code>boundingPoly</code>, and
+ # encloses only the skin part of the face. Typically, it is used to
+ # eliminate the face from any image analysis that detects the
+ # "amount of skin" visible in an image. It is not based on the
+ # landmarker results, only on the initial face detection, hence
+ # the <code>fd</code> (face detection) prefix.
+ # @!attribute [rw] landmarks
+ # @return [Array<Google::Cloud::Vision::V1::FaceAnnotation::Landmark>]
+ # Detected face landmarks.
+ # @!attribute [rw] roll_angle
+ # @return [Float]
+ # Roll angle. Indicates the amount of clockwise/anti-clockwise rotation of
+ # the
+ # face relative to the image vertical, about the axis perpendicular to the
+ # face. Range [-180,180].
+ # @!attribute [rw] pan_angle
+ # @return [Float]
+ # Yaw angle. Indicates the leftward/rightward angle that the face is
+ # pointing, relative to the vertical plane perpendicular to the image. Range
+ # [-180,180].
+ # @!attribute [rw] tilt_angle
+ # @return [Float]
+ # Pitch angle. Indicates the upwards/downwards angle that the face is
+ # pointing
+ # relative to the image's horizontal plane. Range [-180,180].
+ # @!attribute [rw] detection_confidence
+ # @return [Float]
+ # Detection confidence. Range [0, 1].
+ # @!attribute [rw] landmarking_confidence
+ # @return [Float]
+ # Face landmarking confidence. Range [0, 1].
+ # @!attribute [rw] joy_likelihood
+ # @return [Google::Cloud::Vision::V1::Likelihood]
+ # Joy likelihood.
+ # @!attribute [rw] sorrow_likelihood
+ # @return [Google::Cloud::Vision::V1::Likelihood]
+ # Sorrow likelihood.
+ # @!attribute [rw] anger_likelihood
+ # @return [Google::Cloud::Vision::V1::Likelihood]
+ # Anger likelihood.
+ # @!attribute [rw] surprise_likelihood
+ # @return [Google::Cloud::Vision::V1::Likelihood]
+ # Surprise likelihood.
+ # @!attribute [rw] under_exposed_likelihood
+ # @return [Google::Cloud::Vision::V1::Likelihood]
+ # Under-exposed likelihood.
+ # @!attribute [rw] blurred_likelihood
+ # @return [Google::Cloud::Vision::V1::Likelihood]
+ # Blurred likelihood.
+ # @!attribute [rw] headwear_likelihood
+ # @return [Google::Cloud::Vision::V1::Likelihood]
+ # Headwear likelihood.
+ class FaceAnnotation
+ # A face-specific landmark (for example, a face feature).
+ # Landmark positions may fall outside the bounds of the image
+ # when the face is near one or more edges of the image.
+ # Therefore it is NOT guaranteed that 0 <= x < width or 0 <= y < height.
+ # @!attribute [rw] type
+ # @return [Google::Cloud::Vision::V1::FaceAnnotation::Landmark::Type]
+ # Face landmark type.
+ # @!attribute [rw] position
+ # @return [Google::Cloud::Vision::V1::Position]
+ # Face landmark position.
+ class Landmark
+ # Face landmark (feature) type.
+ # Left and right are defined from the vantage of the viewer of the image,
+ # without considering mirror projections typical of photos. So, LEFT_EYE,
+ # typically is the person's right eye.
+ module Type
+ # Unknown face landmark detected. Should not be filled.
+ UNKNOWN_LANDMARK = 0
+
+ # Left eye.
+ LEFT_EYE = 1
+
+ # Right eye.
+ RIGHT_EYE = 2
+
+ # Left of left eyebrow.
+ LEFT_OF_LEFT_EYEBROW = 3
+
+ # Right of left eyebrow.
+ RIGHT_OF_LEFT_EYEBROW = 4
+
+ # Left of right eyebrow.
+ LEFT_OF_RIGHT_EYEBROW = 5
+
+ # Right of right eyebrow.
+ RIGHT_OF_RIGHT_EYEBROW = 6
+
+ # Midpoint between eyes.
+ MIDPOINT_BETWEEN_EYES = 7
+
+ # Nose tip.
+ NOSE_TIP = 8
+
+ # Upper lip.
+ UPPER_LIP = 9
+
+ # Lower lip.
+ LOWER_LIP = 10
+
+ # Mouth left.
+ MOUTH_LEFT = 11
+
+ # Mouth right.
+ MOUTH_RIGHT = 12
+
+ # Mouth center.
+ MOUTH_CENTER = 13
+
+ # Nose, bottom right.
+ NOSE_BOTTOM_RIGHT = 14
+
+ # Nose, bottom left.
+ NOSE_BOTTOM_LEFT = 15
+
+ # Nose, bottom center.
+ NOSE_BOTTOM_CENTER = 16
+
+ # Left eye, top boundary.
+ LEFT_EYE_TOP_BOUNDARY = 17
+
+ # Left eye, right corner.
+ LEFT_EYE_RIGHT_CORNER = 18
+
+ # Left eye, bottom boundary.
+ LEFT_EYE_BOTTOM_BOUNDARY = 19
+
+ # Left eye, left corner.
+ LEFT_EYE_LEFT_CORNER = 20
+
+ # Right eye, top boundary.
+ RIGHT_EYE_TOP_BOUNDARY = 21
+
+ # Right eye, right corner.
+ RIGHT_EYE_RIGHT_CORNER = 22
+
+ # Right eye, bottom boundary.
+ RIGHT_EYE_BOTTOM_BOUNDARY = 23
+
+ # Right eye, left corner.
+ RIGHT_EYE_LEFT_CORNER = 24
+
+ # Left eyebrow, upper midpoint.
+ LEFT_EYEBROW_UPPER_MIDPOINT = 25
+
+ # Right eyebrow, upper midpoint.
+ RIGHT_EYEBROW_UPPER_MIDPOINT = 26
+
+ # Left ear tragion.
+ LEFT_EAR_TRAGION = 27
+
+ # Right ear tragion.
+ RIGHT_EAR_TRAGION = 28
+
+ # Left eye pupil.
+ LEFT_EYE_PUPIL = 29
+
+ # Right eye pupil.
+ RIGHT_EYE_PUPIL = 30
+
+ # Forehead glabella.
+ FOREHEAD_GLABELLA = 31
+
+ # Chin gnathion.
+ CHIN_GNATHION = 32
+
+ # Chin left gonion.
+ CHIN_LEFT_GONION = 33
+
+ # Chin right gonion.
+ CHIN_RIGHT_GONION = 34
+ end
+ end
+ end
+
+ # Detected entity location information.
+ # @!attribute [rw] lat_lng
+ # @return [Google::Type::LatLng]
+ # Lat - long location coordinates.
+ class LocationInfo; end
+
+ # Arbitrary name/value pair.
+ # @!attribute [rw] name
+ # @return [String]
+ # Name of the property.
+ # @!attribute [rw] value
+ # @return [String]
+ # Value of the property.
+ class Property; end
+
+ # Set of detected entity features.
+ # @!attribute [rw] mid
+ # @return [String]
+ # Opaque entity ID. Some IDs might be available in Knowledge Graph(KG).
+ # For more details on KG please see:
+ # https://developers.google.com/knowledge-graph/
+ # @!attribute [rw] locale
+ # @return [String]
+ # The language code for the locale in which the entity textual
+ # <code>description</code> (next field) is expressed.
+ # @!attribute [rw] description
+ # @return [String]
+ # Entity textual description, expressed in its <code>locale</code> language.
+ # @!attribute [rw] score
+ # @return [Float]
+ # Overall score of the result. Range [0, 1].
+ # @!attribute [rw] confidence
+ # @return [Float]
+ # The accuracy of the entity detection in an image.
+ # For example, for an image containing 'Eiffel Tower,' this field represents
+ # the confidence that there is a tower in the query image. Range [0, 1].
+ # @!attribute [rw] topicality
+ # @return [Float]
+ # The relevancy of the ICA (Image Content Annotation) label to the
+ # image. For example, the relevancy of 'tower' to an image containing
+ # 'Eiffel Tower' is likely higher than an image containing a distant towering
+ # building, though the confidence that there is a tower may be the same.
+ # Range [0, 1].
+ # @!attribute [rw] bounding_poly
+ # @return [Google::Cloud::Vision::V1::BoundingPoly]
+ # Image region to which this entity belongs. Not filled currently
+ # for +LABEL_DETECTION+ features. For +TEXT_DETECTION+ (OCR), +boundingPoly+s
+ # are produced for the entire text detected in an image region, followed by
+ # +boundingPoly+s for each word within the detected text.
+ # @!attribute [rw] locations
+ # @return [Array<Google::Cloud::Vision::V1::LocationInfo>]
+ # The location information for the detected entity. Multiple
+ # <code>LocationInfo</code> elements can be present since one location may
+ # indicate the location of the scene in the query image, and another the
+ # location of the place where the query image was taken. Location information
+ # is usually present for landmarks.
+ # @!attribute [rw] properties
+ # @return [Array<Google::Cloud::Vision::V1::Property>]
+ # Some entities can have additional optional <code>Property</code> fields.
+ # For example a different kind of score or string that qualifies the entity.
+ class EntityAnnotation; end
+
+ # Set of features pertaining to the image, computed by various computer vision
+ # methods over safe-search verticals (for example, adult, spoof, medical,
+ # violence).
+ # @!attribute [rw] adult
+ # @return [Google::Cloud::Vision::V1::Likelihood]
+ # Represents the adult contents likelihood for the image.
+ # @!attribute [rw] spoof
+ # @return [Google::Cloud::Vision::V1::Likelihood]
+ # Spoof likelihood. The likelihood that an obvious modification
+ # was made to the image's canonical version to make it appear
+ # funny or offensive.
+ # @!attribute [rw] medical
+ # @return [Google::Cloud::Vision::V1::Likelihood]
+ # Likelihood this is a medical image.
+ # @!attribute [rw] violence
+ # @return [Google::Cloud::Vision::V1::Likelihood]
+ # Violence likelihood.
+ class SafeSearchAnnotation; end
+
+ # Rectangle determined by min and max LatLng pairs.
+ # @!attribute [rw] min_lat_lng
+ # @return [Google::Type::LatLng]
+ # Min lat/long pair.
+ # @!attribute [rw] max_lat_lng
+ # @return [Google::Type::LatLng]
+ # Max lat/long pair.
+ class LatLongRect; end
+
+ # Color information consists of RGB channels, score and fraction of
+ # image the color occupies in the image.
+ # @!attribute [rw] color
+ # @return [Google::Type::Color]
+ # RGB components of the color.
+ # @!attribute [rw] score
+ # @return [Float]
+ # Image-specific score for this color. Value in range [0, 1].
+ # @!attribute [rw] pixel_fraction
+ # @return [Float]
+ # Stores the fraction of pixels the color occupies in the image.
+ # Value in range [0, 1].
+ class ColorInfo; end
+
+ # Set of dominant colors and their corresponding scores.
+ # @!attribute [rw] colors
+ # @return [Array<Google::Cloud::Vision::V1::ColorInfo>]
+ # RGB color values, with their score and pixel fraction.
+ class DominantColorsAnnotation; end
+
+ # Stores image properties (e.g. dominant colors).
+ # @!attribute [rw] dominant_colors
+ # @return [Google::Cloud::Vision::V1::DominantColorsAnnotation]
+ # If present, dominant colors completed successfully.
+ class ImageProperties; end
+
+ # Image context.
+ # @!attribute [rw] lat_long_rect
+ # @return [Google::Cloud::Vision::V1::LatLongRect]
+ # Lat/long rectangle that specifies the location of the image.
+ # @!attribute [rw] language_hints
+ # @return [Array<String>]
+ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
+ # yields the best results since it enables automatic language detection. For
+ # languages based on the Latin alphabet, setting +language_hints+ is not
+ # needed. In rare cases, when the language of the text in the image is known,
+ # setting a hint will help get better results (although it will be a
+ # significant hindrance if the hint is wrong). Text detection returns an
+ # error if one or more of the specified languages is not one of the
+ # {supported
+ # languages}[https://cloud.google.com/translate/v2/translate-reference#supported_languages].
+ class ImageContext; end
+
+ # Request for performing Google Cloud Vision API tasks over a user-provided
+ # image, with user-requested features.
+ # @!attribute [rw] image
+ # @return [Google::Cloud::Vision::V1::Image]
+ # The image to be processed.
+ # @!attribute [rw] features
+ # @return [Array<Google::Cloud::Vision::V1::Feature>]
+ # Requested features.
+ # @!attribute [rw] image_context
+ # @return [Google::Cloud::Vision::V1::ImageContext]
+ # Additional context that may accompany the image.
+ class AnnotateImageRequest; end
+
+ # Response to an image annotation request.
+ # @!attribute [rw] face_annotations
+ # @return [Array<Google::Cloud::Vision::V1::FaceAnnotation>]
+ # If present, face detection completed successfully.
+ # @!attribute [rw] landmark_annotations
+ # @return [Array<Google::Cloud::Vision::V1::EntityAnnotation>]
+ # If present, landmark detection completed successfully.
+ # @!attribute [rw] logo_annotations
+ # @return [Array<Google::Cloud::Vision::V1::EntityAnnotation>]
+ # If present, logo detection completed successfully.
+ # @!attribute [rw] label_annotations
+ # @return [Array<Google::Cloud::Vision::V1::EntityAnnotation>]
+ # If present, label detection completed successfully.
+ # @!attribute [rw] text_annotations
+ # @return [Array<Google::Cloud::Vision::V1::EntityAnnotation>]
+ # If present, text (OCR) detection completed successfully.
+ # @!attribute [rw] safe_search_annotation
+ # @return [Google::Cloud::Vision::V1::SafeSearchAnnotation]
+ # If present, safe-search annotation completed successfully.
+ # @!attribute [rw] image_properties_annotation
+ # @return [Google::Cloud::Vision::V1::ImageProperties]
+ # If present, image properties were extracted successfully.
+ # @!attribute [rw] error
+ # @return [Google::Rpc::Status]
+ # If set, represents the error message for the operation.
+ # Note that filled-in image annotations are guaranteed to be
+ # correct, even when <code>error</code> is non-empty.
443
+ class AnnotateImageResponse; end
444
+
445
+ # Multiple image annotation requests are batched into a single service call.
446
+ # @!attribute [rw] requests
447
+ # @return [Array<Google::Cloud::Vision::V1::AnnotateImageRequest>]
448
+ # Individual image annotation requests for this batch.
449
+ class BatchAnnotateImagesRequest; end
450
+
451
+ # Response to a batch image annotation request.
452
+ # @!attribute [rw] responses
453
+ # @return [Array<Google::Cloud::Vision::V1::AnnotateImageResponse>]
454
+ # Individual responses to image annotation requests within the batch.
455
+ class BatchAnnotateImagesResponse; end
456
+
457
+ # A bucketized representation of likelihood meant to give our clients highly
458
+ # stable results across model upgrades.
459
+ module Likelihood
460
+ # Unknown likelihood.
461
+ UNKNOWN = 0
462
+
463
+ # The image very unlikely belongs to the vertical specified.
464
+ VERY_UNLIKELY = 1
465
+
466
+ # The image unlikely belongs to the vertical specified.
467
+ UNLIKELY = 2
468
+
469
+ # The image possibly belongs to the vertical specified.
470
+ POSSIBLE = 3
471
+
472
+ # The image likely belongs to the vertical specified.
473
+ LIKELY = 4
474
+
475
+ # The image very likely belongs to the vertical specified.
476
+ VERY_LIKELY = 5
477
+ end
478
+ end
479
+ end
480
+ end
481
+ end
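
The new V1 documentation above describes the request/response protos and Likelihood buckets that the convenience methods wrap. A hedged sketch of how one high-level call maps onto those concepts, using only methods shown elsewhere in this diff (the image path is illustrative):

    require "google/cloud/vision"

    vision = Google::Cloud::Vision.new
    image = vision.image "path/to/face.jpg" # illustrative path

    # One annotate call corresponds to one AnnotateImageRequest carrying a
    # Feature per enabled detection type (FACE_DETECTION, SAFE_SEARCH_DETECTION, ...).
    annotation = vision.annotate image, faces: true, safe_search: true

    annotation.faces.each do |face|
      # Bounding vertices come from BoundingPoly/Vertex documented above.
      puts face.bounds.face.map { |v| [v.x, v.y] }.inspect
    end

    # SafeSearch values surface the bucketized Likelihood as symbols.
    annotation.safe_search.spoof #=> :VERY_UNLIKELY (for example)
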