google-cloud-vision_ai-v1 0.a → 0.2.0

Files changed (104)
  1. checksums.yaml +4 -4
  2. data/.yardopts +12 -0
  3. data/AUTHENTICATION.md +122 -0
  4. data/README.md +144 -8
  5. data/lib/google/cloud/vision_ai/v1/app_platform/client.rb +3318 -0
  6. data/lib/google/cloud/vision_ai/v1/app_platform/credentials.rb +47 -0
  7. data/lib/google/cloud/vision_ai/v1/app_platform/operations.rb +809 -0
  8. data/lib/google/cloud/vision_ai/v1/app_platform/paths.rb +151 -0
  9. data/lib/google/cloud/vision_ai/v1/app_platform/rest/client.rb +3106 -0
  10. data/lib/google/cloud/vision_ai/v1/app_platform/rest/operations.rb +951 -0
  11. data/lib/google/cloud/vision_ai/v1/app_platform/rest/service_stub.rb +1618 -0
  12. data/lib/google/cloud/vision_ai/v1/app_platform/rest.rb +53 -0
  13. data/lib/google/cloud/vision_ai/v1/app_platform.rb +56 -0
  14. data/lib/google/cloud/vision_ai/v1/health_check_service/client.rb +456 -0
  15. data/lib/google/cloud/vision_ai/v1/health_check_service/credentials.rb +47 -0
  16. data/lib/google/cloud/vision_ai/v1/health_check_service/paths.rb +52 -0
  17. data/lib/google/cloud/vision_ai/v1/health_check_service/rest/client.rb +423 -0
  18. data/lib/google/cloud/vision_ai/v1/health_check_service/rest/service_stub.rb +128 -0
  19. data/lib/google/cloud/vision_ai/v1/health_check_service/rest.rb +53 -0
  20. data/lib/google/cloud/vision_ai/v1/health_check_service.rb +56 -0
  21. data/lib/google/cloud/vision_ai/v1/live_video_analytics/client.rb +2375 -0
  22. data/lib/google/cloud/vision_ai/v1/live_video_analytics/credentials.rb +47 -0
  23. data/lib/google/cloud/vision_ai/v1/live_video_analytics/operations.rb +809 -0
  24. data/lib/google/cloud/vision_ai/v1/live_video_analytics/paths.rb +130 -0
  25. data/lib/google/cloud/vision_ai/v1/live_video_analytics/rest/client.rb +2220 -0
  26. data/lib/google/cloud/vision_ai/v1/live_video_analytics/rest/operations.rb +951 -0
  27. data/lib/google/cloud/vision_ai/v1/live_video_analytics/rest/service_stub.rb +1139 -0
  28. data/lib/google/cloud/vision_ai/v1/live_video_analytics/rest.rb +54 -0
  29. data/lib/google/cloud/vision_ai/v1/live_video_analytics.rb +57 -0
  30. data/lib/google/cloud/vision_ai/v1/rest.rb +42 -0
  31. data/lib/google/cloud/vision_ai/v1/streaming_service/client.rb +915 -0
  32. data/lib/google/cloud/vision_ai/v1/streaming_service/credentials.rb +47 -0
  33. data/lib/google/cloud/vision_ai/v1/streaming_service/paths.rb +54 -0
  34. data/lib/google/cloud/vision_ai/v1/streaming_service/rest/client.rb +610 -0
  35. data/lib/google/cloud/vision_ai/v1/streaming_service/rest/service_stub.rb +249 -0
  36. data/lib/google/cloud/vision_ai/v1/streaming_service/rest.rb +52 -0
  37. data/lib/google/cloud/vision_ai/v1/streaming_service.rb +55 -0
  38. data/lib/google/cloud/vision_ai/v1/streams_service/client.rb +2931 -0
  39. data/lib/google/cloud/vision_ai/v1/streams_service/credentials.rb +47 -0
  40. data/lib/google/cloud/vision_ai/v1/streams_service/operations.rb +809 -0
  41. data/lib/google/cloud/vision_ai/v1/streams_service/paths.rb +153 -0
  42. data/lib/google/cloud/vision_ai/v1/streams_service/rest/client.rb +2740 -0
  43. data/lib/google/cloud/vision_ai/v1/streams_service/rest/operations.rb +951 -0
  44. data/lib/google/cloud/vision_ai/v1/streams_service/rest/service_stub.rb +1437 -0
  45. data/lib/google/cloud/vision_ai/v1/streams_service/rest.rb +56 -0
  46. data/lib/google/cloud/vision_ai/v1/streams_service.rb +59 -0
  47. data/lib/google/cloud/vision_ai/v1/version.rb +8 -3
  48. data/lib/google/cloud/vision_ai/v1/warehouse/client.rb +6958 -0
  49. data/lib/google/cloud/vision_ai/v1/warehouse/credentials.rb +47 -0
  50. data/lib/google/cloud/vision_ai/v1/warehouse/operations.rb +809 -0
  51. data/lib/google/cloud/vision_ai/v1/warehouse/paths.rb +237 -0
  52. data/lib/google/cloud/vision_ai/v1/warehouse/rest/client.rb +6403 -0
  53. data/lib/google/cloud/vision_ai/v1/warehouse/rest/operations.rb +951 -0
  54. data/lib/google/cloud/vision_ai/v1/warehouse/rest/service_stub.rb +3760 -0
  55. data/lib/google/cloud/vision_ai/v1/warehouse/rest.rb +53 -0
  56. data/lib/google/cloud/vision_ai/v1/warehouse.rb +56 -0
  57. data/lib/google/cloud/vision_ai/v1.rb +50 -0
  58. data/lib/google/cloud/visionai/v1/annotations_pb.rb +90 -0
  59. data/lib/google/cloud/visionai/v1/common_pb.rb +50 -0
  60. data/lib/google/cloud/visionai/v1/health_service_pb.rb +48 -0
  61. data/lib/google/cloud/visionai/v1/health_service_services_pb.rb +46 -0
  62. data/lib/google/cloud/visionai/v1/lva_pb.rb +54 -0
  63. data/lib/google/cloud/visionai/v1/lva_resources_pb.rb +51 -0
  64. data/lib/google/cloud/visionai/v1/lva_service_pb.rb +80 -0
  65. data/lib/google/cloud/visionai/v1/lva_service_services_pb.rb +81 -0
  66. data/lib/google/cloud/visionai/v1/platform_pb.rb +162 -0
  67. data/lib/google/cloud/visionai/v1/platform_services_pb.rb +111 -0
  68. data/lib/google/cloud/visionai/v1/streaming_resources_pb.rb +58 -0
  69. data/lib/google/cloud/visionai/v1/streaming_service_pb.rb +71 -0
  70. data/lib/google/cloud/visionai/v1/streaming_service_services_pb.rb +55 -0
  71. data/lib/google/cloud/visionai/v1/streams_resources_pb.rb +53 -0
  72. data/lib/google/cloud/visionai/v1/streams_service_pb.rb +85 -0
  73. data/lib/google/cloud/visionai/v1/streams_service_services_pb.rb +92 -0
  74. data/lib/google/cloud/visionai/v1/warehouse_pb.rb +251 -0
  75. data/lib/google/cloud/visionai/v1/warehouse_services_pb.rb +228 -0
  76. data/lib/google-cloud-vision_ai-v1.rb +21 -0
  77. data/proto_docs/README.md +4 -0
  78. data/proto_docs/google/api/client.rb +420 -0
  79. data/proto_docs/google/api/field_behavior.rb +85 -0
  80. data/proto_docs/google/api/launch_stage.rb +71 -0
  81. data/proto_docs/google/api/resource.rb +227 -0
  82. data/proto_docs/google/cloud/visionai/v1/annotations.rb +787 -0
  83. data/proto_docs/google/cloud/visionai/v1/common.rb +134 -0
  84. data/proto_docs/google/cloud/visionai/v1/health_service.rb +61 -0
  85. data/proto_docs/google/cloud/visionai/v1/lva.rb +345 -0
  86. data/proto_docs/google/cloud/visionai/v1/lva_resources.rb +167 -0
  87. data/proto_docs/google/cloud/visionai/v1/lva_service.rb +543 -0
  88. data/proto_docs/google/cloud/visionai/v1/platform.rb +2228 -0
  89. data/proto_docs/google/cloud/visionai/v1/streaming_resources.rb +178 -0
  90. data/proto_docs/google/cloud/visionai/v1/streaming_service.rb +393 -0
  91. data/proto_docs/google/cloud/visionai/v1/streams_resources.rb +229 -0
  92. data/proto_docs/google/cloud/visionai/v1/streams_service.rb +644 -0
  93. data/proto_docs/google/cloud/visionai/v1/warehouse.rb +3055 -0
  94. data/proto_docs/google/longrunning/operations.rb +164 -0
  95. data/proto_docs/google/protobuf/any.rb +145 -0
  96. data/proto_docs/google/protobuf/duration.rb +98 -0
  97. data/proto_docs/google/protobuf/empty.rb +34 -0
  98. data/proto_docs/google/protobuf/field_mask.rb +229 -0
  99. data/proto_docs/google/protobuf/struct.rb +96 -0
  100. data/proto_docs/google/protobuf/timestamp.rb +127 -0
  101. data/proto_docs/google/rpc/status.rb +48 -0
  102. data/proto_docs/google/type/datetime.rb +99 -0
  103. data/proto_docs/google/type/expr.rb +75 -0
  104. metadata +183 -10
data/proto_docs/google/cloud/visionai/v1/annotations.rb
@@ -0,0 +1,787 @@
+ # frozen_string_literal: true
+
+ # Copyright 2024 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+
+ module Google
+ module Cloud
+ module VisionAI
+ module V1
+ # Output format for Personal Protective Equipment Detection Operator.
+ # @!attribute [rw] current_time
+ # @return [::Google::Protobuf::Timestamp]
+ # Current timestamp.
+ # @!attribute [rw] detected_persons
+ # @return [::Array<::Google::Cloud::VisionAI::V1::PersonalProtectiveEquipmentDetectionOutput::DetectedPerson>]
+ # A list of DetectedPersons.
+ class PersonalProtectiveEquipmentDetectionOutput
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # The entity info for annotations from person detection prediction result.
+ # @!attribute [rw] person_entity_id
+ # @return [::Integer]
+ # Entity id.
+ class PersonEntity
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # The entity info for annotations from PPE detection prediction result.
+ # @!attribute [rw] ppe_label_id
+ # @return [::Integer]
+ # Label id.
+ # @!attribute [rw] ppe_label_string
+ # @return [::String]
+ # Human readable string of the label (Examples: helmet, glove, mask).
+ # @!attribute [rw] ppe_supercategory_label_string
+ # @return [::String]
+ # Human readable string of the super category label (Examples: head_cover,
+ # hands_cover, face_cover).
+ # @!attribute [rw] ppe_entity_id
+ # @return [::Integer]
+ # Entity id.
+ class PPEEntity
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Bounding Box in the normalized coordinates.
+ # @!attribute [rw] xmin
+ # @return [::Float]
+ # Min in x coordinate.
+ # @!attribute [rw] ymin
+ # @return [::Float]
+ # Min in y coordinate.
+ # @!attribute [rw] width
+ # @return [::Float]
+ # Width of the bounding box.
+ # @!attribute [rw] height
+ # @return [::Float]
+ # Height of the bounding box.
+ class NormalizedBoundingBox
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # PersonIdentified box contains the location and the entity info of the
+ # person.
+ # @!attribute [rw] box_id
+ # @return [::Integer]
+ # A unique id for this box.
+ # @!attribute [rw] normalized_bounding_box
+ # @return [::Google::Cloud::VisionAI::V1::PersonalProtectiveEquipmentDetectionOutput::NormalizedBoundingBox]
+ # Bounding Box in the normalized coordinates.
+ # @!attribute [rw] confidence_score
+ # @return [::Float]
+ # Confidence score associated with this box.
+ # @!attribute [rw] person_entity
+ # @return [::Google::Cloud::VisionAI::V1::PersonalProtectiveEquipmentDetectionOutput::PersonEntity]
+ # Person entity info.
+ class PersonIdentifiedBox
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # PPEIdentified box contains the location and the entity info of the PPE.
+ # @!attribute [rw] box_id
+ # @return [::Integer]
+ # A unique id for this box.
+ # @!attribute [rw] normalized_bounding_box
+ # @return [::Google::Cloud::VisionAI::V1::PersonalProtectiveEquipmentDetectionOutput::NormalizedBoundingBox]
+ # Bounding Box in the normalized coordinates.
+ # @!attribute [rw] confidence_score
+ # @return [::Float]
+ # Confidence score associated with this box.
+ # @!attribute [rw] ppe_entity
+ # @return [::Google::Cloud::VisionAI::V1::PersonalProtectiveEquipmentDetectionOutput::PPEEntity]
+ # PPE entity info.
+ class PPEIdentifiedBox
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Detected Person contains the detected person and their associated
+ # PPEs and their protection information.
+ # @!attribute [rw] person_id
+ # @return [::Integer]
+ # The id of the detected person.
+ # @!attribute [rw] detected_person_identified_box
+ # @return [::Google::Cloud::VisionAI::V1::PersonalProtectiveEquipmentDetectionOutput::PersonIdentifiedBox]
+ # The info of the detected person's identified box.
+ # @!attribute [rw] detected_ppe_identified_boxes
+ # @return [::Array<::Google::Cloud::VisionAI::V1::PersonalProtectiveEquipmentDetectionOutput::PPEIdentifiedBox>]
+ # The info of the detected person's associated PPE identified boxes.
+ # @!attribute [rw] face_coverage_score
+ # @return [::Float]
+ # Coverage score for each body part.
+ # Coverage score for face.
+ # @!attribute [rw] eyes_coverage_score
+ # @return [::Float]
+ # Coverage score for eyes.
+ # @!attribute [rw] head_coverage_score
+ # @return [::Float]
+ # Coverage score for head.
+ # @!attribute [rw] hands_coverage_score
+ # @return [::Float]
+ # Coverage score for hands.
+ # @!attribute [rw] body_coverage_score
+ # @return [::Float]
+ # Coverage score for body.
+ # @!attribute [rw] feet_coverage_score
+ # @return [::Float]
+ # Coverage score for feet.
+ class DetectedPerson
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
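As a quick orientation to how these nested messages compose, here is a minimal Ruby sketch; it assumes the generated classes are loaded (for example via `require "google/cloud/vision_ai/v1"`), and all field values are made up.

```ruby
require "google/cloud/vision_ai/v1"

ppe = Google::Cloud::VisionAI::V1::PersonalProtectiveEquipmentDetectionOutput

# One detected person wearing a helmet, built from the nested messages documented above.
output = ppe.new(
  current_time: Google::Protobuf::Timestamp.new(seconds: Time.now.to_i),
  detected_persons: [
    ppe::DetectedPerson.new(
      person_id: 1,
      detected_person_identified_box: ppe::PersonIdentifiedBox.new(
        box_id: 10,
        confidence_score: 0.92,
        normalized_bounding_box: ppe::NormalizedBoundingBox.new(
          xmin: 0.1, ymin: 0.2, width: 0.3, height: 0.5
        ),
        person_entity: ppe::PersonEntity.new(person_entity_id: 7)
      ),
      detected_ppe_identified_boxes: [
        ppe::PPEIdentifiedBox.new(
          box_id: 11,
          confidence_score: 0.88,
          ppe_entity: ppe::PPEEntity.new(
            ppe_label_id: 1,
            ppe_label_string: "helmet",
            ppe_supercategory_label_string: "head_cover",
            ppe_entity_id: 3
          )
        )
      ],
      head_coverage_score: 0.95
    )
  ]
)

puts output.detected_persons.first.detected_ppe_identified_boxes.first.ppe_entity.ppe_label_string
```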
+
+ # Prediction output format for Generic Object Detection.
+ # @!attribute [rw] current_time
+ # @return [::Google::Protobuf::Timestamp]
+ # Current timestamp.
+ # @!attribute [rw] identified_boxes
+ # @return [::Array<::Google::Cloud::VisionAI::V1::ObjectDetectionPredictionResult::IdentifiedBox>]
+ # A list of identified boxes.
+ class ObjectDetectionPredictionResult
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # The entity info for annotations from object detection prediction result.
+ # @!attribute [rw] label_id
+ # @return [::Integer]
+ # Label id.
+ # @!attribute [rw] label_string
+ # @return [::String]
+ # Human readable string of the label.
+ class Entity
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Identified box contains location and the entity of the object.
+ # @!attribute [rw] box_id
+ # @return [::Integer]
+ # A unique id for this box.
+ # @!attribute [rw] normalized_bounding_box
+ # @return [::Google::Cloud::VisionAI::V1::ObjectDetectionPredictionResult::IdentifiedBox::NormalizedBoundingBox]
+ # Bounding Box in the normalized coordinates.
+ # @!attribute [rw] confidence_score
+ # @return [::Float]
+ # Confidence score associated with this box.
+ # @!attribute [rw] entity
+ # @return [::Google::Cloud::VisionAI::V1::ObjectDetectionPredictionResult::Entity]
+ # Entity of this box.
+ class IdentifiedBox
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # Bounding Box in the normalized coordinates.
+ # @!attribute [rw] xmin
+ # @return [::Float]
+ # Min in x coordinate.
+ # @!attribute [rw] ymin
+ # @return [::Float]
+ # Min in y coordinate.
+ # @!attribute [rw] width
+ # @return [::Float]
+ # Width of the bounding box.
+ # @!attribute [rw] height
+ # @return [::Float]
+ # Height of the bounding box.
+ class NormalizedBoundingBox
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+ end
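A hedged sketch of building and reading one of these results in Ruby; the require path and all values are illustrative, and a real result would normally arrive from a model output rather than be constructed by hand.

```ruby
require "google/cloud/vision_ai/v1"

odr = Google::Cloud::VisionAI::V1::ObjectDetectionPredictionResult

result = odr.new(
  identified_boxes: [
    odr::IdentifiedBox.new(
      box_id: 1,
      confidence_score: 0.87,
      entity: odr::Entity.new(label_id: 5, label_string: "person"),
      normalized_bounding_box: odr::IdentifiedBox::NormalizedBoundingBox.new(
        xmin: 0.25, ymin: 0.40, width: 0.10, height: 0.30
      )
    )
  ]
)

# Read the detections back out.
result.identified_boxes.each do |box|
  bb = box.normalized_bounding_box
  puts "#{box.entity.label_string} (#{box.confidence_score}) at [#{bb.xmin}, #{bb.ymin}]"
end
```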
+
+ # Prediction output format for Image Object Detection.
+ # @!attribute [rw] ids
+ # @return [::Array<::Integer>]
+ # The resource IDs of the AnnotationSpecs that had been identified, ordered
+ # by confidence score in descending order. It is the id segment instead of
+ # the full resource name.
+ # @!attribute [rw] display_names
+ # @return [::Array<::String>]
+ # The display names of the AnnotationSpecs that had been identified, order
+ # matches the IDs.
+ # @!attribute [rw] confidences
+ # @return [::Array<::Float>]
+ # The Model's confidences in correctness of the predicted IDs, higher value
+ # means higher confidence. Order matches the IDs.
+ # @!attribute [rw] bboxes
+ # @return [::Array<::Google::Protobuf::ListValue>]
+ # Bounding boxes, i.e. the rectangles over the image, that pinpoint
+ # the found AnnotationSpecs. Given in order that matches the IDs. Each
+ # bounding box is an array of 4 numbers `xMin`, `xMax`, `yMin`, and
+ # `yMax`, which represent the extremal coordinates of the box. They are
+ # relative to the image size, and the point 0,0 is in the top left
+ # of the image.
+ class ImageObjectDetectionPredictionResult
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
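The `bboxes` field stores each box as a plain four-number list inside a `Google::Protobuf::ListValue`. A hedged sketch of packing and unpacking it, using the `from_a`/`to_a` helpers from the google-protobuf gem's well-known types; the values are made up.

```ruby
require "google/protobuf/well_known_types"
require "google/cloud/vision_ai/v1"

result = Google::Cloud::VisionAI::V1::ImageObjectDetectionPredictionResult.new(
  ids: [123],
  display_names: ["cat"],
  confidences: [0.91],
  # Each bbox is [xMin, xMax, yMin, yMax], relative to the image size.
  bboxes: [Google::Protobuf::ListValue.from_a([0.10, 0.45, 0.20, 0.80])]
)

result.ids.each_with_index do |id, i|
  x_min, x_max, y_min, y_max = result.bboxes[i].to_a
  puts "#{id} #{result.display_names[i]} (#{result.confidences[i]}): " \
       "x=[#{x_min}, #{x_max}] y=[#{y_min}, #{y_max}]"
end
```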
+
+ # Prediction output format for Image and Text Classification.
+ # @!attribute [rw] ids
+ # @return [::Array<::Integer>]
+ # The resource IDs of the AnnotationSpecs that had been identified.
+ # @!attribute [rw] display_names
+ # @return [::Array<::String>]
+ # The display names of the AnnotationSpecs that had been identified, order
+ # matches the IDs.
+ # @!attribute [rw] confidences
+ # @return [::Array<::Float>]
+ # The Model's confidences in correctness of the predicted IDs, higher value
+ # means higher confidence. Order matches the IDs.
+ class ClassificationPredictionResult
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
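Since `ids`, `display_names`, and `confidences` are parallel arrays, a common way to consume them is to zip them back into per-label tuples. A small illustrative sketch (values invented):

```ruby
require "google/cloud/vision_ai/v1"

result = Google::Cloud::VisionAI::V1::ClassificationPredictionResult.new(
  ids: [1, 2],
  display_names: ["wearing_mask", "no_mask"],
  confidences: [0.93, 0.07]
)

# The three arrays are parallel; zip them to get (id, name, confidence) tuples.
result.ids.zip(result.display_names, result.confidences).each do |id, name, confidence|
  puts "#{id} #{name}: #{confidence}"
end
```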
+
+ # Prediction output format for Image Segmentation.
+ # @!attribute [rw] category_mask
+ # @return [::String]
+ # A PNG image where each pixel in the mask represents the category to which
+ # the pixel in the original image was predicted to belong. The size of
+ # this image will be the same as the original image. The mapping between the
+ # AnnotationSpec and the color can be found in the model's metadata. The model
+ # will choose the most likely category and if none of the categories reach
+ # the confidence threshold, the pixel will be marked as background.
+ # @!attribute [rw] confidence_mask
+ # @return [::String]
+ # A one-channel image which is encoded as an 8-bit lossless PNG. The size of
+ # the image will be the same as the original image. For a specific pixel,
+ # darker color means less confidence in correctness of the category in the
+ # categoryMask for the corresponding pixel. Black means no confidence and
+ # white means complete confidence.
+ class ImageSegmentationPredictionResult
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Prediction output format for Video Action Recognition.
+ # @!attribute [rw] segment_start_time
+ # @return [::Google::Protobuf::Timestamp]
+ # The beginning, inclusive, of the video's time segment in which the
+ # actions have been identified.
+ # @!attribute [rw] segment_end_time
+ # @return [::Google::Protobuf::Timestamp]
+ # The end, inclusive, of the video's time segment in which the actions have
+ # been identified. Particularly, if the end is the same as the start, it
+ # means the identification happens on a specific video frame.
+ # @!attribute [rw] actions
+ # @return [::Array<::Google::Cloud::VisionAI::V1::VideoActionRecognitionPredictionResult::IdentifiedAction>]
+ # All of the actions identified in the time range.
+ class VideoActionRecognitionPredictionResult
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # Each IdentifiedAction is one particular identification of an action
+ # specified with the AnnotationSpec id, display_name and the associated
+ # confidence score.
+ # @!attribute [rw] id
+ # @return [::String]
+ # The resource ID of the AnnotationSpec that had been identified.
+ # @!attribute [rw] display_name
+ # @return [::String]
+ # The display name of the AnnotationSpec that had been identified.
+ # @!attribute [rw] confidence
+ # @return [::Float]
+ # The Model's confidence in correctness of this identification, higher
+ # value means higher confidence.
+ class IdentifiedAction
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+
+ # Prediction output format for Video Object Tracking.
+ # @!attribute [rw] segment_start_time
+ # @return [::Google::Protobuf::Timestamp]
+ # The beginning, inclusive, of the video's time segment in which the
+ # current identifications happen.
+ # @!attribute [rw] segment_end_time
+ # @return [::Google::Protobuf::Timestamp]
+ # The end, inclusive, of the video's time segment in which the current
+ # identifications happen. Particularly, if the end is the same as the start,
+ # it means the identifications happen on a specific video frame.
+ # @!attribute [rw] objects
+ # @return [::Array<::Google::Cloud::VisionAI::V1::VideoObjectTrackingPredictionResult::DetectedObject>]
+ # All of the objects detected in the specified time range.
+ class VideoObjectTrackingPredictionResult
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # Bounding box for the detected object, i.e. the rectangle over the video
+ # frame pinpointing the found AnnotationSpec. The coordinates are relative
+ # to the frame size, and the point 0,0 is in the top left of the frame.
+ # @!attribute [rw] x_min
+ # @return [::Float]
+ # The leftmost coordinate of the bounding box.
+ # @!attribute [rw] x_max
+ # @return [::Float]
+ # The rightmost coordinate of the bounding box.
+ # @!attribute [rw] y_min
+ # @return [::Float]
+ # The topmost coordinate of the bounding box.
+ # @!attribute [rw] y_max
+ # @return [::Float]
+ # The bottommost coordinate of the bounding box.
+ class BoundingBox
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Each DetectedObject is one particular identification of an object
+ # specified with the AnnotationSpec id and display_name, the bounding box,
+ # the associated confidence score and the corresponding track_id.
+ # @!attribute [rw] id
+ # @return [::String]
+ # The resource ID of the AnnotationSpec that had been identified.
+ # @!attribute [rw] display_name
+ # @return [::String]
+ # The display name of the AnnotationSpec that had been identified.
+ # @!attribute [rw] bounding_box
+ # @return [::Google::Cloud::VisionAI::V1::VideoObjectTrackingPredictionResult::BoundingBox]
+ # Bounding box.
+ # @!attribute [rw] confidence
+ # @return [::Float]
+ # The Model's confidence in correctness of this identification, higher
+ # value means higher confidence.
+ # @!attribute [rw] track_id
+ # @return [::Integer]
+ # The same object may be identified on multiple frames which are typically
+ # adjacent. The set of frames where a particular object has been detected
+ # forms a track. This track_id can be used to trace down all frames for a
+ # detected object.
+ class DetectedObject
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
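Because each DetectedObject carries a track_id, detections can be regrouped into per-object tracks after the fact. A hedged sketch, with invented values and an assumed require path:

```ruby
require "google/cloud/vision_ai/v1"

vot = Google::Cloud::VisionAI::V1::VideoObjectTrackingPredictionResult

result = vot.new(
  objects: [
    vot::DetectedObject.new(
      id: "42", display_name: "car", track_id: 7, confidence: 0.80,
      bounding_box: vot::BoundingBox.new(x_min: 0.10, x_max: 0.30, y_min: 0.50, y_max: 0.90)
    ),
    vot::DetectedObject.new(
      id: "42", display_name: "car", track_id: 7, confidence: 0.75,
      bounding_box: vot::BoundingBox.new(x_min: 0.12, x_max: 0.32, y_min: 0.50, y_max: 0.90)
    )
  ]
)

# Group detections by track_id to follow one object across frames.
result.objects.group_by(&:track_id).each do |track_id, detections|
  puts "track #{track_id}: #{detections.size} detections of #{detections.first.display_name}"
end
```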
+
+ # Prediction output format for Video Classification.
+ # @!attribute [rw] segment_start_time
+ # @return [::Google::Protobuf::Timestamp]
+ # The beginning, inclusive, of the video's time segment in which the
+ # classifications have been identified.
+ # @!attribute [rw] segment_end_time
+ # @return [::Google::Protobuf::Timestamp]
+ # The end, inclusive, of the video's time segment in which the
+ # classifications have been identified. Particularly, if the end is the same
+ # as the start, it means the identification happens on a specific video
+ # frame.
+ # @!attribute [rw] classifications
+ # @return [::Array<::Google::Cloud::VisionAI::V1::VideoClassificationPredictionResult::IdentifiedClassification>]
+ # All of the classifications identified in the time range.
+ class VideoClassificationPredictionResult
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # Each IdentifiedClassification is one particular identification of a
+ # classification specified with the AnnotationSpec id and display_name,
+ # and the associated confidence score.
+ # @!attribute [rw] id
+ # @return [::String]
+ # The resource ID of the AnnotationSpec that had been identified.
+ # @!attribute [rw] display_name
+ # @return [::String]
+ # The display name of the AnnotationSpec that had been identified.
+ # @!attribute [rw] confidence
+ # @return [::Float]
+ # The Model's confidence in correctness of this identification, higher
+ # value means higher confidence.
+ class IdentifiedClassification
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+
+ # The prediction result proto for occupancy counting.
+ # @!attribute [rw] current_time
+ # @return [::Google::Protobuf::Timestamp]
+ # Current timestamp.
+ # @!attribute [rw] identified_boxes
+ # @return [::Array<::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::IdentifiedBox>]
+ # A list of identified boxes.
+ # @!attribute [rw] stats
+ # @return [::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::Stats]
+ # Detection statistics.
+ # @!attribute [rw] track_info
+ # @return [::Array<::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::TrackInfo>]
+ # Track related information. All the tracks that are live at this timestamp.
+ # It only exists if tracking is enabled.
+ # @!attribute [rw] dwell_time_info
+ # @return [::Array<::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::DwellTimeInfo>]
+ # Dwell time related information. All the tracks that are live in a given
+ # zone with a start and end dwell time timestamp.
+ # @!attribute [rw] pts
+ # @return [::Integer]
+ # The presentation timestamp of the frame.
+ class OccupancyCountingPredictionResult
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # The entity info for annotations from occupancy counting operator.
+ # @!attribute [rw] label_id
+ # @return [::Integer]
+ # Label id.
+ # @!attribute [rw] label_string
+ # @return [::String]
+ # Human readable string of the label.
+ class Entity
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Identified box contains location and the entity of the object.
+ # @!attribute [rw] box_id
+ # @return [::Integer]
+ # A unique id for this box.
+ # @!attribute [rw] normalized_bounding_box
+ # @return [::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::IdentifiedBox::NormalizedBoundingBox]
+ # Bounding Box in the normalized coordinates.
+ # @!attribute [rw] score
+ # @return [::Float]
+ # Confidence score associated with this box.
+ # @!attribute [rw] entity
+ # @return [::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::Entity]
+ # Entity of this box.
+ # @!attribute [rw] track_id
+ # @return [::Integer]
+ # A unique id to identify a track. It should be consistent across frames.
+ # It only exists if tracking is enabled.
+ class IdentifiedBox
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # Bounding Box in the normalized coordinates.
+ # @!attribute [rw] xmin
+ # @return [::Float]
+ # Min in x coordinate.
+ # @!attribute [rw] ymin
+ # @return [::Float]
+ # Min in y coordinate.
+ # @!attribute [rw] width
+ # @return [::Float]
+ # Width of the bounding box.
+ # @!attribute [rw] height
+ # @return [::Float]
+ # Height of the bounding box.
+ class NormalizedBoundingBox
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+
+ # The statistics info for annotations from occupancy counting operator.
+ # @!attribute [rw] full_frame_count
+ # @return [::Array<::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::Stats::ObjectCount>]
+ # Counts of the full frame.
+ # @!attribute [rw] crossing_line_counts
+ # @return [::Array<::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::Stats::CrossingLineCount>]
+ # Crossing line counts.
+ # @!attribute [rw] active_zone_counts
+ # @return [::Array<::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::Stats::ActiveZoneCount>]
+ # Active zone counts.
+ class Stats
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # The object info and instant count for annotations from occupancy counting
+ # operator.
+ # @!attribute [rw] entity
+ # @return [::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::Entity]
+ # Entity of this object.
+ # @!attribute [rw] count
+ # @return [::Integer]
+ # Count of the object.
+ class ObjectCount
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # The object info and accumulated count for annotations from occupancy
+ # counting operator.
+ # @!attribute [rw] start_time
+ # @return [::Google::Protobuf::Timestamp]
+ # The start time of the accumulated count.
+ # @!attribute [rw] object_count
+ # @return [::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::Stats::ObjectCount]
+ # The object count for the accumulated count.
+ class AccumulatedObjectCount
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Message for Crossing line count.
+ # @!attribute [rw] annotation
+ # @return [::Google::Cloud::VisionAI::V1::StreamAnnotation]
+ # Line annotation from the user.
+ # @!attribute [rw] positive_direction_counts
+ # @return [::Array<::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::Stats::ObjectCount>]
+ # The direction that follows the right hand rule.
+ # @!attribute [rw] negative_direction_counts
+ # @return [::Array<::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::Stats::ObjectCount>]
+ # The direction that is opposite to the right hand rule.
+ # @!attribute [rw] accumulated_positive_direction_counts
+ # @return [::Array<::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::Stats::AccumulatedObjectCount>]
+ # The accumulated positive count.
+ # @!attribute [rw] accumulated_negative_direction_counts
+ # @return [::Array<::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::Stats::AccumulatedObjectCount>]
+ # The accumulated negative count.
+ class CrossingLineCount
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Message for the active zone count.
+ # @!attribute [rw] annotation
+ # @return [::Google::Cloud::VisionAI::V1::StreamAnnotation]
+ # Active zone annotation from the user.
+ # @!attribute [rw] counts
+ # @return [::Array<::Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult::Stats::ObjectCount>]
+ # Counts in the zone.
+ class ActiveZoneCount
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+
+ # The track info for annotations from occupancy counting operator.
+ # @!attribute [rw] track_id
+ # @return [::String]
+ # A unique id to identify a track. It should be consistent across frames.
+ # @!attribute [rw] start_time
+ # @return [::Google::Protobuf::Timestamp]
+ # Start timestamp of this track.
+ class TrackInfo
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # The dwell time info for annotations from occupancy counting operator.
+ # @!attribute [rw] track_id
+ # @return [::String]
+ # A unique id to identify a track. It should be consistent across frames.
+ # @!attribute [rw] zone_id
+ # @return [::String]
+ # The unique id for the zone in which the object is dwelling/waiting.
+ # @!attribute [rw] dwell_start_time
+ # @return [::Google::Protobuf::Timestamp]
+ # The beginning time when a dwelling object has been identified in a zone.
+ # @!attribute [rw] dwell_end_time
+ # @return [::Google::Protobuf::Timestamp]
+ # The end time when a dwelling object has exited the zone.
+ class DwellTimeInfo
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
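To make the stats layout concrete, here is a minimal sketch of building and reading per-frame and crossing-line counts; the require path and all counts are invented, and a real result would come from the occupancy counting operator.

```ruby
require "google/cloud/vision_ai/v1"

ocr = Google::Cloud::VisionAI::V1::OccupancyCountingPredictionResult

result = ocr.new(
  stats: ocr::Stats.new(
    full_frame_count: [
      ocr::Stats::ObjectCount.new(entity: ocr::Entity.new(label_string: "Person"), count: 3)
    ],
    crossing_line_counts: [
      ocr::Stats::CrossingLineCount.new(
        positive_direction_counts: [
          ocr::Stats::ObjectCount.new(entity: ocr::Entity.new(label_string: "Person"), count: 2)
        ]
      )
    ]
  )
)

# Instant counts for the whole frame, then counts per crossing line.
result.stats.full_frame_count.each { |c| puts "#{c.entity.label_string} in frame: #{c.count}" }
result.stats.crossing_line_counts.each do |line|
  line.positive_direction_counts.each { |c| puts "crossed (+): #{c.entity.label_string} #{c.count}" }
end
```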
+
+ # Message about annotations on a Vision AI stream resource.
+ # @!attribute [rw] active_zone
+ # @return [::Google::Cloud::VisionAI::V1::NormalizedPolygon]
+ # Annotation for type ACTIVE_ZONE
+ # @!attribute [rw] crossing_line
+ # @return [::Google::Cloud::VisionAI::V1::NormalizedPolyline]
+ # Annotation for type CROSSING_LINE
+ # @!attribute [rw] id
+ # @return [::String]
+ # ID of the annotation. It must be unique within its context; for
+ # example, among all the annotations for one input stream of a Vision AI
+ # application.
+ # @!attribute [rw] display_name
+ # @return [::String]
+ # User-friendly name for the annotation.
+ # @!attribute [rw] source_stream
+ # @return [::String]
+ # The Vision AI stream resource name.
+ # @!attribute [rw] type
+ # @return [::Google::Cloud::VisionAI::V1::StreamAnnotationType]
+ # The actual type of Annotation.
+ class StreamAnnotation
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # A wrapper of repeated StreamAnnotation.
+ # @!attribute [rw] stream_annotations
+ # @return [::Array<::Google::Cloud::VisionAI::V1::StreamAnnotation>]
+ # Multiple annotations.
+ class StreamAnnotations
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Normalized Polygon.
+ # @!attribute [rw] normalized_vertices
+ # @return [::Array<::Google::Cloud::VisionAI::V1::NormalizedVertex>]
+ # The bounding polygon normalized vertices. Top left corner of the image
+ # will be [0, 0].
+ class NormalizedPolygon
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Normalized Polyline, which represents a curve consisting of connected
+ # straight-line segments.
+ # @!attribute [rw] normalized_vertices
+ # @return [::Array<::Google::Cloud::VisionAI::V1::NormalizedVertex>]
+ # A sequence of vertices connected by straight lines.
+ class NormalizedPolyline
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # A vertex represents a 2D point in the image.
+ # NOTE: the normalized vertex coordinates are relative to the original image
+ # and range from 0 to 1.
+ # @!attribute [rw] x
+ # @return [::Float]
+ # X coordinate.
+ # @!attribute [rw] y
+ # @return [::Float]
+ # Y coordinate.
+ class NormalizedVertex
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
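A hedged sketch of assembling an ACTIVE_ZONE annotation from these pieces; the stream resource name, ids, and coordinates are purely illustrative.

```ruby
require "google/cloud/vision_ai/v1"

v1 = Google::Cloud::VisionAI::V1

# An ACTIVE_ZONE annotation restricting processing to a quadrilateral region.
annotation = v1::StreamAnnotation.new(
  id: "zone-entrance",
  display_name: "Entrance zone",
  source_stream: "projects/my-project/locations/us-central1/clusters/my-cluster/streams/my-stream",
  type: :STREAM_ANNOTATION_TYPE_ACTIVE_ZONE,
  active_zone: v1::NormalizedPolygon.new(
    normalized_vertices: [
      v1::NormalizedVertex.new(x: 0.1, y: 0.1),
      v1::NormalizedVertex.new(x: 0.9, y: 0.1),
      v1::NormalizedVertex.new(x: 0.9, y: 0.9),
      v1::NormalizedVertex.new(x: 0.1, y: 0.9)
    ]
  )
)

annotations = v1::StreamAnnotations.new(stream_annotations: [annotation])
puts annotations.stream_annotations.first.type
```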
+
+ # Message of essential metadata of App Platform.
+ # This message is usually attached to a certain processor output annotation for
+ # the customer to identify the source of the data.
+ # @!attribute [rw] application
+ # @return [::String]
+ # The application resource name.
+ # @!attribute [rw] instance_id
+ # @return [::String]
+ # The instance resource id. Instance is a nested resource of the application
+ # under the collection 'instances'.
+ # @!attribute [rw] node
+ # @return [::String]
+ # The node name of the application graph.
+ # @!attribute [rw] processor
+ # @return [::String]
+ # The referred processor resource name of the application node.
+ class AppPlatformMetadata
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # For any Cloud Function based customer processing logic, the customer's Cloud
+ # Function is expected to receive AppPlatformCloudFunctionRequest as the request
+ # and send back AppPlatformCloudFunctionResponse as the response.
+ # Message of the request from AppPlatform to the Cloud Function.
+ # @!attribute [rw] app_platform_metadata
+ # @return [::Google::Cloud::VisionAI::V1::AppPlatformMetadata]
+ # The metadata of the AppPlatform for the customer to identify the source of
+ # the payload.
+ # @!attribute [rw] annotations
+ # @return [::Array<::Google::Cloud::VisionAI::V1::AppPlatformCloudFunctionRequest::StructedInputAnnotation>]
+ # The actual annotations to be processed by the customized Cloud Function.
+ class AppPlatformCloudFunctionRequest
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # A general annotation message that uses struct format to represent different
+ # concrete annotation protobufs.
+ # @!attribute [rw] ingestion_time_micros
+ # @return [::Integer]
+ # The ingestion time of the current annotation.
+ # @!attribute [rw] annotation
+ # @return [::Google::Protobuf::Struct]
+ # The struct format of the actual annotation.
+ class StructedInputAnnotation
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+
+ # Message of the response from the customer's Cloud Function to AppPlatform.
+ # @!attribute [rw] annotations
+ # @return [::Array<::Google::Cloud::VisionAI::V1::AppPlatformCloudFunctionResponse::StructedOutputAnnotation>]
+ # The modified annotations that are returned to AppPlatform.
+ # If the annotations fields are empty, then those annotations will be dropped
+ # by AppPlatform.
+ # @!attribute [rw] annotation_passthrough
+ # @return [::Boolean]
+ # If set to true, AppPlatform will use the original annotations instead of
+ # dropping them, even if they are empty in the annotations field.
+ # @!attribute [rw] events
+ # @return [::Array<::Google::Cloud::VisionAI::V1::AppPlatformEventBody>]
+ # The event notifications that are returned to AppPlatform. Typically they
+ # will then be consumed/forwarded to an operator that handles
+ # events, such as the Pub/Sub operator.
+ class AppPlatformCloudFunctionResponse
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # A general annotation message that uses struct format to represent different
+ # concrete annotation protobufs.
+ # @!attribute [rw] annotation
+ # @return [::Google::Protobuf::Struct]
+ # The struct format of the actual annotation.
+ class StructedOutputAnnotation
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+
+ # Message of the content of an appPlatform event.
+ # @!attribute [rw] event_message
+ # @return [::String]
+ # Human readable string of the event, like "There are more than 6 people in
+ # the scene" or "Shelf is empty!".
+ # @!attribute [rw] payload
+ # @return [::Google::Protobuf::Struct]
+ # For the case of Pub/Sub, it will be stored in the message attributes.
+ # pubsub.proto
+ # @!attribute [rw] event_id
+ # @return [::String]
+ # User-defined event ID, used to classify events. Within a delivery interval,
+ # events from the same application instance with the same ID will be
+ # de-duplicated and only the first one will be sent out. An empty event_id
+ # will be treated as "".
+ class AppPlatformEventBody
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
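To make the request/response contract more concrete, here is a hedged sketch of the body a custom Cloud Function might build and return; how the JSON is actually received and sent back depends on your function runtime and is not shown, and the field values and require paths are assumptions.

```ruby
require "google/protobuf/well_known_types"
require "google/cloud/vision_ai/v1"

v1 = Google::Cloud::VisionAI::V1

response = v1::AppPlatformCloudFunctionResponse.new(
  annotation_passthrough: false,
  annotations: [
    v1::AppPlatformCloudFunctionResponse::StructedOutputAnnotation.new(
      annotation: Google::Protobuf::Struct.from_hash("person_count" => 7)
    )
  ],
  events: [
    v1::AppPlatformEventBody.new(
      event_id: "crowding",
      event_message: "There are more than 6 people in the scene",
      payload: Google::Protobuf::Struct.from_hash("zone" => "entrance")
    )
  ]
)

# The Cloud Function would then serialize this message as JSON for App Platform.
puts response.to_json
```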
+
+ # Enum describing all possible types of a stream annotation.
+ module StreamAnnotationType
+ # Type UNSPECIFIED.
+ STREAM_ANNOTATION_TYPE_UNSPECIFIED = 0
+
+ # An active_zone annotation defines a polygon on top of the content from an
+ # image/video based stream; subsequent processing will only focus on the
+ # content inside the active zone.
+ STREAM_ANNOTATION_TYPE_ACTIVE_ZONE = 1
+
+ # A crossing_line annotation defines a polyline on top of the content from an
+ # image/video based Vision AI stream; events happening across the line will
+ # be captured. For example, the counts of people who go across the line
+ # in the Occupancy Analytic Processor.
+ STREAM_ANNOTATION_TYPE_CROSSING_LINE = 2
+ end
+ end
+ end
+ end
+ end