google-cloud-automl-v1beta1 0.1.0

Files changed (89)
  1. checksums.yaml +7 -0
  2. data/.yardopts +12 -0
  3. data/AUTHENTICATION.md +169 -0
  4. data/LICENSE.md +203 -0
  5. data/README.md +71 -0
  6. data/lib/google-cloud-automl-v1beta1.rb +21 -0
  7. data/lib/google/cloud/automl/v1beta1.rb +36 -0
  8. data/lib/google/cloud/automl/v1beta1/annotation_payload_pb.rb +41 -0
  9. data/lib/google/cloud/automl/v1beta1/annotation_spec_pb.rb +26 -0
  10. data/lib/google/cloud/automl/v1beta1/automl.rb +63 -0
  11. data/lib/google/cloud/automl/v1beta1/automl/client.rb +2368 -0
  12. data/lib/google/cloud/automl/v1beta1/automl/credentials.rb +51 -0
  13. data/lib/google/cloud/automl/v1beta1/automl/operations.rb +564 -0
  14. data/lib/google/cloud/automl/v1beta1/automl/paths.rb +174 -0
  15. data/lib/google/cloud/automl/v1beta1/classification_pb.rb +73 -0
  16. data/lib/google/cloud/automl/v1beta1/column_spec_pb.rb +36 -0
  17. data/lib/google/cloud/automl/v1beta1/data_items_pb.rb +95 -0
  18. data/lib/google/cloud/automl/v1beta1/data_stats_pb.rb +84 -0
  19. data/lib/google/cloud/automl/v1beta1/data_types_pb.rb +43 -0
  20. data/lib/google/cloud/automl/v1beta1/dataset_pb.rb +46 -0
  21. data/lib/google/cloud/automl/v1beta1/detection_pb.rb +59 -0
  22. data/lib/google/cloud/automl/v1beta1/geometry_pb.rb +28 -0
  23. data/lib/google/cloud/automl/v1beta1/image_pb.rb +57 -0
  24. data/lib/google/cloud/automl/v1beta1/io_pb.rb +87 -0
  25. data/lib/google/cloud/automl/v1beta1/model_evaluation_pb.rb +45 -0
  26. data/lib/google/cloud/automl/v1beta1/model_pb.rb +52 -0
  27. data/lib/google/cloud/automl/v1beta1/operations_pb.rb +97 -0
  28. data/lib/google/cloud/automl/v1beta1/prediction_service.rb +53 -0
  29. data/lib/google/cloud/automl/v1beta1/prediction_service/client.rb +586 -0
  30. data/lib/google/cloud/automl/v1beta1/prediction_service/credentials.rb +51 -0
  31. data/lib/google/cloud/automl/v1beta1/prediction_service/operations.rb +564 -0
  32. data/lib/google/cloud/automl/v1beta1/prediction_service/paths.rb +52 -0
  33. data/lib/google/cloud/automl/v1beta1/prediction_service_pb.rb +50 -0
  34. data/lib/google/cloud/automl/v1beta1/prediction_service_services_pb.rb +79 -0
  35. data/lib/google/cloud/automl/v1beta1/ranges_pb.rb +24 -0
  36. data/lib/google/cloud/automl/v1beta1/regression_pb.rb +27 -0
  37. data/lib/google/cloud/automl/v1beta1/service_pb.rb +187 -0
  38. data/lib/google/cloud/automl/v1beta1/service_services_pb.rb +163 -0
  39. data/lib/google/cloud/automl/v1beta1/table_spec_pb.rb +31 -0
  40. data/lib/google/cloud/automl/v1beta1/tables_pb.rb +65 -0
  41. data/lib/google/cloud/automl/v1beta1/temporal_pb.rb +25 -0
  42. data/lib/google/cloud/automl/v1beta1/text_extraction_pb.rb +39 -0
  43. data/lib/google/cloud/automl/v1beta1/text_pb.rb +41 -0
  44. data/lib/google/cloud/automl/v1beta1/text_segment_pb.rb +25 -0
  45. data/lib/google/cloud/automl/v1beta1/text_sentiment_pb.rb +36 -0
  46. data/lib/google/cloud/automl/v1beta1/translation_pb.rb +41 -0
  47. data/lib/google/cloud/automl/v1beta1/version.rb +28 -0
  48. data/lib/google/cloud/automl/v1beta1/video_pb.rb +32 -0
  49. data/lib/google/cloud/common_resources_pb.rb +15 -0
  50. data/proto_docs/README.md +4 -0
  51. data/proto_docs/google/api/field_behavior.rb +59 -0
  52. data/proto_docs/google/api/resource.rb +247 -0
  53. data/proto_docs/google/cloud/automl/v1beta1/annotation_payload.rb +70 -0
  54. data/proto_docs/google/cloud/automl/v1beta1/annotation_spec.rb +46 -0
  55. data/proto_docs/google/cloud/automl/v1beta1/classification.rb +234 -0
  56. data/proto_docs/google/cloud/automl/v1beta1/column_spec.rb +77 -0
  57. data/proto_docs/google/cloud/automl/v1beta1/data_items.rb +227 -0
  58. data/proto_docs/google/cloud/automl/v1beta1/data_stats.rb +227 -0
  59. data/proto_docs/google/cloud/automl/v1beta1/data_types.rb +118 -0
  60. data/proto_docs/google/cloud/automl/v1beta1/dataset.rb +84 -0
  61. data/proto_docs/google/cloud/automl/v1beta1/detection.rb +152 -0
  62. data/proto_docs/google/cloud/automl/v1beta1/geometry.rb +53 -0
  63. data/proto_docs/google/cloud/automl/v1beta1/image.rb +205 -0
  64. data/proto_docs/google/cloud/automl/v1beta1/io.rb +1168 -0
  65. data/proto_docs/google/cloud/automl/v1beta1/model.rb +95 -0
  66. data/proto_docs/google/cloud/automl/v1beta1/model_evaluation.rb +103 -0
  67. data/proto_docs/google/cloud/automl/v1beta1/operations.rb +206 -0
  68. data/proto_docs/google/cloud/automl/v1beta1/prediction_service.rb +238 -0
  69. data/proto_docs/google/cloud/automl/v1beta1/ranges.rb +38 -0
  70. data/proto_docs/google/cloud/automl/v1beta1/regression.rb +48 -0
  71. data/proto_docs/google/cloud/automl/v1beta1/service.rb +447 -0
  72. data/proto_docs/google/cloud/automl/v1beta1/table_spec.rb +75 -0
  73. data/proto_docs/google/cloud/automl/v1beta1/tables.rb +300 -0
  74. data/proto_docs/google/cloud/automl/v1beta1/temporal.rb +40 -0
  75. data/proto_docs/google/cloud/automl/v1beta1/text.rb +76 -0
  76. data/proto_docs/google/cloud/automl/v1beta1/text_extraction.rb +73 -0
  77. data/proto_docs/google/cloud/automl/v1beta1/text_segment.rb +44 -0
  78. data/proto_docs/google/cloud/automl/v1beta1/text_sentiment.rb +86 -0
  79. data/proto_docs/google/cloud/automl/v1beta1/translation.rb +79 -0
  80. data/proto_docs/google/cloud/automl/v1beta1/video.rb +51 -0
  81. data/proto_docs/google/longrunning/operations.rb +150 -0
  82. data/proto_docs/google/protobuf/any.rb +138 -0
  83. data/proto_docs/google/protobuf/duration.rb +98 -0
  84. data/proto_docs/google/protobuf/empty.rb +36 -0
  85. data/proto_docs/google/protobuf/field_mask.rb +229 -0
  86. data/proto_docs/google/protobuf/struct.rb +96 -0
  87. data/proto_docs/google/protobuf/timestamp.rb +120 -0
  88. data/proto_docs/google/rpc/status.rb +46 -0
  89. metadata +245 -0
data/proto_docs/google/cloud/automl/v1beta1/io.rb
@@ -0,0 +1,1168 @@
+ # frozen_string_literal: true
+
+ # Copyright 2020 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+
+ module Google
+ module Cloud
+ module AutoML
+ module V1beta1
+ # Input configuration for ImportData Action.
+ #
+ # The format of the input depends on the dataset_metadata of the Dataset
+ # into which the import is happening. As input source the
+ # {Google::Cloud::AutoML::V1beta1::InputConfig#gcs_source gcs_source}
+ # is expected, unless specified otherwise. Additionally any input .CSV file
+ # by itself must be 100MB or smaller, unless specified otherwise.
+ # If an "example" file (that is, image, video etc.) with identical content
+ # (even if it had a different GCS_FILE_PATH) is mentioned multiple times,
+ # then its label, bounding boxes etc. are appended. The same file should
+ # always be provided with the same ML_USE and GCS_FILE_PATH; if it is not,
+ # these values are nondeterministically selected from the given ones.
+ #
+ # The formats are represented in EBNF with commas being literal and with
+ # non-terminal symbols defined near the end of this comment. The formats are:
+ #
+ # * For Image Classification:
+ # CSV file(s) with each line in format:
+ # ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
+ # GCS_FILE_PATH leads to an image of up to 30MB in size. Supported
+ # extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP, .TIFF, .ICO
+ # For the MULTICLASS classification type, at most one LABEL is allowed
+ # per image. If an image has not yet been labeled, then it should be
+ # mentioned just once with no LABEL.
+ # Some sample rows:
+ # TRAIN,gs://folder/image1.jpg,daisy
+ # TEST,gs://folder/image2.jpg,dandelion,tulip,rose
+ # UNASSIGNED,gs://folder/image3.jpg,daisy
+ # UNASSIGNED,gs://folder/image4.jpg
+ #
+ # * For Image Object Detection:
+ # CSV file(s) with each line in format:
+ # ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX | ,,,,,,,)
+ # GCS_FILE_PATH leads to an image of up to 30MB in size. Supported
+ # extensions: .JPEG, .GIF, .PNG.
+ # Each image is assumed to be exhaustively labeled. The minimum
+ # allowed BOUNDING_BOX edge length is 0.01, and no more than 500
+ # BOUNDING_BOX-es per image are allowed (one BOUNDING_BOX is defined
+ # per line). If an image has not yet been labeled, then it should be
+ # mentioned just once with no LABEL and the ",,,,,,," in place of the
+ # BOUNDING_BOX. Images which are known to not contain any
+ # bounding boxes should be labeled explicitly as
+ # "NEGATIVE_IMAGE", followed by ",,,,,,," in place of the
+ # BOUNDING_BOX.
+ # Sample rows:
+ # TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
+ # TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
+ # UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
+ # TEST,gs://folder/im3.png,,,,,,,,,
+ # TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,,
+ #
+ # * For Video Classification:
+ # CSV file(s) with each line in format:
+ # ML_USE,GCS_FILE_PATH
+ # where the ML_USE VALIDATE value should not be used. The GCS_FILE_PATH
+ # should lead to another .csv file which describes examples that have
+ # the given ML_USE, using the following row format:
+ # GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
+ # Here GCS_FILE_PATH leads to a video of up to 50GB in size and up
+ # to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+ # TIME_SEGMENT_START and TIME_SEGMENT_END must be within the
+ # length of the video, and the end has to be after the start. Any segment
+ # of a video which has one or more labels on it is considered a
+ # hard negative for all other labels. Any segment with no labels on
+ # it is considered to be unknown. If a whole video is unknown, then
+ # it should be mentioned just once with ",," in place of LABEL,
+ # TIME_SEGMENT_START,TIME_SEGMENT_END.
+ # Sample top level CSV file:
+ # TRAIN,gs://folder/train_videos.csv
+ # TEST,gs://folder/test_videos.csv
+ # UNASSIGNED,gs://folder/other_videos.csv
+ # Sample rows of a CSV file for a particular ML_USE:
+ # gs://folder/video1.avi,car,120,180.000021
+ # gs://folder/video1.avi,bike,150,180.000021
+ # gs://folder/vid2.avi,car,0,60.5
+ # gs://folder/vid3.avi,,,
+ #
+ # * For Video Object Tracking:
+ # CSV file(s) with each line in format:
+ # ML_USE,GCS_FILE_PATH
+ # where the ML_USE VALIDATE value should not be used. The GCS_FILE_PATH
+ # should lead to another .csv file which describes examples that have
+ # the given ML_USE, using one of the following row formats:
+ # GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX
+ # or
+ # GCS_FILE_PATH,,,,,,,,,,
+ # Here GCS_FILE_PATH leads to a video of up to 50GB in size and up
+ # to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+ # Providing INSTANCE_IDs can help to obtain a better model. When
+ # a specific labeled entity leaves the video frame and shows up
+ # again later, it is not required, albeit preferable, that the same
+ # INSTANCE_ID is given to it.
+ # TIMESTAMP must be within the length of the video; the
+ # BOUNDING_BOX is assumed to be drawn on the video frame closest
+ # to the TIMESTAMP. Any frame mentioned by a TIMESTAMP is expected
+ # to be exhaustively labeled, and no more than 500 BOUNDING_BOX-es per
+ # frame are allowed. If a whole video is unknown, then it should be
+ # mentioned just once with ",,,,,,,,,," in place of LABEL,
+ # [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX.
+ # Sample top level CSV file:
+ # TRAIN,gs://folder/train_videos.csv
+ # TEST,gs://folder/test_videos.csv
+ # UNASSIGNED,gs://folder/other_videos.csv
+ # Seven sample rows of a CSV file for a particular ML_USE:
+ # gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
+ # gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
+ # gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
+ # gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
+ # gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
+ # gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
+ # gs://folder/video2.avi,,,,,,,,,,,
+ # * For Text Extraction:
+ # CSV file(s) with each line in format:
+ # ML_USE,GCS_FILE_PATH
+ # GCS_FILE_PATH leads to a .JSONL (that is, JSON Lines) file which
+ # either imports text in-line or as documents. Any given
+ # .JSONL file must be 100MB or smaller.
+ # The in-line .JSONL file contains, per line, a proto that wraps a
+ # TextSnippet proto (in JSON representation) followed by one or more
+ # AnnotationPayload protos (called annotations), which have
+ # display_name and text_extraction detail populated. The given text
+ # is expected to be annotated exhaustively; for example, if you look
+ # for animals and the text contains "dolphin" that is not labeled, then
+ # "dolphin" is assumed to not be an animal. Any given text snippet
+ # content must be 10KB or smaller, and also be UTF-8 NFC encoded
+ # (ASCII already is).
+ # The document .JSONL file contains, per line, a proto that wraps a
+ # Document proto. The Document proto must have either document_text
+ # or input_config set. In the document_text case, the Document proto may
+ # also contain the spatial information of the document, including
+ # layout, document dimension and page number. In the input_config case,
+ # only PDF documents are supported now, and each document may be up
+ # to 2MB in size. Currently, annotations on documents cannot be
+ # specified at import.
+ # Three sample CSV rows:
+ # TRAIN,gs://folder/file1.jsonl
+ # VALIDATE,gs://folder/file2.jsonl
+ # TEST,gs://folder/file3.jsonl
+ # Sample in-line JSON Lines file for entity extraction (presented here
+ # with artificial line breaks, but the only actual line break is
+ # denoted by \n):
+ # {
+ # "document": {
+ # "document_text": \\{"content": "dog cat"},
+ # "layout": [
+ # {
+ # "text_segment": {
+ # "start_offset": 0,
+ # "end_offset": 3,
+ # },
+ # "page_number": 1,
+ # "bounding_poly": {
+ # "normalized_vertices": [
+ # \\{"x": 0.1, "y": 0.1},
+ # \\{"x": 0.1, "y": 0.3},
+ # \\{"x": 0.3, "y": 0.3},
+ # \\{"x": 0.3, "y": 0.1},
+ # ],
+ # },
+ # "text_segment_type": TOKEN,
+ # },
+ # {
+ # "text_segment": {
+ # "start_offset": 4,
+ # "end_offset": 7,
+ # },
+ # "page_number": 1,
+ # "bounding_poly": {
+ # "normalized_vertices": [
+ # \\{"x": 0.4, "y": 0.1},
+ # \\{"x": 0.4, "y": 0.3},
+ # \\{"x": 0.8, "y": 0.3},
+ # \\{"x": 0.8, "y": 0.1},
+ # ],
+ # },
+ # "text_segment_type": TOKEN,
+ # }
+ #
+ # ],
+ # "document_dimensions": {
+ # "width": 8.27,
+ # "height": 11.69,
+ # "unit": INCH,
+ # },
+ # "page_count": 1,
+ # },
+ # "annotations": [
+ # {
+ # "display_name": "animal",
+ # "text_extraction": {"text_segment": {"start_offset": 0,
+ # "end_offset": 3}}
+ # },
+ # {
+ # "display_name": "animal",
+ # "text_extraction": {"text_segment": {"start_offset": 4,
+ # "end_offset": 7}}
+ # }
+ # ],
+ # }\n
+ # {
+ # "text_snippet": {
+ # "content": "This dog is good."
+ # },
+ # "annotations": [
+ # {
+ # "display_name": "animal",
+ # "text_extraction": {
+ # "text_segment": \\{"start_offset": 5, "end_offset": 8}
+ # }
+ # }
+ # ]
+ # }
+ # Sample document JSON Lines file (presented here with artificial line
+ # breaks, but the only actual line break is denoted by \n):
+ # {
+ # "document": {
+ # "input_config": {
+ # "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
+ # }
+ # }
+ # }
+ # }\n
+ # {
+ # "document": {
+ # "input_config": {
+ # "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
+ # }
+ # }
+ # }
+ # }
+ #
+ # * For Text Classification:
+ # CSV file(s) with each line in format:
+ # ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,...
+ # TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If
+ # the column content is a valid GCS file path, i.e. prefixed by
+ # "gs://", it is treated as a GCS_FILE_PATH; otherwise, if the content
+ # is enclosed within double quotes (""), it is
+ # treated as a TEXT_SNIPPET. In the GCS_FILE_PATH case, the path
+ # must lead to a .txt file with UTF-8 encoding, for example,
+ # "gs://folder/content.txt", and the content in it is extracted
+ # as a text snippet. In the TEXT_SNIPPET case, the column content,
+ # excluding quotes, is treated as the text snippet to be imported. In
+ # both cases, the text snippet/file size must be within 128kB.
+ # At most 100 unique labels are allowed per CSV row.
+ # Sample rows:
+ # TRAIN,"They have bad food and very rude",RudeService,BadFood
+ # TRAIN,gs://folder/content.txt,SlowService
+ # TEST,"Typically always bad service there.",RudeService
+ # VALIDATE,"Stomach ache to go.",BadFood
+ #
+ # * For Text Sentiment:
+ # CSV file(s) with each line in format:
+ # ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT
+ # TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If
+ # the column content is a valid GCS file path, that is, prefixed by
+ # "gs://", it is treated as a GCS_FILE_PATH, otherwise it is treated
+ # as a TEXT_SNIPPET. In the GCS_FILE_PATH case, the path
+ # must lead to a .txt file with UTF-8 encoding, for example,
+ # "gs://folder/content.txt", and the content in it is extracted
+ # as a text snippet. In the TEXT_SNIPPET case, the column content itself
+ # is treated as the text snippet to be imported. In both cases, the
+ # text snippet must be up to 500 characters long.
+ # Sample rows:
+ # TRAIN,"@freewrytin this is way too good for your product",2
+ # TRAIN,"I need this product so bad",3
+ # TEST,"Thank you for this product.",4
+ # VALIDATE,gs://folder/content.txt,2
+ #
+ # * For Tables:
+ # Either
+ # {Google::Cloud::AutoML::V1beta1::InputConfig#gcs_source gcs_source} or
+ #
+ # {Google::Cloud::AutoML::V1beta1::InputConfig#bigquery_source bigquery_source}
+ # can be used. All inputs are concatenated into a single
+ #
+ # [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_name].
+ # For gcs_source:
+ # CSV file(s), where the first row of the first file is the header,
+ # containing unique column names. If the first row of a subsequent
+ # file is the same as the header, then it is also treated as a
+ # header. All other rows contain values for the corresponding
+ # columns.
+ # Each .CSV file by itself must be 10GB or smaller, and their total
+ # size must be 100GB or smaller.
+ # First three sample rows of a CSV file:
+ # "Id","First Name","Last Name","Dob","Addresses"
+ #
+ # "1","John","Doe","1968-01-22","[\\{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},\\{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+ #
+ # "2","Jane","Doe","1980-10-16","[\\{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},\\{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]"
+ # For bigquery_source:
+ # A URI of a BigQuery table. The user data size of the BigQuery
+ # table must be 100GB or smaller.
+ # An imported table must have between 2 and 1,000 columns, inclusive,
+ # and between 1,000 and 100,000,000 rows, inclusive. At most 5
+ # import data operations can run in parallel.
+ # Definitions:
+ # ML_USE = "TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED"
+ # Describes how the given example (file) should be used for model
+ # training. "UNASSIGNED" can be used when the user has no preference.
+ # GCS_FILE_PATH = A path to a file on GCS, e.g. "gs://folder/image1.png".
+ # LABEL = A display name of an object on an image, video etc., e.g. "dog".
+ # Must be up to 32 characters long and can consist only of ASCII
+ # Latin letters A-Z and a-z, underscores (_), and ASCII digits 0-9.
+ # For each label an AnnotationSpec is created whose display_name
+ # becomes the label; AnnotationSpecs are given back in predictions.
+ # INSTANCE_ID = A positive integer that identifies a specific instance of a
+ # labeled entity on an example. Used e.g. to track two cars in
+ # a video while being able to tell apart which one is which.
+ # BOUNDING_BOX = VERTEX,VERTEX,VERTEX,VERTEX | VERTEX,,,VERTEX,,
+ # A rectangle parallel to the frame of the example (image,
+ # video). If 4 vertices are given they are connected by edges
+ # in the order provided; if 2 are given they are recognized
+ # as diagonally opposite vertices of the rectangle.
+ # VERTEX = COORDINATE,COORDINATE
+ # The first coordinate is horizontal (x), the second is vertical (y).
+ # COORDINATE = A float in the 0 to 1 range, relative to the total length of
+ # the image or video in the given dimension. For fractions the
+ # leading non-decimal 0 can be omitted (i.e. 0.3 = .3).
+ # Point 0,0 is in the top left.
+ # TIME_SEGMENT_START = TIME_OFFSET
+ # Expresses a beginning, inclusive, of a time segment
+ # within an example that has a time dimension
+ # (e.g. video).
+ # TIME_SEGMENT_END = TIME_OFFSET
+ # Expresses an end, exclusive, of a time segment within
+ # an example that has a time dimension (e.g. video).
+ # TIME_OFFSET = A number of seconds as measured from the start of an
+ # example (e.g. video). Fractions are allowed, up to
+ # microsecond precision. "inf" is allowed, and it means the end
+ # of the example.
+ # TEXT_SNIPPET = The content of a text snippet, UTF-8 encoded, enclosed within
+ # double quotes ("").
+ # SENTIMENT = An integer between 0 and
+ # Dataset.text_sentiment_dataset_metadata.sentiment_max
+ # (inclusive). Describes the ordinal of the sentiment - a higher
+ # value means a more positive sentiment. All the values are
+ # completely relative, i.e. neither does 0 need to mean a negative or
+ # neutral sentiment nor does sentiment_max need to mean a positive one
+ # - it is just required that 0 is the least positive sentiment
+ # in the data, and sentiment_max is the most positive one.
+ # The SENTIMENT shouldn't be confused with "score" or "magnitude"
+ # from the previous Natural Language Sentiment Analysis API.
+ # All SENTIMENT values between 0 and sentiment_max must be
+ # represented in the imported data. On prediction the same 0 to
+ # sentiment_max range will be used. The difference between
+ # neighboring sentiment values need not be uniform, e.g. 1 and
+ # 2 may be similar whereas the difference between 2 and 3 may be
+ # huge.
+ #
+ # Errors:
+ # If any of the provided CSV files can't be parsed or if more than a certain
+ # percentage of CSV rows cannot be processed then the operation fails and
+ # nothing is imported. Regardless of overall success or failure the per-row
+ # failures, up to a certain count cap, are listed in
+ # Operation.metadata.partial_failures.
+ # @!attribute [rw] gcs_source
+ # @return [Google::Cloud::AutoML::V1beta1::GcsSource]
+ # The Google Cloud Storage location for the input content.
+ # In ImportData, the gcs_source points to a CSV file with the structure
+ # described in the comment.
+ # @!attribute [rw] bigquery_source
+ # @return [Google::Cloud::AutoML::V1beta1::BigQuerySource]
+ # The BigQuery location for the input content.
+ # @!attribute [rw] params
+ # @return [Google::Protobuf::Map{String => String}]
+ # Additional domain-specific parameters describing the semantics of the
+ # imported data; any string must be up to 25000
+ # characters long.
+ #
+ # * For Tables:
+ # `schema_inference_version` - (integer) Required. The version of the
+ # algorithm that should be used for the initial inference of the
+ # schema (columns' DataTypes) of the table the data is being imported
+ # into. Allowed values: "1".
+ class InputConfig
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+
+ # @!attribute [rw] key
+ # @return [String]
+ # @!attribute [rw] value
+ # @return [String]
+ class ParamsEntry
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
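For orientation, a minimal sketch of building this message with the gem's generated classes; the bucket path is a hypothetical placeholder, and the params key follows the Tables note above:

```ruby
require "google/cloud/automl/v1beta1"

# A minimal sketch: an ImportData input config that reads a CSV from
# Google Cloud Storage. "gs://my-bucket/tables/train.csv" is hypothetical.
input_config = Google::Cloud::AutoML::V1beta1::InputConfig.new(
  gcs_source: Google::Cloud::AutoML::V1beta1::GcsSource.new(
    input_uris: ["gs://my-bucket/tables/train.csv"]
  ),
  # Required for Tables imports; "1" is the only allowed value today.
  params: { "schema_inference_version" => "1" }
)
```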
+
+ # Input configuration for BatchPredict Action.
+ #
+ # The format of input depends on the ML problem of the model used for
+ # prediction. As input source the
+ # {Google::Cloud::AutoML::V1beta1::InputConfig#gcs_source gcs_source}
+ # is expected, unless specified otherwise.
+ #
+ # The formats are represented in EBNF with commas being literal and with
+ # non-terminal symbols defined near the end of this comment. The formats are:
+ #
+ # * For Image Classification:
+ # CSV file(s) with each line having just a single column:
+ # GCS_FILE_PATH
+ # which leads to an image of up to 30MB in size. Supported
+ # extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in
+ # the batch predict output.
+ # Three sample rows:
+ # gs://folder/image1.jpeg
+ # gs://folder/image2.gif
+ # gs://folder/image3.png
+ #
+ # * For Image Object Detection:
+ # CSV file(s) with each line having just a single column:
+ # GCS_FILE_PATH
+ # which leads to an image of up to 30MB in size. Supported
+ # extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in
+ # the batch predict output.
+ # Three sample rows:
+ # gs://folder/image1.jpeg
+ # gs://folder/image2.gif
+ # gs://folder/image3.png
+ # * For Video Classification:
+ # CSV file(s) with each line in format:
+ # GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
+ # GCS_FILE_PATH leads to a video of up to 50GB in size and up to 3h
+ # duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+ # TIME_SEGMENT_START and TIME_SEGMENT_END must be within the
+ # length of the video, and the end has to be after the start.
+ # Three sample rows:
+ # gs://folder/video1.mp4,10,40
+ # gs://folder/video1.mp4,20,60
+ # gs://folder/vid2.mov,0,inf
+ #
+ # * For Video Object Tracking:
+ # CSV file(s) with each line in format:
+ # GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
+ # GCS_FILE_PATH leads to a video of up to 50GB in size and up to 3h
+ # duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+ # TIME_SEGMENT_START and TIME_SEGMENT_END must be within the
+ # length of the video, and the end has to be after the start.
+ # Three sample rows:
+ # gs://folder/video1.mp4,10,240
+ # gs://folder/video1.mp4,300,360
+ # gs://folder/vid2.mov,0,inf
+ # * For Text Classification:
+ # CSV file(s) with each line having just a single column:
+ # GCS_FILE_PATH | TEXT_SNIPPET
+ # Any given text file can be up to 128kB in size.
+ # Any given text snippet content must have 60,000 characters or less.
+ # Three sample rows:
+ # gs://folder/text1.txt
+ # "Some text content to predict"
+ # gs://folder/text3.pdf
+ # Supported file extensions: .txt, .pdf
+ #
+ # * For Text Sentiment:
+ # CSV file(s) with each line having just a single column:
+ # GCS_FILE_PATH | TEXT_SNIPPET
+ # Any given text file can be up to 128kB in size.
+ # Any given text snippet content must have 500 characters or less.
+ # Three sample rows:
+ # gs://folder/text1.txt
+ # "Some text content to predict"
+ # gs://folder/text3.pdf
+ # Supported file extensions: .txt, .pdf
+ #
+ # * For Text Extraction:
+ # .JSONL (i.e. JSON Lines) file(s) which either provide text in-line or
+ # as documents (for a single BatchPredict call only one of these
+ # formats may be used).
+ # The in-line .JSONL file(s) contain, per line, a proto that
+ # wraps a temporary user-assigned TextSnippet ID (string up to 2000
+ # characters long) called "id", a TextSnippet proto (in
+ # JSON representation) and zero or more TextFeature protos. Any given
+ # text snippet content must have 30,000 characters or less, and also
+ # be UTF-8 NFC encoded (ASCII already is). The IDs provided should be
+ # unique.
+ # The document .JSONL file(s) contain, per line, a proto that wraps a
+ # Document proto with input_config set. Only PDF documents are
+ # supported now, and each document must be up to 2MB in size.
+ # Any given .JSONL file must be 100MB or smaller, and no more than 20
+ # files may be given.
+ # Sample in-line JSON Lines file (presented here with artificial line
+ # breaks, but the only actual line break is denoted by \n):
+ # {
+ # "id": "my_first_id",
+ # "text_snippet": { "content": "dog car cat"},
+ # "text_features": [
+ # {
+ # "text_segment": \\{"start_offset": 4, "end_offset": 6},
+ # "structural_type": PARAGRAPH,
+ # "bounding_poly": {
+ # "normalized_vertices": [
+ # \\{"x": 0.1, "y": 0.1},
+ # \\{"x": 0.1, "y": 0.3},
+ # \\{"x": 0.3, "y": 0.3},
+ # \\{"x": 0.3, "y": 0.1},
+ # ]
+ # },
+ # }
+ # ],
+ # }\n
+ # {
+ # "id": "2",
+ # "text_snippet": {
+ # "content": "An elaborate content",
+ # "mime_type": "text/plain"
+ # }
+ # }
+ # Sample document JSON Lines file (presented here with artificial line
+ # breaks, but the only actual line break is denoted by \n):
+ # {
+ # "document": {
+ # "input_config": {
+ # "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
+ # }
+ # }
+ # }
+ # }\n
+ # {
+ # "document": {
+ # "input_config": {
+ # "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
+ # }
+ # }
+ # }
+ # }
+ #
+ # * For Tables:
+ # Either
+ # {Google::Cloud::AutoML::V1beta1::InputConfig#gcs_source gcs_source} or
+ #
+ # {Google::Cloud::AutoML::V1beta1::InputConfig#bigquery_source bigquery_source}.
+ # GCS case:
+ # CSV file(s), each by itself 10GB or smaller and total size must be
+ # 100GB or smaller, where the first file must have a header containing
+ # column names. If the first row of a subsequent file is the same as
+ # the header, then it is also treated as a header. All other rows
+ # contain values for the corresponding columns.
+ # The column names must contain the model's
+ #
+ # [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
+ #
+ # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name-s}
+ # (order doesn't matter). The columns corresponding to the model's
+ # input feature column specs must contain values compatible with the
+ # column spec's data types. Prediction on all the rows, i.e. the CSV
+ # lines, will be attempted. For FORECASTING
+ #
+ # [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
+ # all columns having
+ #
+ # [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType]
+ # type will be ignored.
+ # First three sample rows of a CSV file:
+ # "First Name","Last Name","Dob","Addresses"
+ #
+ # "John","Doe","1968-01-22","[\\{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},\\{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+ #
+ # "Jane","Doe","1980-10-16","[\\{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},\\{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]"
+ # BigQuery case:
+ # A URI of a BigQuery table. The user data size of the BigQuery
+ # table must be 100GB or smaller.
+ # The column names must contain the model's
+ #
+ # [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
+ #
+ # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name-s}
+ # (order doesn't matter). The columns corresponding to the model's
+ # input feature column specs must contain values compatible with the
+ # column spec's data types. Prediction on all the rows of the table
+ # will be attempted. For FORECASTING
+ #
+ # [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
+ # all columns having
+ #
+ # [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType]
+ # type will be ignored.
+ #
+ # Definitions:
+ # GCS_FILE_PATH = A path to a file on GCS, e.g. "gs://folder/video.avi".
+ # TEXT_SNIPPET = The content of a text snippet, UTF-8 encoded, enclosed within
+ # double quotes ("")
+ # TIME_SEGMENT_START = TIME_OFFSET
+ # Expresses a beginning, inclusive, of a time segment
+ # within an example that has a time dimension (e.g. video).
+ # TIME_SEGMENT_END = TIME_OFFSET
+ # Expresses an end, exclusive, of a time segment within
+ # an example that has a time dimension (e.g. video).
+ # TIME_OFFSET = A number of seconds as measured from the start of an
+ # example (e.g. video). Fractions are allowed, up to
+ # microsecond precision. "inf" is allowed and it means the end
+ # of the example.
+ #
+ # Errors:
+ # If any of the provided CSV files can't be parsed or if more than a certain
+ # percentage of CSV rows cannot be processed then the operation fails and
+ # prediction does not happen. Regardless of overall success or failure the
+ # per-row failures, up to a certain count cap, will be listed in
+ # Operation.metadata.partial_failures.
+ # @!attribute [rw] gcs_source
+ # @return [Google::Cloud::AutoML::V1beta1::GcsSource]
+ # The Google Cloud Storage location for the input content.
+ # @!attribute [rw] bigquery_source
+ # @return [Google::Cloud::AutoML::V1beta1::BigQuerySource]
+ # The BigQuery location for the input content.
+ class BatchPredictInputConfig
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
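Analogously, a hedged sketch of a batch prediction input that points at a CSV of image paths (one GCS_FILE_PATH per line, per the Image Classification format above); the URI is hypothetical:

```ruby
# A minimal sketch: batch prediction input read from a CSV of GCS paths.
batch_input = Google::Cloud::AutoML::V1beta1::BatchPredictInputConfig.new(
  gcs_source: Google::Cloud::AutoML::V1beta1::GcsSource.new(
    input_uris: ["gs://my-bucket/batch/images.csv"] # hypothetical CSV of paths
  )
)
```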
+
+ # Input configuration of a {Google::Cloud::AutoML::V1beta1::Document Document}.
+ # @!attribute [rw] gcs_source
+ # @return [Google::Cloud::AutoML::V1beta1::GcsSource]
+ # The Google Cloud Storage location of the document file. Only a single path
+ # should be given.
+ # Max supported size: 512MB.
+ # Supported extensions: .PDF.
+ class DocumentInputConfig
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # * For Translation:
+ # CSV file `translation.csv`, with each line in format:
+ # ML_USE,GCS_FILE_PATH
+ # GCS_FILE_PATH leads to a .TSV file which describes examples that have
+ # the given ML_USE, using the following row format per line:
+ # TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in target
+ # language)
+ #
+ # * For Tables:
+ # Output depends on whether the dataset was imported from GCS or
+ # BigQuery.
+ # GCS case:
+ #
+ # {Google::Cloud::AutoML::V1beta1::OutputConfig#gcs_destination gcs_destination}
+ # must be set. Exported are CSV file(s) `tables_1.csv`,
+ # `tables_2.csv`,...,`tables_N.csv` with each having as header line
+ # the table's column names, and all other lines contain values for
+ # the header columns.
+ # BigQuery case:
+ #
+ # {Google::Cloud::AutoML::V1beta1::OutputConfig#bigquery_destination bigquery_destination}
+ # pointing to a BigQuery project must be set. In the given project a
+ # new dataset will be created with name
+ #
+ # `export_data_<automl-dataset-display-name>_<timestamp-of-export-call>`
+ # where <automl-dataset-display-name> will be made
+ # BigQuery-dataset-name compatible (e.g. most special characters will
+ # become underscores), and the timestamp will be in
+ # YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that
+ # dataset a new table called `primary_table` will be created, and
+ # filled with precisely the same data as was obtained on import.
+ # @!attribute [rw] gcs_destination
+ # @return [Google::Cloud::AutoML::V1beta1::GcsDestination]
+ # The Google Cloud Storage location where the output is to be written to.
+ # For Image Object Detection, Text Extraction, Video Classification and
+ # Tables, in the given directory a new directory will be created with name:
+ # export_data-<dataset-display-name>-<timestamp-of-export-call> where
+ # the timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export
+ # output will be written into that directory.
+ # @!attribute [rw] bigquery_destination
+ # @return [Google::Cloud::AutoML::V1beta1::BigQueryDestination]
+ # The BigQuery location where the output is to be written to.
+ class OutputConfig
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
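A hedged sketch of an export destination; the service creates a timestamped subdirectory under this prefix, as described above. The bucket name is hypothetical:

```ruby
# A minimal sketch: export a dataset to a GCS directory.
output_config = Google::Cloud::AutoML::V1beta1::OutputConfig.new(
  gcs_destination: Google::Cloud::AutoML::V1beta1::GcsDestination.new(
    output_uri_prefix: "gs://my-bucket/exports" # hypothetical prefix
  )
)
```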
+
+ # Output configuration for BatchPredict Action.
+ #
+ # As destination the
+ #
+ # {Google::Cloud::AutoML::V1beta1::BatchPredictOutputConfig#gcs_destination gcs_destination}
+ # must be set unless specified otherwise for a domain. If gcs_destination is
+ # set then in the given directory a new directory is created. Its name
+ # will be
+ # "prediction-<model-display-name>-<timestamp-of-prediction-call>",
+ # where the timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Its
+ # contents depend on the ML problem the predictions are made for.
+ #
+ # * For Image Classification:
+ # In the created directory files `image_classification_1.jsonl`,
+ # `image_classification_2.jsonl`,...,`image_classification_N.jsonl`
+ # will be created, where N may be 1, and depends on the
+ # total number of the successfully predicted images and annotations.
+ # A single image will be listed only once with all its annotations,
+ # and its annotations will never be split across files.
+ # Each .JSONL file will contain, per line, a JSON representation of a
+ # proto that wraps the image's "ID" : "<id_value>" followed by a list of
+ # zero or more AnnotationPayload protos (called annotations), which
+ # have classification detail populated.
+ # If prediction for any image failed (partially or completely), then
+ # additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
+ # files will be created (N depends on the total number of failed
+ # predictions). These files will have a JSON representation of a proto
+ # that wraps the same "ID" : "<id_value>" but here followed by
+ # exactly one
+ #
+ # [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ # containing only `code` and `message` fields.
+ #
+ # * For Image Object Detection:
+ # In the created directory files `image_object_detection_1.jsonl`,
+ # `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl`
+ # will be created, where N may be 1, and depends on the
+ # total number of the successfully predicted images and annotations.
+ # Each .JSONL file will contain, per line, a JSON representation of a
+ # proto that wraps the image's "ID" : "<id_value>" followed by a list of
+ # zero or more AnnotationPayload protos (called annotations), which
+ # have image_object_detection detail populated. A single image will
+ # be listed only once with all its annotations, and its annotations
+ # will never be split across files.
+ # If prediction for any image failed (partially or completely), then
+ # additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
+ # files will be created (N depends on the total number of failed
+ # predictions). These files will have a JSON representation of a proto
+ # that wraps the same "ID" : "<id_value>" but here followed by
+ # exactly one
+ #
+ # [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ # containing only `code` and `message` fields.
+ # * For Video Classification:
+ # In the created directory a video_classification.csv file, and a .JSON
+ # file per each video classification requested in the input (i.e. each
+ # line in the given CSV(s)), will be created.
+ #
+ # The format of video_classification.csv is:
+ #
+ # GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
+ # where:
+ # GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
+ # the prediction input lines (i.e. video_classification.csv has
+ # precisely the same number of lines as the prediction input had.)
+ # JSON_FILE_NAME = Name of .JSON file in the output directory, which
+ # contains prediction responses for the video time segment.
+ # STATUS = "OK" if prediction completed successfully, or an error code
+ # with message otherwise. If STATUS is not "OK" then the .JSON file
+ # for that line may not exist or be empty.
+ #
+ # Each .JSON file, assuming STATUS is "OK", will contain a list of
+ # AnnotationPayload protos in JSON format, which are the predictions
+ # for the video time segment the file is assigned to in the
+ # video_classification.csv. All AnnotationPayload protos will have
+ # the video_classification field set, and will be sorted by the
+ # video_classification.type field (note that the returned types are
+ # governed by the `classifaction_types` parameter in
+ # [PredictService.BatchPredictRequest.params][]).
+ #
+ # * For Video Object Tracking:
+ # In the created directory a video_object_tracking.csv file will be
+ # created, and multiple files video_object_tracking_1.json,
+ # video_object_tracking_2.json,..., video_object_tracking_N.json,
+ # where N is the number of requests in the input (i.e. the number of
+ # lines in the given CSV(s)).
+ #
+ # The format of video_object_tracking.csv is:
+ #
+ # GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
+ # where:
+ # GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
+ # the prediction input lines (i.e. video_object_tracking.csv has
+ # precisely the same number of lines as the prediction input had.)
+ # JSON_FILE_NAME = Name of .JSON file in the output directory, which
+ # contains prediction responses for the video time segment.
+ # STATUS = "OK" if prediction completed successfully, or an error
+ # code with message otherwise. If STATUS is not "OK" then the .JSON
+ # file for that line may not exist or be empty.
+ #
+ # Each .JSON file, assuming STATUS is "OK", will contain a list of
+ # AnnotationPayload protos in JSON format, which are the predictions
+ # for each frame of the video time segment the file is assigned to in
+ # video_object_tracking.csv. All AnnotationPayload protos will have
+ # the video_object_tracking field set.
+ # * For Text Classification:
+ # In the created directory files `text_classification_1.jsonl`,
+ # `text_classification_2.jsonl`,...,`text_classification_N.jsonl`
+ # will be created, where N may be 1, and depends on the
+ # total number of inputs and annotations found.
+ #
+ # Each .JSONL file will contain, per line, a JSON representation of a
+ # proto that wraps the input text snippet or input text file and a list of
+ # zero or more AnnotationPayload protos (called annotations), which
+ # have classification detail populated. A single text snippet or file
+ # will be listed only once with all its annotations, and its
+ # annotations will never be split across files.
+ #
+ # If prediction for any text snippet or file failed (partially or
+ # completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
+ # `errors_N.jsonl` files will be created (N depends on the total number of
+ # failed predictions). These files will have a JSON representation of a
+ # proto that wraps the input text snippet or input text file followed by
+ # exactly one
+ #
+ # [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ # containing only `code` and `message`.
+ #
+ # * For Text Sentiment:
+ # In the created directory files `text_sentiment_1.jsonl`,
+ # `text_sentiment_2.jsonl`,...,`text_sentiment_N.jsonl`
+ # will be created, where N may be 1, and depends on the
+ # total number of inputs and annotations found.
+ #
+ # Each .JSONL file will contain, per line, a JSON representation of a
+ # proto that wraps the input text snippet or input text file and a list of
+ # zero or more AnnotationPayload protos (called annotations), which
+ # have text_sentiment detail populated. A single text snippet or file
+ # will be listed only once with all its annotations, and its
+ # annotations will never be split across files.
+ #
+ # If prediction for any text snippet or file failed (partially or
+ # completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
+ # `errors_N.jsonl` files will be created (N depends on the total number of
+ # failed predictions). These files will have a JSON representation of a
+ # proto that wraps the input text snippet or input text file followed by
+ # exactly one
+ #
+ # [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ # containing only `code` and `message`.
+ #
+ # * For Text Extraction:
+ # In the created directory files `text_extraction_1.jsonl`,
+ # `text_extraction_2.jsonl`,...,`text_extraction_N.jsonl`
+ # will be created, where N may be 1, and depends on the
+ # total number of inputs and annotations found.
+ # The contents of these .JSONL file(s) depend on whether the input
+ # used inline text, or documents.
+ # If the input was inline, then each .JSONL file will contain, per line,
+ # a JSON representation of a proto that wraps the given in-request text
+ # snippet's "id" (if specified), followed by the input text snippet,
+ # and a list of zero or more
+ # AnnotationPayload protos (called annotations), which have
+ # text_extraction detail populated. A single text snippet will be
+ # listed only once with all its annotations, and its annotations will
+ # never be split across files.
+ # If the input used documents, then each .JSONL file will contain, per
+ # line, a JSON representation of a proto that wraps the given in-request
+ # document proto, followed by its OCR-ed representation in the form
+ # of a text snippet, finally followed by a list of zero or more
+ # AnnotationPayload protos (called annotations), which have
+ # text_extraction detail populated and refer, via their indices, to
+ # the OCR-ed text snippet. A single document (and its text snippet)
+ # will be listed only once with all its annotations, and its
+ # annotations will never be split across files.
+ # If prediction for any text snippet failed (partially or completely),
+ # then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
+ # `errors_N.jsonl` files will be created (N depends on the total number of
+ # failed predictions). These files will have a JSON representation of a
+ # proto that wraps either the "id" : "<id_value>" (in case of inline)
+ # or the document proto (in case of document) but here followed by
+ # exactly one
+ #
+ # [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ # containing only `code` and `message`.
+ #
+ # * For Tables:
+ # Output depends on whether
+ #
+ # {Google::Cloud::AutoML::V1beta1::BatchPredictOutputConfig#gcs_destination gcs_destination}
+ # or
+ #
+ # {Google::Cloud::AutoML::V1beta1::BatchPredictOutputConfig#bigquery_destination bigquery_destination}
+ # is set (either is allowed).
+ # GCS case:
+ # In the created directory files `tables_1.csv`, `tables_2.csv`,...,
+ # `tables_N.csv` will be created, where N may be 1, and depends on
+ # the total number of the successfully predicted rows.
+ # For all CLASSIFICATION
+ #
+ # [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
+ # Each .csv file will contain a header, listing all columns'
+ #
+ # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name-s}
+ # given on input followed by M target column names in the format of
+ #
+ # "<{Google::Cloud::AutoML::V1beta1::TablesModelMetadata#target_column_spec target_column_specs}
+ #
+ # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name}>_<target
+ # value>_score" where M is the number of distinct target values,
+ # i.e. the number of distinct values in the target column of the table
+ # used to train the model. Subsequent lines will contain the
+ # respective values of successfully predicted rows, with the last,
+ # i.e. the target, columns having the corresponding prediction
+ # {Google::Cloud::AutoML::V1beta1::TablesAnnotation#score scores}.
+ # For REGRESSION and FORECASTING
+ #
+ # [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
+ # Each .csv file will contain a header, listing all columns'
+ # [display_name-s][google.cloud.automl.v1beta1.display_name] given
+ # on input followed by the predicted target column with name in the
+ # format of
+ #
+ # "predicted_<{Google::Cloud::AutoML::V1beta1::TablesModelMetadata#target_column_spec target_column_specs}
+ #
+ # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name}>"
+ # Subsequent lines will contain the respective values of
+ # successfully predicted rows, with the last, i.e. the target,
+ # column having the predicted target value.
+ # If prediction for any rows failed, then additional
+ # `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` files will be
+ # created (N depends on the total number of failed rows). These files
+ # will have an analogous format to `tables_*.csv`, but always with a
+ # single target column having
+ #
+ # [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ # represented as a JSON string, and containing only `code` and
+ # `message`.
+ # BigQuery case:
+ #
+ # {Google::Cloud::AutoML::V1beta1::OutputConfig#bigquery_destination bigquery_destination}
+ # pointing to a BigQuery project must be set. In the given project a
+ # new dataset will be created with name
+ # `prediction_<model-display-name>_<timestamp-of-prediction-call>`
+ # where <model-display-name> will be made
+ # BigQuery-dataset-name compatible (e.g. most special characters will
+ # become underscores), and the timestamp will be in
+ # YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset
+ # two tables will be created, `predictions`, and `errors`.
+ # The `predictions` table's column names will be the input columns'
+ #
+ # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name-s}
+ # followed by the target column with name in the format of
+ #
+ # "predicted_<{Google::Cloud::AutoML::V1beta1::TablesModelMetadata#target_column_spec target_column_specs}
+ #
+ # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name}>"
+ # The input feature columns will contain the respective values of
+ # successfully predicted rows, with the target column having an
+ # ARRAY of
+ #
+ # {Google::Cloud::AutoML::V1beta1::AnnotationPayload AnnotationPayloads},
+ # represented as STRUCT-s, containing
+ # {Google::Cloud::AutoML::V1beta1::TablesAnnotation TablesAnnotation}.
+ # The `errors` table contains rows for which the prediction has
+ # failed; it has analogous input columns while the target column name
+ # is in the format of
+ #
+ # "errors_<{Google::Cloud::AutoML::V1beta1::TablesModelMetadata#target_column_spec target_column_specs}
+ #
+ # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name}>",
+ # and as a value has
+ #
+ # [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ # represented as a STRUCT, and containing only `code` and `message`.
+ # @!attribute [rw] gcs_destination
+ # @return [Google::Cloud::AutoML::V1beta1::GcsDestination]
+ # The Google Cloud Storage location of the directory where the output is to
+ # be written to.
+ # @!attribute [rw] bigquery_destination
+ # @return [Google::Cloud::AutoML::V1beta1::BigQueryDestination]
+ # The BigQuery location where the output is to be written to.
+ class BatchPredictOutputConfig
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
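A hedged sketch of a batch prediction output destination; the service creates a "prediction-<model-display-name>-<timestamp>" directory under this prefix. The bucket name is hypothetical:

```ruby
# A minimal sketch: write batch prediction results to GCS.
batch_output = Google::Cloud::AutoML::V1beta1::BatchPredictOutputConfig.new(
  gcs_destination: Google::Cloud::AutoML::V1beta1::GcsDestination.new(
    output_uri_prefix: "gs://my-bucket/predictions" # hypothetical prefix
  )
)
```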
+
+ # Output configuration for ModelExport Action.
+ # @!attribute [rw] gcs_destination
+ # @return [Google::Cloud::AutoML::V1beta1::GcsDestination]
+ # The Google Cloud Storage location where the model is to be written to.
+ # This location may only be set for the following model formats:
+ # "tflite", "edgetpu_tflite", "tf_saved_model", "tf_js", "core_ml".
+ #
+ # Under the directory given as the destination a new one with name
+ # "model-export-<model-display-name>-<timestamp-of-export-call>",
+ # where the timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format,
+ # will be created. Inside it, the model and any of its supporting files
+ # will be written.
+ # @!attribute [rw] gcr_destination
+ # @return [Google::Cloud::AutoML::V1beta1::GcrDestination]
+ # The GCR location where the model image is to be pushed. This location
+ # may only be set for the following model formats:
+ # "docker".
+ #
+ # The model image will be created under the given URI.
+ # @!attribute [rw] model_format
+ # @return [String]
+ # The format in which the model must be exported. The available, and default,
+ # formats depend on the problem and model type (if a given problem and type
+ # combination doesn't have a format listed, it means its models are not
+ # exportable):
+ #
+ # * For Image Classification mobile-low-latency-1, mobile-versatile-1,
+ # mobile-high-accuracy-1:
+ # "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js",
+ # "docker".
+ #
+ # * For Image Classification mobile-core-ml-low-latency-1,
+ # mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1:
+ # "core_ml" (default).
+ # Formats description:
+ #
+ # * tflite - Used for Android mobile devices.
+ # * edgetpu_tflite - Used for [Edge TPU](https://cloud.google.com/edge-tpu/)
+ # devices.
+ # * tf_saved_model - A TensorFlow model in SavedModel format.
+ # * tf_js - A [TensorFlow.js](https://www.tensorflow.org/js) model that can
+ # be used in the browser and in Node.js using JavaScript.
+ # * docker - Used for Docker containers. Use the params field to customize
+ # the container. The container is verified to work correctly on the
+ # Ubuntu 16.04 operating system. See more at the
+ # [containers quickstart](https://cloud.google.com/vision/automl/docs/containers-gcs-quickstart).
+ # * core_ml - Used for iOS mobile devices.
+ # @!attribute [rw] params
+ # @return [Google::Protobuf::Map{String => String}]
+ # Additional model-type and format specific parameters describing the
+ # requirements for the model files to be exported; any string must be up to
+ # 25000 characters long.
+ #
+ # * For `docker` format:
+ # `cpu_architecture` - (string) "x86_64" (default).
+ # `gpu_architecture` - (string) "none" (default), "nvidia".
+ class ModelExportOutputConfig
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+
+ # @!attribute [rw] key
+ # @return [String]
+ # @!attribute [rw] value
+ # @return [String]
+ class ParamsEntry
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
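A hedged sketch of a model export config, using the "tflite" format listed above for the mobile-* image classification model types; the bucket name is hypothetical:

```ruby
# A minimal sketch: export an edge model as TFLite files into a GCS directory.
export_config = Google::Cloud::AutoML::V1beta1::ModelExportOutputConfig.new(
  gcs_destination: Google::Cloud::AutoML::V1beta1::GcsDestination.new(
    output_uri_prefix: "gs://my-bucket/models" # hypothetical prefix
  ),
  model_format: "tflite" # default for mobile-* image classification types
)
```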
+
+ # Output configuration for ExportEvaluatedExamples Action. Note that this call
+ # is available only for 30 days after the moment the model was evaluated.
+ # The output depends on the domain, as follows (note that only examples from
+ # the TEST set are exported):
+ #
+ # * For Tables:
+ #
+ # {Google::Cloud::AutoML::V1beta1::OutputConfig#bigquery_destination bigquery_destination}
+ # pointing to a BigQuery project must be set. In the given project a
+ # new dataset will be created with name
+ #
+ # `export_evaluated_examples_<model-display-name>_<timestamp-of-export-call>`
+ # where <model-display-name> will be made BigQuery-dataset-name
+ # compatible (e.g. most special characters will become underscores),
+ # and the timestamp will be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601"
+ # format. In the dataset an `evaluated_examples` table will be
+ # created. It will have all the same columns as the
+ #
+ # {Google::Cloud::AutoML::V1beta1::TablesDatasetMetadata#primary_table_spec_id primary_table}
+ # of the
+ # {Google::Cloud::AutoML::V1beta1::Model#dataset_id dataset} from which
+ # the model was created, as they were at the moment of the model's
+ # evaluation (this includes the target column with its ground
+ # truth), followed by a column called "predicted_<target_column>". That
+ # last column will contain the model's prediction result for each
+ # respective row, given as an ARRAY of
+ # {Google::Cloud::AutoML::V1beta1::AnnotationPayload AnnotationPayloads},
+ # represented as STRUCT-s, containing
+ # {Google::Cloud::AutoML::V1beta1::TablesAnnotation TablesAnnotation}.
+ # @!attribute [rw] bigquery_destination
+ # @return [Google::Cloud::AutoML::V1beta1::BigQueryDestination]
+ # The BigQuery location where the output is to be written to.
+ class ExportEvaluatedExamplesOutputConfig
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
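Since this message only accepts a BigQuery destination, a hedged sketch with a placeholder project URI:

```ruby
# A minimal sketch: export evaluated examples into a BigQuery project.
examples_output = Google::Cloud::AutoML::V1beta1::ExportEvaluatedExamplesOutputConfig.new(
  bigquery_destination: Google::Cloud::AutoML::V1beta1::BigQueryDestination.new(
    output_uri: "bq://my-project" # hypothetical project URI
  )
)
```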
+
+ # The Google Cloud Storage location for the input content.
+ # @!attribute [rw] input_uris
+ # @return [Array<String>]
+ # Required. Google Cloud Storage URIs to input files, up to 2000 characters
+ # long. Accepted forms:
+ # * Full object path, e.g. gs://bucket/directory/object.csv
+ class GcsSource
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # The BigQuery location for the input content.
+ # @!attribute [rw] input_uri
+ # @return [String]
+ # Required. BigQuery URI to a table, up to 2000 characters long.
+ # Accepted forms:
+ # * BigQuery path e.g. bq://projectId.bqDatasetId.bqTableId
+ class BigQuerySource
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # The Google Cloud Storage location where the output is to be written to.
+ # @!attribute [rw] output_uri_prefix
+ # @return [String]
+ # Required. Google Cloud Storage URI to output directory, up to 2000
+ # characters long.
+ # Accepted forms:
+ # * Prefix path: gs://bucket/directory
+ # The requesting user must have write permission to the bucket.
+ # The directory is created if it doesn't exist.
+ class GcsDestination
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # The BigQuery location for the output content.
+ # @!attribute [rw] output_uri
+ # @return [String]
+ # Required. BigQuery URI to a project, up to 2000 characters long.
+ # Accepted forms:
+ # * BigQuery path e.g. bq://projectId
+ class BigQueryDestination
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # The GCR location where the image must be pushed.
+ # @!attribute [rw] output_uri
+ # @return [String]
+ # Required. Google Container Registry URI of the new image, up to 2000
+ # characters long. See
+ #
+ # https://cloud.google.com/container-registry/docs/pushing-and-pulling#pushing_an_image_to_a_registry
+ # Accepted forms:
+ # * [HOSTNAME]/[PROJECT-ID]/[IMAGE]
+ # * [HOSTNAME]/[PROJECT-ID]/[IMAGE]:[TAG]
+ #
+ # The requesting user must have permission to push images to the project.
+ class GcrDestination
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+ end
+ end
+ end
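Finally, a hedged end-to-end sketch wiring an InputConfig (as built above) into this gem's generated client. Project, location, and dataset IDs are placeholders, and the call shape follows the usual gapic-generator-ruby conventions:

```ruby
require "google/cloud/automl/v1beta1"

# Assumption: input_config was built as in the InputConfig sketch above.
client = Google::Cloud::AutoML::V1beta1::AutoML::Client.new

dataset_name = client.dataset_path project:  "my-project",
                                   location: "us-central1",
                                   dataset:  "TBL1234567890"

# ImportData runs as a long-running operation.
operation = client.import_data name: dataset_name, input_config: input_config
operation.wait_until_done!
raise operation.error.message if operation.error?
```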