google-cloud-bigquery-storage-v1 0.7.0 → 0.9.1

@@ -33,11 +33,7 @@ module Google
  #     // For Kubernetes resources, the format is {api group}/{kind}.
  #     option (google.api.resource) = {
  #       type: "pubsub.googleapis.com/Topic"
- #       name_descriptor: {
- #         pattern: "projects/{project}/topics/{topic}"
- #         parent_type: "cloudresourcemanager.googleapis.com/Project"
- #         parent_name_extractor: "projects/{project}"
- #       }
+ #       pattern: "projects/{project}/topics/{topic}"
  #     };
  #   }
  #
@@ -45,10 +41,7 @@ module Google
  #
  #     resources:
  #     - type: "pubsub.googleapis.com/Topic"
- #       name_descriptor:
- #         - pattern: "projects/{project}/topics/{topic}"
- #           parent_type: "cloudresourcemanager.googleapis.com/Project"
- #           parent_name_extractor: "projects/{project}"
+ #       pattern: "projects/{project}/topics/{topic}"
  #
  # Sometimes, resources have multiple patterns, typically because they can
  # live under multiple parents.
@@ -58,26 +51,10 @@ module Google
  #   message LogEntry {
  #     option (google.api.resource) = {
  #       type: "logging.googleapis.com/LogEntry"
- #       name_descriptor: {
- #         pattern: "projects/{project}/logs/{log}"
- #         parent_type: "cloudresourcemanager.googleapis.com/Project"
- #         parent_name_extractor: "projects/{project}"
- #       }
- #       name_descriptor: {
- #         pattern: "folders/{folder}/logs/{log}"
- #         parent_type: "cloudresourcemanager.googleapis.com/Folder"
- #         parent_name_extractor: "folders/{folder}"
- #       }
- #       name_descriptor: {
- #         pattern: "organizations/{organization}/logs/{log}"
- #         parent_type: "cloudresourcemanager.googleapis.com/Organization"
- #         parent_name_extractor: "organizations/{organization}"
- #       }
- #       name_descriptor: {
- #         pattern: "billingAccounts/{billing_account}/logs/{log}"
- #         parent_type: "billing.googleapis.com/BillingAccount"
- #         parent_name_extractor: "billingAccounts/{billing_account}"
- #       }
+ #       pattern: "projects/{project}/logs/{log}"
+ #       pattern: "folders/{folder}/logs/{log}"
+ #       pattern: "organizations/{organization}/logs/{log}"
+ #       pattern: "billingAccounts/{billing_account}/logs/{log}"
  #     };
  #   }
  #
@@ -85,48 +62,10 @@ module Google
  #
  #     resources:
  #     - type: 'logging.googleapis.com/LogEntry'
- #       name_descriptor:
- #         - pattern: "projects/{project}/logs/{log}"
- #           parent_type: "cloudresourcemanager.googleapis.com/Project"
- #           parent_name_extractor: "projects/{project}"
- #         - pattern: "folders/{folder}/logs/{log}"
- #           parent_type: "cloudresourcemanager.googleapis.com/Folder"
- #           parent_name_extractor: "folders/{folder}"
- #         - pattern: "organizations/{organization}/logs/{log}"
- #           parent_type: "cloudresourcemanager.googleapis.com/Organization"
- #           parent_name_extractor: "organizations/{organization}"
- #         - pattern: "billingAccounts/{billing_account}/logs/{log}"
- #           parent_type: "billing.googleapis.com/BillingAccount"
- #           parent_name_extractor: "billingAccounts/{billing_account}"
- #
- # For flexible resources, the resource name doesn't contain parent names, but
- # the resource itself has parents for policy evaluation.
- #
- # Example:
- #
- #   message Shelf {
- #     option (google.api.resource) = {
- #       type: "library.googleapis.com/Shelf"
- #       name_descriptor: {
- #         pattern: "shelves/{shelf}"
- #         parent_type: "cloudresourcemanager.googleapis.com/Project"
- #       }
- #       name_descriptor: {
- #         pattern: "shelves/{shelf}"
- #         parent_type: "cloudresourcemanager.googleapis.com/Folder"
- #       }
- #     };
- #   }
- #
- # The ResourceDescriptor Yaml config will look like:
- #
- #     resources:
- #     - type: 'library.googleapis.com/Shelf'
- #       name_descriptor:
- #         - pattern: "shelves/{shelf}"
- #           parent_type: "cloudresourcemanager.googleapis.com/Project"
- #         - pattern: "shelves/{shelf}"
- #           parent_type: "cloudresourcemanager.googleapis.com/Folder"
+ #       pattern: "projects/{project}/logs/{log}"
+ #       pattern: "folders/{folder}/logs/{log}"
+ #       pattern: "organizations/{organization}/logs/{log}"
+ #       pattern: "billingAccounts/{billing_account}/logs/{log}"
  # @!attribute [rw] type
  #   @return [::String]
  #     The resource type. It must be in the format of
@@ -0,0 +1,56 @@
+ # frozen_string_literal: true
+
+ # Copyright 2021 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+
+ module Google
+   module Cloud
+     module Bigquery
+       module Storage
+         module V1
+           # ProtoSchema describes the schema of the serialized protocol buffer data rows.
+           # @!attribute [rw] proto_descriptor
+           #   @return [::Google::Protobuf::DescriptorProto]
+           #     Descriptor for the input message. The provided descriptor must be
+           #     self-contained, such that data rows sent can be fully decoded using
+           #     only the single descriptor. For data rows that are compositions of
+           #     multiple independent messages, this means the descriptor may need to
+           #     be transformed to only use nested types:
+           #     https://developers.google.com/protocol-buffers/docs/proto#nested
+           #
+           #     For additional information about how proto types and values map onto
+           #     BigQuery, see:
+           #     https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
+           class ProtoSchema
+             include ::Google::Protobuf::MessageExts
+             extend ::Google::Protobuf::MessageExts::ClassMethods
+           end
+
+           # @!attribute [rw] serialized_rows
+           #   @return [::Array<::String>]
+           #     A sequence of rows serialized as a Protocol Buffer.
+           #
+           #     See https://developers.google.com/protocol-buffers/docs/overview for
+           #     more information on deserializing this field.
+           class ProtoRows
+             include ::Google::Protobuf::MessageExts
+             extend ::Google::Protobuf::MessageExts::ClassMethods
+           end
+         end
+       end
+     end
+   end
+ end
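
The two messages above travel together in the write path: `ProtoSchema` tells the backend how to decode the bytes that `ProtoRows` carries. As a minimal sketch (not taken from this diff) of how they are built, assuming a hypothetical self-contained protobuf message class `MyRow` and a `my_row_descriptor_proto` value already holding its `DescriptorProto`:

    require "google/cloud/bigquery/storage/v1"

    # Each entry is one row, encoded with the standard protobuf binary format.
    rows = Google::Cloud::Bigquery::Storage::V1::ProtoRows.new(
      serialized_rows: [
        MyRow.encode(MyRow.new(name: "alice", score: 42)),
        MyRow.encode(MyRow.new(name: "bob", score: 7))
      ]
    )

    # The schema wraps the DescriptorProto for MyRow. Producing a fully
    # self-contained descriptor (with dependencies inlined as nested types)
    # is assumed here and left to the caller.
    schema = Google::Cloud::Bigquery::Storage::V1::ProtoSchema.new(
      proto_descriptor: my_row_descriptor_proto
    )
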
@@ -69,7 +69,7 @@ module Google
  extend ::Google::Protobuf::MessageExts::ClassMethods
  end

- # Estimated stream statistics for a given Stream.
+ # Estimated stream statistics for a given read Stream.
  # @!attribute [rw] progress
  #   @return [::Google::Cloud::Bigquery::Storage::V1::StreamStats::Progress]
  #     Represents the progress of the current stream.
@@ -162,6 +162,249 @@ module Google
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
  end
+
+ # Request message for `CreateWriteStream`.
+ # @!attribute [rw] parent
+ #   @return [::String]
+ #     Required. Reference to the table to which the stream belongs, in the format
+ #     of `projects/{project}/datasets/{dataset}/tables/{table}`.
+ # @!attribute [rw] write_stream
+ #   @return [::Google::Cloud::Bigquery::Storage::V1::WriteStream]
+ #     Required. Stream to be created.
+ class CreateWriteStreamRequest
+   include ::Google::Protobuf::MessageExts
+   extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Request message for `AppendRows`.
+ #
+ # Because AppendRows is a bidirectional streaming RPC, certain parts of the
+ # AppendRowsRequest need only be specified for the first request sent each
+ # time the gRPC network connection is opened/reopened.
+ # @!attribute [rw] write_stream
+ #   @return [::String]
+ #     Required. The write_stream identifies the target of the append operation, and only
+ #     needs to be specified as part of the first request on the gRPC connection.
+ #     If provided for subsequent requests, it must match the value of the first
+ #     request.
+ #
+ #     For explicitly created write streams, the format is:
+ #     `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+ #
+ #     For the special default stream, the format is:
+ #     `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+ # @!attribute [rw] offset
+ #   @return [::Google::Protobuf::Int64Value]
+ #     If present, the write is performed only if the next append offset is the
+ #     same as the provided value. If not present, the write is performed at the
+ #     current end of stream. Specifying a value for this field is not allowed
+ #     when calling AppendRows for the '_default' stream.
+ # @!attribute [rw] proto_rows
+ #   @return [::Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest::ProtoData]
+ #     Rows in proto format.
+ # @!attribute [rw] trace_id
+ #   @return [::String]
+ #     ID set by the client to annotate its identity. Only the setting from the
+ #     initial request is respected.
+ class AppendRowsRequest
+   include ::Google::Protobuf::MessageExts
+   extend ::Google::Protobuf::MessageExts::ClassMethods
+
+   # ProtoData contains the data rows and schema when constructing append
+   # requests.
+   # @!attribute [rw] writer_schema
+   #   @return [::Google::Cloud::Bigquery::Storage::V1::ProtoSchema]
+   #     Proto schema used to serialize the data. This value only needs to be
+   #     provided as part of the first request on a gRPC network connection,
+   #     and will be ignored for subsequent requests on the connection.
+   # @!attribute [rw] rows
+   #   @return [::Google::Cloud::Bigquery::Storage::V1::ProtoRows]
+   #     Serialized row data in protobuf message format.
+   #     Currently, the backend expects the serialized rows to adhere to
+   #     proto2 semantics when appending rows, particularly with respect to
+   #     how default values are encoded.
+   class ProtoData
+     include ::Google::Protobuf::MessageExts
+     extend ::Google::Protobuf::MessageExts::ClassMethods
+   end
+ end
+
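
The "first request only" rule above is easiest to see with two concrete requests. A hedged sketch, reusing `rows` and `schema` from the previous example; `table_path` and `more_rows` are placeholders, and the bidirectional `append_rows` call is assumed to be the one exposed by this gem's `BigQueryWrite::Client`:

    # The first request names the stream and carries the writer schema.
    first = Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest.new(
      write_stream: "#{table_path}/_default",
      proto_rows: Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest::ProtoData.new(
        writer_schema: schema,
        rows: rows
      )
    )

    # Later requests on the same connection may omit both fields.
    subsequent = Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest.new(
      proto_rows: Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest::ProtoData.new(
        rows: more_rows
      )
    )

    client = Google::Cloud::Bigquery::Storage::V1::BigQueryWrite::Client.new
    client.append_rows([first, subsequent]).each do |response|
      # For explicitly created streams each response carries the append
      # offset; for the _default stream the offset is not set.
      puts response.append_result&.offset&.value
    end
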
+ # Response message for `AppendRows`.
+ # @!attribute [rw] append_result
+ #   @return [::Google::Cloud::Bigquery::Storage::V1::AppendRowsResponse::AppendResult]
+ #     Result if the append is successful.
+ # @!attribute [rw] error
+ #   @return [::Google::Rpc::Status]
+ #     Error returned when problems were encountered. If present,
+ #     it indicates rows were not accepted into the system.
+ #     Users can retry or continue with other append requests within the
+ #     same connection.
+ #
+ #     Additional information about error signalling:
+ #
+ #     ALREADY_EXISTS: Happens when an append specified an offset, and the
+ #     backend already has received data at this offset. Typically encountered
+ #     in retry scenarios, and can be ignored.
+ #
+ #     OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+ #     the current end of the stream.
+ #
+ #     INVALID_ARGUMENT: Indicates a malformed request or data.
+ #
+ #     ABORTED: Request processing is aborted because of prior failures. The
+ #     request can be retried if the previous failure is addressed.
+ #
+ #     INTERNAL: Indicates server side error(s) that can be retried.
+ # @!attribute [rw] updated_schema
+ #   @return [::Google::Cloud::Bigquery::Storage::V1::TableSchema]
+ #     If the backend detects a schema update, it is passed to the user so that
+ #     the user can input new types of message against it. It will be empty
+ #     when no schema updates have occurred.
+ class AppendRowsResponse
+   include ::Google::Protobuf::MessageExts
+   extend ::Google::Protobuf::MessageExts::ClassMethods
+
+   # AppendResult is returned for successful append requests.
+   # @!attribute [rw] offset
+   #   @return [::Google::Protobuf::Int64Value]
+   #     The row offset at which the last append occurred. The offset will not
+   #     be set if appending using default streams.
+   class AppendResult
+     include ::Google::Protobuf::MessageExts
+     extend ::Google::Protobuf::MessageExts::ClassMethods
+   end
+ end
+
+ # Request message for `GetWriteStream`.
+ # @!attribute [rw] name
+ #   @return [::String]
+ #     Required. Name of the stream to get, in the form of
+ #     `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ class GetWriteStreamRequest
+   include ::Google::Protobuf::MessageExts
+   extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Request message for `BatchCommitWriteStreams`.
+ # @!attribute [rw] parent
+ #   @return [::String]
+ #     Required. Parent table that all the streams should belong to, in the form of
+ #     `projects/{project}/datasets/{dataset}/tables/{table}`.
+ # @!attribute [rw] write_streams
+ #   @return [::Array<::String>]
+ #     Required. The group of streams that will be committed atomically.
+ class BatchCommitWriteStreamsRequest
+   include ::Google::Protobuf::MessageExts
+   extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Response message for `BatchCommitWriteStreams`.
+ # @!attribute [rw] commit_time
+ #   @return [::Google::Protobuf::Timestamp]
+ #     The time at which streams were committed, at microsecond granularity.
+ #     This field will only exist when there are no stream errors.
+ #     **Note**: if this field is not set, it means the commit was not successful.
+ # @!attribute [rw] stream_errors
+ #   @return [::Array<::Google::Cloud::Bigquery::Storage::V1::StorageError>]
+ #     Stream-level error if the commit failed. Only streams with an error will
+ #     be in the list.
+ #     If empty, there is no error and all streams are committed successfully.
+ #     If non-empty, certain streams have errors and ZERO streams are committed
+ #     due to the atomicity guarantee.
+ class BatchCommitWriteStreamsResponse
+   include ::Google::Protobuf::MessageExts
+   extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Request message for invoking `FinalizeWriteStream`.
+ # @!attribute [rw] name
+ #   @return [::String]
+ #     Required. Name of the stream to finalize, in the form of
+ #     `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ class FinalizeWriteStreamRequest
+   include ::Google::Protobuf::MessageExts
+   extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Response message for `FinalizeWriteStream`.
+ # @!attribute [rw] row_count
+ #   @return [::Integer]
+ #     Number of rows in the finalized stream.
+ class FinalizeWriteStreamResponse
+   include ::Google::Protobuf::MessageExts
+   extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Request message for `FlushRows`.
+ # @!attribute [rw] write_stream
+ #   @return [::String]
+ #     Required. The stream that is the target of the flush operation.
+ # @!attribute [rw] offset
+ #   @return [::Google::Protobuf::Int64Value]
+ #     Ending offset of the flush operation. Rows before this offset (including
+ #     this offset) will be flushed.
+ class FlushRowsRequest
+   include ::Google::Protobuf::MessageExts
+   extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Response message for `FlushRows`.
+ # @!attribute [rw] offset
+ #   @return [::Integer]
+ #     The rows before this offset (including this offset) are flushed.
+ class FlushRowsResponse
+   include ::Google::Protobuf::MessageExts
+   extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
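
`FlushRows` only matters for `BUFFERED` streams, where appended rows stay invisible until flushed. A hedged sketch, reusing the `client` from the AppendRows example above; `buffered_stream_name` is a placeholder:

    # Make rows up to and including offset 99 visible to readers.
    response = client.flush_rows(
      write_stream: buffered_stream_name,
      offset: Google::Protobuf::Int64Value.new(value: 99)
    )
    puts "flushed through offset #{response.offset}"
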
+ # Structured custom BigQuery Storage error message. The error can be attached
+ # as error details in the returned rpc Status. In particular, the use of error
+ # codes allows more structured error handling, and reduces the need to evaluate
+ # unstructured error text strings.
+ # @!attribute [rw] code
+ #   @return [::Google::Cloud::Bigquery::Storage::V1::StorageError::StorageErrorCode]
+ #     BigQuery Storage specific error code.
+ # @!attribute [rw] entity
+ #   @return [::String]
+ #     Name of the failed entity.
+ # @!attribute [rw] error_message
+ #   @return [::String]
+ #     Message that describes the error.
+ class StorageError
+   include ::Google::Protobuf::MessageExts
+   extend ::Google::Protobuf::MessageExts::ClassMethods
+
+   # Error code for `StorageError`.
+   module StorageErrorCode
+     # Default error.
+     STORAGE_ERROR_CODE_UNSPECIFIED = 0
+
+     # Table is not found in the system.
+     TABLE_NOT_FOUND = 1
+
+     # Stream is already committed.
+     STREAM_ALREADY_COMMITTED = 2
+
+     # Stream is not found.
+     STREAM_NOT_FOUND = 3
+
+     # Invalid stream type.
+     # For example, you try to commit a stream that is not pending.
+     INVALID_STREAM_TYPE = 4
+
+     # Invalid stream state.
+     # For example, you try to commit a stream that is not finalized or has
+     # been garbage collected.
+     INVALID_STREAM_STATE = 5
+
+     # Stream is finalized.
+     STREAM_FINALIZED = 6
+
+     # There is a schema mismatch: the user schema has extra fields that are
+     # not present in the BigQuery table schema.
+     SCHEMA_MISMATCH_EXTRA_FIELDS = 7
+   end
+ end
  end
  end
  end
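
Taken together, these messages describe the exactly-once batch path: append into a `PENDING` stream, finalize it, then commit atomically. A hedged sketch of that lifecycle, assuming `BigQueryWrite::Client` methods that correspond one-to-one to the request messages above; the table path is a placeholder:

    client = Google::Cloud::Bigquery::Storage::V1::BigQueryWrite::Client.new
    table = "projects/my-project/datasets/my_dataset/tables/my_table"

    # 1. Create a stream whose data stays invisible until committed.
    stream = client.create_write_stream(
      parent: table,
      write_stream: Google::Cloud::Bigquery::Storage::V1::WriteStream.new(type: :PENDING)
    )

    # 2. Append rows to stream.name (see the AppendRows sketch above).

    # 3. Finalize: the stream accepts no further appends.
    client.finalize_write_stream name: stream.name

    # 4. Commit: either every listed stream commits, or none do.
    commit = client.batch_commit_write_streams parent: table, write_streams: [stream.name]
    if commit.stream_errors.empty?
      puts "committed at #{commit.commit_time}"
    else
      commit.stream_errors.each { |e| warn "#{e.entity}: #{e.error_message} (#{e.code})" }
    end
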
@@ -117,6 +117,64 @@ module Google
  extend ::Google::Protobuf::MessageExts::ClassMethods
  end

+ # Information about a single stream that writes data into the storage system.
+ # @!attribute [r] name
+ #   @return [::String]
+ #     Output only. Name of the stream, in the form
+ #     `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ # @!attribute [rw] type
+ #   @return [::Google::Cloud::Bigquery::Storage::V1::WriteStream::Type]
+ #     Immutable. Type of the stream.
+ # @!attribute [r] create_time
+ #   @return [::Google::Protobuf::Timestamp]
+ #     Output only. Create time of the stream. For the _default stream, this is
+ #     the creation_time of the table.
+ # @!attribute [r] commit_time
+ #   @return [::Google::Protobuf::Timestamp]
+ #     Output only. Commit time of the stream.
+ #     If a stream is of `COMMITTED` type, then it will have a commit_time the
+ #     same as `create_time`. If the stream is of `PENDING` type, an empty
+ #     commit_time means it is not committed.
+ # @!attribute [r] table_schema
+ #   @return [::Google::Cloud::Bigquery::Storage::V1::TableSchema]
+ #     Output only. The schema of the destination table. It is only returned in
+ #     the `CreateWriteStream` response. The caller should generate data that is
+ #     compatible with this schema to send in the initial `AppendRowsRequest`.
+ #     The table schema could go out of date during the lifetime of the stream.
+ # @!attribute [rw] write_mode
+ #   @return [::Google::Cloud::Bigquery::Storage::V1::WriteStream::WriteMode]
+ #     Immutable. Mode of the stream.
+ class WriteStream
+   include ::Google::Protobuf::MessageExts
+   extend ::Google::Protobuf::MessageExts::ClassMethods
+
+   # Type enum of the stream.
+   module Type
+     # Unknown type.
+     TYPE_UNSPECIFIED = 0
+
+     # Data will commit automatically and appear as soon as the write is
+     # acknowledged.
+     COMMITTED = 1
+
+     # Data is invisible until the stream is committed.
+     PENDING = 2
+
+     # Data is only visible up to the offset to which it was flushed.
+     BUFFERED = 3
+   end
+
+   # Mode enum of the stream.
+   module WriteMode
+     # Unknown mode.
+     WRITE_MODE_UNSPECIFIED = 0
+
+     # Insert new records into the table.
+     # This is the default value if customers do not specify it.
+     INSERT = 1
+   end
+ end
+
  # Data format for input or output data.
  module DataFormat
  DATA_FORMAT_UNSPECIFIED = 0
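
To illustrate how the `Type` and `commit_time` semantics above interact, a small hedged sketch, reusing the `client` from earlier; `stream_name` is a placeholder:

    ws = client.get_write_stream name: stream_name
    case ws.type
    when :COMMITTED
      # Rows are visible as soon as each append is acknowledged.
      puts "committed stream; commit_time equals create_time (#{ws.commit_time})"
    when :PENDING
      # An unset commit_time means the stream has not been committed yet.
      puts ws.commit_time ? "committed at #{ws.commit_time}" : "not committed yet"
    when :BUFFERED
      puts "rows visible only up to the last flushed offset"
    end
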
@@ -0,0 +1,172 @@
+ # frozen_string_literal: true
+
+ # Copyright 2021 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+
+ module Google
+   module Cloud
+     module Bigquery
+       module Storage
+         module V1
+           # Schema of a table.
+           # @!attribute [rw] fields
+           #   @return [::Array<::Google::Cloud::Bigquery::Storage::V1::TableFieldSchema>]
+           #     Describes the fields in a table.
+           class TableSchema
+             include ::Google::Protobuf::MessageExts
+             extend ::Google::Protobuf::MessageExts::ClassMethods
+           end
+
+           # TableFieldSchema defines a single field/column within a table schema.
+           # @!attribute [rw] name
+           #   @return [::String]
+           #     Required. The field name. The name must contain only letters (a-z, A-Z),
+           #     numbers (0-9), or underscores (_), and must start with a letter or
+           #     underscore. The maximum length is 128 characters.
+           # @!attribute [rw] type
+           #   @return [::Google::Cloud::Bigquery::Storage::V1::TableFieldSchema::Type]
+           #     Required. The field data type.
+           # @!attribute [rw] mode
+           #   @return [::Google::Cloud::Bigquery::Storage::V1::TableFieldSchema::Mode]
+           #     Optional. The field mode. The default value is NULLABLE.
+           # @!attribute [rw] fields
+           #   @return [::Array<::Google::Cloud::Bigquery::Storage::V1::TableFieldSchema>]
+           #     Optional. Describes the nested schema fields if the type property is
+           #     set to STRUCT.
+           # @!attribute [rw] description
+           #   @return [::String]
+           #     Optional. The field description. The maximum length is 1,024 characters.
+           # @!attribute [rw] max_length
+           #   @return [::Integer]
+           #     Optional. Maximum length of values of this field for STRINGS or BYTES.
+           #
+           #     If max_length is not specified, no maximum length constraint is
+           #     imposed on this field.
+           #
+           #     If type = "STRING", then max_length represents the maximum UTF-8
+           #     length of strings in this field.
+           #
+           #     If type = "BYTES", then max_length represents the maximum number of
+           #     bytes in this field.
+           #
+           #     It is invalid to set this field if type is not "STRING" or "BYTES".
+           # @!attribute [rw] precision
+           #   @return [::Integer]
+           #     Optional. Precision (maximum number of total digits in base 10) and
+           #     scale (maximum number of digits in the fractional part in base 10)
+           #     constraints for values of this field for NUMERIC or BIGNUMERIC.
+           #
+           #     It is invalid to set precision or scale if type is not "NUMERIC" or
+           #     "BIGNUMERIC".
+           #
+           #     If precision and scale are not specified, no value range constraint
+           #     is imposed on this field insofar as values are permitted by the type.
+           #
+           #     Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+           #
+           #     * Precision (P) and scale (S) are specified:
+           #       [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+           #     * Precision (P) is specified but not scale (and thus scale is
+           #       interpreted to be equal to zero):
+           #       [-10^P + 1, 10^P - 1].
+           #
+           #     Acceptable values for precision and scale if both are specified:
+           #
+           #     * If type = "NUMERIC":
+           #       1 <= precision - scale <= 29 and 0 <= scale <= 9.
+           #     * If type = "BIGNUMERIC":
+           #       1 <= precision - scale <= 38 and 0 <= scale <= 38.
+           #
+           #     Acceptable values for precision if only precision is specified but
+           #     not scale (and thus scale is interpreted to be equal to zero):
+           #
+           #     * If type = "NUMERIC": 1 <= precision <= 29.
+           #     * If type = "BIGNUMERIC": 1 <= precision <= 38.
+           #
+           #     If scale is specified but not precision, then it is invalid.
+           # @!attribute [rw] scale
+           #   @return [::Integer]
+           #     Optional. See documentation for precision.
+           class TableFieldSchema
+             include ::Google::Protobuf::MessageExts
+             extend ::Google::Protobuf::MessageExts::ClassMethods
+
+             module Type
+               # Illegal value
+               TYPE_UNSPECIFIED = 0
+
+               # 64K, UTF8
+               STRING = 1
+
+               # 64-bit signed
+               INT64 = 2
+
+               # 64-bit IEEE floating point
+               DOUBLE = 3
+
+               # Aggregate type
+               STRUCT = 4
+
+               # 64K, Binary
+               BYTES = 5
+
+               # 2-valued
+               BOOL = 6
+
+               # 64-bit signed usec since UTC epoch
+               TIMESTAMP = 7
+
+               # Civil date - Year, Month, Day
+               DATE = 8
+
+               # Civil time - Hour, Minute, Second, Microseconds
+               TIME = 9
+
+               # Combination of civil date and civil time
+               DATETIME = 10
+
+               # Geography object
+               GEOGRAPHY = 11
+
+               # Numeric value
+               NUMERIC = 12
+
+               # BigNumeric value
+               BIGNUMERIC = 13
+
+               # Interval
+               INTERVAL = 14
+
+               # JSON, String
+               JSON = 15
+             end
+
+             module Mode
+               # Illegal value
+               MODE_UNSPECIFIED = 0
+
+               NULLABLE = 1
+
+               REQUIRED = 2
+
+               REPEATED = 3
+             end
+           end
+         end
+       end
+     end
+   end
+ end
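
To make the precision/scale rules concrete: a NUMERIC field with precision 10 and scale 2 satisfies 1 <= 10 - 2 <= 29 and 0 <= 2 <= 9, and admits values in [-10^8 + 0.01, 10^8 - 0.01]. A hedged sketch of building such a schema with the messages defined above; the field names are made up:

    price = Google::Cloud::Bigquery::Storage::V1::TableFieldSchema.new(
      name: "price",
      type: :NUMERIC,
      mode: :REQUIRED,
      precision: 10,  # at most 10 total digits...
      scale: 2        # ...of which at most 2 are fractional
    )

    table_schema = Google::Cloud::Bigquery::Storage::V1::TableSchema.new(
      fields: [
        Google::Cloud::Bigquery::Storage::V1::TableFieldSchema.new(
          name: "item", type: :STRING, mode: :NULLABLE, max_length: 300
        ),
        price
      ]
    )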