google-cloud-bigquery-storage-v1 1.5.1 → 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/google/cloud/bigquery/storage/v1/arrow_pb.rb +2 -1
- data/lib/google/cloud/bigquery/storage/v1/avro_pb.rb +2 -1
- data/lib/google/cloud/bigquery/storage/v1/big_query_read/client.rb +2 -2
- data/lib/google/cloud/bigquery/storage/v1/storage_services_pb.rb +2 -2
- data/lib/google/cloud/bigquery/storage/v1/stream_pb.rb +1 -1
- data/lib/google/cloud/bigquery/storage/v1/table_pb.rb +3 -1
- data/lib/google/cloud/bigquery/storage/v1/version.rb +1 -1
- data/proto_docs/google/cloud/bigquery/storage/v1/arrow.rb +25 -0
- data/proto_docs/google/cloud/bigquery/storage/v1/avro.rb +25 -0
- data/proto_docs/google/cloud/bigquery/storage/v1/storage.rb +6 -9
- data/proto_docs/google/cloud/bigquery/storage/v1/stream.rb +3 -3
- data/proto_docs/google/cloud/bigquery/storage/v1/table.rb +9 -0
- metadata +1 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 007c47637514ccd42c1ae4a09561886979d8c219cc19499dd666955bfb6a3d91
+  data.tar.gz: 7827779ae2c06f4daae67a0a7855ad99a28054e8faa22b171e966d6a7b84fe69
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e4023d4172edbf9aa7c042973664f283a1bf7f9a48c2079de15510cd1d9e33cf5c751b320afa168d7f0adb73afdec46bdc77ac99209b65be5b6dddc2240ddfae
+  data.tar.gz: 6223f941fec0490d5565c4ed6ee8a9f429dffa4c5ea95e611c71c755e72ded2d7682d4872f02120ca5a450e0e775ae9e382e5310ce40481d94472d83ee36871b

data/lib/google/cloud/bigquery/storage/v1/arrow_pb.rb
CHANGED
@@ -5,7 +5,7 @@
 require 'google/protobuf'
 
 
-
descriptor_data = "\n,google/cloud/bigquery/storage/v1/arrow.proto\x12 google.cloud.bigquery.storage.v1\"(\n\x0b\x41rrowSchema\x12\x19\n\x11serialized_schema\x18\x01 \x01(\x0c\"J\n\x10\x41rrowRecordBatch\x12\x1f\n\x17serialized_record_batch\x18\x01 \x01(\x0c\x12\x15\n\trow_count\x18\x02 \x01(\x03\x42\x02\x18\x01\"\
+
descriptor_data = "\n,google/cloud/bigquery/storage/v1/arrow.proto\x12 google.cloud.bigquery.storage.v1\"(\n\x0b\x41rrowSchema\x12\x19\n\x11serialized_schema\x18\x01 \x01(\x0c\"J\n\x10\x41rrowRecordBatch\x12\x1f\n\x17serialized_record_batch\x18\x01 \x01(\x0c\x12\x15\n\trow_count\x18\x02 \x01(\x03\x42\x02\x18\x01\"\xec\x03\n\x19\x41rrowSerializationOptions\x12h\n\x12\x62uffer_compression\x18\x02 \x01(\x0e\x32L.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec\x12v\n\x19picos_timestamp_precision\x18\x03 \x01(\x0e\x32S.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision\"H\n\x10\x43ompressionCodec\x12\x1b\n\x17\x43OMPRESSION_UNSPECIFIED\x10\x00\x12\r\n\tLZ4_FRAME\x10\x01\x12\x08\n\x04ZSTD\x10\x02\"\xa2\x01\n\x17PicosTimestampPrecision\x12)\n%PICOS_TIMESTAMP_PRECISION_UNSPECIFIED\x10\x00\x12\x1e\n\x1aTIMESTAMP_PRECISION_MICROS\x10\x01\x12\x1d\n\x19TIMESTAMP_PRECISION_NANOS\x10\x02\x12\x1d\n\x19TIMESTAMP_PRECISION_PICOS\x10\x03\x42\xba\x01\n$com.google.cloud.bigquery.storage.v1B\nArrowProtoP\x01Z>cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb\xaa\x02 Google.Cloud.BigQuery.Storage.V1\xca\x02 Google\\Cloud\\BigQuery\\Storage\\V1b\x06proto3"
 
 pool = Google::Protobuf::DescriptorPool.generated_pool
 
@@ -40,6 +40,7 @@ module Google
 ArrowRecordBatch = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.ArrowRecordBatch").msgclass
 ArrowSerializationOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.ArrowSerializationOptions").msgclass
 ArrowSerializationOptions::CompressionCodec = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec").enummodule
+ArrowSerializationOptions::PicosTimestampPrecision = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision").enummodule
 end
 end
 end

data/lib/google/cloud/bigquery/storage/v1/avro_pb.rb
CHANGED
@@ -5,7 +5,7 @@
 require 'google/protobuf'
 
 
-
descriptor_data = "\n+google/cloud/bigquery/storage/v1/avro.proto\x12 google.cloud.bigquery.storage.v1\"\x1c\n\nAvroSchema\x12\x0e\n\x06schema\x18\x01 \x01(\t\"A\n\x08\x41vroRows\x12\x1e\n\x16serialized_binary_rows\x18\x01 \x01(\x0c\x12\x15\n\trow_count\x18\x02 \x01(\x03\x42\x02\x18\x01\"
+
descriptor_data = "\n+google/cloud/bigquery/storage/v1/avro.proto\x12 google.cloud.bigquery.storage.v1\"\x1c\n\nAvroSchema\x12\x0e\n\x06schema\x18\x01 \x01(\t\"A\n\x08\x41vroRows\x12\x1e\n\x16serialized_binary_rows\x18\x01 \x01(\x0c\x12\x15\n\trow_count\x18\x02 \x01(\x03\x42\x02\x18\x01\"\xdd\x02\n\x18\x41vroSerializationOptions\x12%\n\x1d\x65nable_display_name_attribute\x18\x01 \x01(\x08\x12u\n\x19picos_timestamp_precision\x18\x02 \x01(\x0e\x32R.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision\"\xa2\x01\n\x17PicosTimestampPrecision\x12)\n%PICOS_TIMESTAMP_PRECISION_UNSPECIFIED\x10\x00\x12\x1e\n\x1aTIMESTAMP_PRECISION_MICROS\x10\x01\x12\x1d\n\x19TIMESTAMP_PRECISION_NANOS\x10\x02\x12\x1d\n\x19TIMESTAMP_PRECISION_PICOS\x10\x03\x42\xb9\x01\n$com.google.cloud.bigquery.storage.v1B\tAvroProtoP\x01Z>cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb\xaa\x02 Google.Cloud.BigQuery.Storage.V1\xca\x02 Google\\Cloud\\BigQuery\\Storage\\V1b\x06proto3"
 
 pool = Google::Protobuf::DescriptorPool.generated_pool
 
@@ -39,6 +39,7 @@ module Google
 AvroSchema = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.AvroSchema").msgclass
 AvroRows = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.AvroRows").msgclass
 AvroSerializationOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.AvroSerializationOptions").msgclass
+AvroSerializationOptions::PicosTimestampPrecision = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision").enummodule
 end
 end
 end

data/lib/google/cloud/bigquery/storage/v1/big_query_read/client.rb
CHANGED
@@ -329,9 +329,9 @@ module Google
 
 ##
 # Reads rows from the stream in the format prescribed by the ReadSession.
-# Each response contains one or more table rows, up to a maximum of
+# Each response contains one or more table rows, up to a maximum of 128 MB
 # per response; read requests which attempt to read individual rows larger
-# than
+# than 128 MB will fail.
 #
 # Each request also returns a set of stream statistics reflecting the current
 # state of the stream.

data/lib/google/cloud/bigquery/storage/v1/storage_services_pb.rb
CHANGED
@@ -57,9 +57,9 @@ module Google
 # not require manual clean-up by the caller.
 rpc :CreateReadSession, ::Google::Cloud::Bigquery::Storage::V1::CreateReadSessionRequest, ::Google::Cloud::Bigquery::Storage::V1::ReadSession
 # Reads rows from the stream in the format prescribed by the ReadSession.
-# Each response contains one or more table rows, up to a maximum of
+# Each response contains one or more table rows, up to a maximum of 128 MB
 # per response; read requests which attempt to read individual rows larger
-# than
+# than 128 MB will fail.
 #
 # Each request also returns a set of stream statistics reflecting the current
 # state of the stream.
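
The two comment changes above only pin down the documented ReadRows response limit at 128 MB. For orientation, a minimal read sketch against this generated client might look like the following; the project, dataset, and table names are placeholders and error handling is omitted.

require "google/cloud/bigquery/storage/v1"

read_client = Google::Cloud::Bigquery::Storage::V1::BigQueryRead::Client.new

session = read_client.create_read_session(
  parent: "projects/PROJECT_ID",
  read_session: Google::Cloud::Bigquery::Storage::V1::ReadSession.new(
    table: "projects/PROJECT_ID/datasets/DATASET/tables/TABLE",
    data_format: :AVRO
  ),
  max_stream_count: 1
)

# read_rows streams ReadRowsResponse messages; each carries at most 128 MB of rows
# plus the stream statistics mentioned in the comment above.
read_client.read_rows(read_stream: session.streams.first.name).each do |response|
  puts response.row_count
end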

data/lib/google/cloud/bigquery/storage/v1/stream_pb.rb
CHANGED
@@ -12,7 +12,7 @@ require 'google/cloud/bigquery/storage/v1/table_pb'
 require 'google/protobuf/timestamp_pb'
 
 
-
descriptor_data = "\n-google/cloud/bigquery/storage/v1/stream.proto\x12 google.cloud.bigquery.storage.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a,google/cloud/bigquery/storage/v1/arrow.proto\x1a+google/cloud/bigquery/storage/v1/avro.proto\x1a,google/cloud/bigquery/storage/v1/table.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xc3\x0c\n\x0bReadSession\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x34\n\x0b\x65xpire_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x46\n\x0b\x64\x61ta_format\x18\x03 \x01(\x0e\x32,.google.cloud.bigquery.storage.v1.DataFormatB\x03\xe0\x41\x05\x12H\n\x0b\x61vro_schema\x18\x04 \x01(\x0b\x32,.google.cloud.bigquery.storage.v1.AvroSchemaB\x03\xe0\x41\x03H\x00\x12J\n\x0c\x61rrow_schema\x18\x05 \x01(\x0b\x32-.google.cloud.bigquery.storage.v1.ArrowSchemaB\x03\xe0\x41\x03H\x00\x12\x34\n\x05table\x18\x06 \x01(\tB%\xe0\x41\x05\xfa\x41\x1f\n\x1d\x62igquery.googleapis.com/Table\x12Z\n\x0ftable_modifiers\x18\x07 \x01(\x0b\x32<.google.cloud.bigquery.storage.v1.ReadSession.TableModifiersB\x03\xe0\x41\x01\x12Y\n\x0cread_options\x18\x08 \x01(\x0b\x32>.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptionsB\x03\xe0\x41\x01\x12\x42\n\x07streams\x18\n \x03(\x0b\x32,.google.cloud.bigquery.storage.v1.ReadStreamB\x03\xe0\x41\x03\x12*\n\x1d\x65stimated_total_bytes_scanned\x18\x0c \x01(\x03\x42\x03\xe0\x41\x03\x12/\n\"estimated_total_physical_file_size\x18\x0f \x01(\x03\x42\x03\xe0\x41\x03\x12 \n\x13\x65stimated_row_count\x18\x0e \x01(\x03\x42\x03\xe0\x41\x03\x12\x15\n\x08trace_id\x18\r \x01(\tB\x03\xe0\x41\x01\x1a\x43\n\x0eTableModifiers\x12\x31\n\rsnapshot_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\x89\x05\n\x10TableReadOptions\x12\x17\n\x0fselected_fields\x18\x01 \x03(\t\x12\x17\n\x0frow_restriction\x18\x02 \x01(\t\x12g\n\x1b\x61rrow_serialization_options\x18\x03 \x01(\x0b\x32;.google.cloud.bigquery.storage.v1.ArrowSerializationOptionsB\x03\xe0\x41\x01H\x00\x12\x65\n\x1a\x61vro_serialization_options\x18\x04 \x01(\x0b\x32:.google.cloud.bigquery.storage.v1.AvroSerializationOptionsB\x03\xe0\x41\x01H\x00\x12#\n\x11sample_percentage\x18\x05 \x01(\x01\x42\x03\xe0\x41\x01H\x01\x88\x01\x01\x12\x85\x01\n\x1aresponse_compression_codec\x18\x06 \x01(\x0e\x32W.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodecB\x03\xe0\x41\x01H\x02\x88\x01\x01\"j\n\x18ResponseCompressionCodec\x12*\n&RESPONSE_COMPRESSION_CODEC_UNSPECIFIED\x10\x00\x12\"\n\x1eRESPONSE_COMPRESSION_CODEC_LZ4\x10\x02\x42%\n#output_format_serialization_optionsB\x14\n\x12_sample_percentageB\x1d\n\x1b_response_compression_codec:k\xea\x41h\n*bigquerystorage.googleapis.com/ReadSession\x12:projects/{project}/locations/{location}/sessions/{session}B\x08\n\x06schema\"\x9c\x01\n\nReadStream\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03:{\xea\x41x\n)bigquerystorage.googleapis.com/ReadStream\x12Kprojects/{project}/locations/{location}/sessions/{session}/streams/{stream}\"\xfb\x04\n\x0bWriteStream\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x45\n\x04type\x18\x02 \x01(\x0e\x32\x32.google.cloud.bigquery.storage.v1.WriteStream.TypeB\x03\xe0\x41\x05\x12\x34\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x34\n\x0b\x63ommit_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12H\n\x0ctable_schema\x18\x05 \x01(\x0b\x32-.google.cloud.bigquery.storage.v1.TableSchemaB\x03\xe0\x41\x03\x12P\n\nwrite_mode\x18\x07 
\x01(\x0e\x32\x37.google.cloud.bigquery.storage.v1.WriteStream.WriteModeB\x03\xe0\x41\x05\x12\x15\n\x08location\x18\x08 \x01(\tB\x03\xe0\x41\
+
descriptor_data = "\n-google/cloud/bigquery/storage/v1/stream.proto\x12 google.cloud.bigquery.storage.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a,google/cloud/bigquery/storage/v1/arrow.proto\x1a+google/cloud/bigquery/storage/v1/avro.proto\x1a,google/cloud/bigquery/storage/v1/table.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xc3\x0c\n\x0bReadSession\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x34\n\x0b\x65xpire_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x46\n\x0b\x64\x61ta_format\x18\x03 \x01(\x0e\x32,.google.cloud.bigquery.storage.v1.DataFormatB\x03\xe0\x41\x05\x12H\n\x0b\x61vro_schema\x18\x04 \x01(\x0b\x32,.google.cloud.bigquery.storage.v1.AvroSchemaB\x03\xe0\x41\x03H\x00\x12J\n\x0c\x61rrow_schema\x18\x05 \x01(\x0b\x32-.google.cloud.bigquery.storage.v1.ArrowSchemaB\x03\xe0\x41\x03H\x00\x12\x34\n\x05table\x18\x06 \x01(\tB%\xe0\x41\x05\xfa\x41\x1f\n\x1d\x62igquery.googleapis.com/Table\x12Z\n\x0ftable_modifiers\x18\x07 \x01(\x0b\x32<.google.cloud.bigquery.storage.v1.ReadSession.TableModifiersB\x03\xe0\x41\x01\x12Y\n\x0cread_options\x18\x08 \x01(\x0b\x32>.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptionsB\x03\xe0\x41\x01\x12\x42\n\x07streams\x18\n \x03(\x0b\x32,.google.cloud.bigquery.storage.v1.ReadStreamB\x03\xe0\x41\x03\x12*\n\x1d\x65stimated_total_bytes_scanned\x18\x0c \x01(\x03\x42\x03\xe0\x41\x03\x12/\n\"estimated_total_physical_file_size\x18\x0f \x01(\x03\x42\x03\xe0\x41\x03\x12 \n\x13\x65stimated_row_count\x18\x0e \x01(\x03\x42\x03\xe0\x41\x03\x12\x15\n\x08trace_id\x18\r \x01(\tB\x03\xe0\x41\x01\x1a\x43\n\x0eTableModifiers\x12\x31\n\rsnapshot_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\x89\x05\n\x10TableReadOptions\x12\x17\n\x0fselected_fields\x18\x01 \x03(\t\x12\x17\n\x0frow_restriction\x18\x02 \x01(\t\x12g\n\x1b\x61rrow_serialization_options\x18\x03 \x01(\x0b\x32;.google.cloud.bigquery.storage.v1.ArrowSerializationOptionsB\x03\xe0\x41\x01H\x00\x12\x65\n\x1a\x61vro_serialization_options\x18\x04 \x01(\x0b\x32:.google.cloud.bigquery.storage.v1.AvroSerializationOptionsB\x03\xe0\x41\x01H\x00\x12#\n\x11sample_percentage\x18\x05 \x01(\x01\x42\x03\xe0\x41\x01H\x01\x88\x01\x01\x12\x85\x01\n\x1aresponse_compression_codec\x18\x06 \x01(\x0e\x32W.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodecB\x03\xe0\x41\x01H\x02\x88\x01\x01\"j\n\x18ResponseCompressionCodec\x12*\n&RESPONSE_COMPRESSION_CODEC_UNSPECIFIED\x10\x00\x12\"\n\x1eRESPONSE_COMPRESSION_CODEC_LZ4\x10\x02\x42%\n#output_format_serialization_optionsB\x14\n\x12_sample_percentageB\x1d\n\x1b_response_compression_codec:k\xea\x41h\n*bigquerystorage.googleapis.com/ReadSession\x12:projects/{project}/locations/{location}/sessions/{session}B\x08\n\x06schema\"\x9c\x01\n\nReadStream\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03:{\xea\x41x\n)bigquerystorage.googleapis.com/ReadStream\x12Kprojects/{project}/locations/{location}/sessions/{session}/streams/{stream}\"\xfb\x04\n\x0bWriteStream\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x45\n\x04type\x18\x02 \x01(\x0e\x32\x32.google.cloud.bigquery.storage.v1.WriteStream.TypeB\x03\xe0\x41\x05\x12\x34\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x34\n\x0b\x63ommit_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12H\n\x0ctable_schema\x18\x05 \x01(\x0b\x32-.google.cloud.bigquery.storage.v1.TableSchemaB\x03\xe0\x41\x03\x12P\n\nwrite_mode\x18\x07 
\x01(\x0e\x32\x37.google.cloud.bigquery.storage.v1.WriteStream.WriteModeB\x03\xe0\x41\x05\x12\x15\n\x08location\x18\x08 \x01(\tB\x03\xe0\x41\x03\"F\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\r\n\tCOMMITTED\x10\x01\x12\x0b\n\x07PENDING\x10\x02\x12\x0c\n\x08\x42UFFERED\x10\x03\"3\n\tWriteMode\x12\x1a\n\x16WRITE_MODE_UNSPECIFIED\x10\x00\x12\n\n\x06INSERT\x10\x01:v\xea\x41s\n*bigquerystorage.googleapis.com/WriteStream\x12\x45projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}*>\n\nDataFormat\x12\x1b\n\x17\x44\x41TA_FORMAT_UNSPECIFIED\x10\x00\x12\x08\n\x04\x41VRO\x10\x01\x12\t\n\x05\x41RROW\x10\x02*I\n\x0fWriteStreamView\x12!\n\x1dWRITE_STREAM_VIEW_UNSPECIFIED\x10\x00\x12\t\n\x05\x42\x41SIC\x10\x01\x12\x08\n\x04\x46ULL\x10\x02\x42\xbb\x01\n$com.google.cloud.bigquery.storage.v1B\x0bStreamProtoP\x01Z>cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb\xaa\x02 Google.Cloud.BigQuery.Storage.V1\xca\x02 Google\\Cloud\\BigQuery\\Storage\\V1b\x06proto3"
 
 pool = Google::Protobuf::DescriptorPool.generated_pool
 

data/lib/google/cloud/bigquery/storage/v1/table_pb.rb
CHANGED
@@ -5,9 +5,10 @@
 require 'google/protobuf'
 
 require 'google/api/field_behavior_pb'
+require 'google/protobuf/wrappers_pb'
 
 
-
descriptor_data = "\n,google/cloud/bigquery/storage/v1/table.proto\x12 google.cloud.bigquery.storage.v1\x1a\x1fgoogle/api/field_behavior.proto\"Q\n\x0bTableSchema\x12\x42\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x32.google.cloud.bigquery.storage.v1.TableFieldSchema\"\
+
descriptor_data = "\n,google/cloud/bigquery/storage/v1/table.proto\x12 google.cloud.bigquery.storage.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1egoogle/protobuf/wrappers.proto\"Q\n\x0bTableSchema\x12\x42\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x32.google.cloud.bigquery.storage.v1.TableFieldSchema\"\xbc\x07\n\x10TableFieldSchema\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12J\n\x04type\x18\x02 \x01(\x0e\x32\x37.google.cloud.bigquery.storage.v1.TableFieldSchema.TypeB\x03\xe0\x41\x02\x12J\n\x04mode\x18\x03 \x01(\x0e\x32\x37.google.cloud.bigquery.storage.v1.TableFieldSchema.ModeB\x03\xe0\x41\x01\x12G\n\x06\x66ields\x18\x04 \x03(\x0b\x32\x32.google.cloud.bigquery.storage.v1.TableFieldSchemaB\x03\xe0\x41\x01\x12\x18\n\x0b\x64\x65scription\x18\x06 \x01(\tB\x03\xe0\x41\x01\x12\x17\n\nmax_length\x18\x07 \x01(\x03\x42\x03\xe0\x41\x01\x12\x16\n\tprecision\x18\x08 \x01(\x03\x42\x03\xe0\x41\x01\x12\x12\n\x05scale\x18\t \x01(\x03\x42\x03\xe0\x41\x01\x12%\n\x18\x64\x65\x66\x61ult_value_expression\x18\n \x01(\tB\x03\xe0\x41\x01\x12=\n\x13timestamp_precision\x18\x1b \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\x03\xe0\x41\x01\x12\x64\n\x12range_element_type\x18\x0b \x01(\x0b\x32\x43.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementTypeB\x03\xe0\x41\x01\x1a^\n\x10\x46ieldElementType\x12J\n\x04type\x18\x01 \x01(\x0e\x32\x37.google.cloud.bigquery.storage.v1.TableFieldSchema.TypeB\x03\xe0\x41\x02\"\xe0\x01\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\n\n\x06STRING\x10\x01\x12\t\n\x05INT64\x10\x02\x12\n\n\x06\x44OUBLE\x10\x03\x12\n\n\x06STRUCT\x10\x04\x12\t\n\x05\x42YTES\x10\x05\x12\x08\n\x04\x42OOL\x10\x06\x12\r\n\tTIMESTAMP\x10\x07\x12\x08\n\x04\x44\x41TE\x10\x08\x12\x08\n\x04TIME\x10\t\x12\x0c\n\x08\x44\x41TETIME\x10\n\x12\r\n\tGEOGRAPHY\x10\x0b\x12\x0b\n\x07NUMERIC\x10\x0c\x12\x0e\n\nBIGNUMERIC\x10\r\x12\x0c\n\x08INTERVAL\x10\x0e\x12\x08\n\x04JSON\x10\x0f\x12\t\n\x05RANGE\x10\x10\"F\n\x04Mode\x12\x14\n\x10MODE_UNSPECIFIED\x10\x00\x12\x0c\n\x08NULLABLE\x10\x01\x12\x0c\n\x08REQUIRED\x10\x02\x12\x0c\n\x08REPEATED\x10\x03\x42\xba\x01\n$com.google.cloud.bigquery.storage.v1B\nTableProtoP\x01Z>cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb\xaa\x02 Google.Cloud.BigQuery.Storage.V1\xca\x02 Google\\Cloud\\BigQuery\\Storage\\V1b\x06proto3"
 
 pool = Google::Protobuf::DescriptorPool.generated_pool
 
@@ -22,6 +23,7 @@ rescue TypeError
 file = pool.add_serialized_file(serialized)
 warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
 imports = [
+["google.protobuf.Int64Value", "google/protobuf/wrappers.proto"],
 ]
 imports.each do |type_name, expected_filename|
 import_file = pool.lookup(type_name).file_descriptor

data/proto_docs/google/cloud/bigquery/storage/v1/arrow.rb
CHANGED
@@ -55,6 +55,10 @@ module Google
 # @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions::CompressionCodec]
 # The compression codec to use for Arrow buffers in serialized record
 # batches.
+# @!attribute [rw] picos_timestamp_precision
+# @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions::PicosTimestampPrecision]
+# Optional. Set timestamp precision option. If not set, the default precision
+# is microseconds.
 class ArrowSerializationOptions
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -70,6 +74,27 @@ module Google
 # Zstandard compression.
 ZSTD = 2
 end
+
+# The precision of the timestamp value in the Avro message. This precision
+# will **only** be applied to the column(s) with the `TIMESTAMP_PICOS` type.
+module PicosTimestampPrecision
+# Unspecified timestamp precision. The default precision is microseconds.
+PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0
+
+# Timestamp values returned by Read API will be truncated to microsecond
+# level precision. The value will be encoded as Arrow TIMESTAMP type in a
+# 64 bit integer.
+TIMESTAMP_PRECISION_MICROS = 1
+
+# Timestamp values returned by Read API will be truncated to nanosecond
+# level precision. The value will be encoded as Arrow TIMESTAMP type in a
+# 64 bit integer.
+TIMESTAMP_PRECISION_NANOS = 2
+
+# Read API will return full precision picosecond value. The value will be
+# encoded as a string which conforms to ISO 8601 format.
+TIMESTAMP_PRECISION_PICOS = 3
+end
 end
 end
 end
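
The added PicosTimestampPrecision enum only affects how TIMESTAMP_PICOS columns are encoded in Arrow record batches. A minimal sketch of setting it when building a read session (resource names are placeholders):

arrow_options = Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions.new(
  buffer_compression: :LZ4_FRAME,
  # Without this, TIMESTAMP_PICOS columns fall back to microsecond precision.
  picos_timestamp_precision: :TIMESTAMP_PRECISION_NANOS
)

read_session = Google::Cloud::Bigquery::Storage::V1::ReadSession.new(
  table: "projects/PROJECT_ID/datasets/DATASET/tables/TABLE",
  data_format: :ARROW,
  read_options: Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions.new(
    arrow_serialization_options: arrow_options
  )
)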

data/proto_docs/google/cloud/bigquery/storage/v1/avro.rb
CHANGED
@@ -59,9 +59,34 @@ module Google
 # Setting this field to true, populates avro field names with a placeholder
 # value and populates a "displayName" attribute for every avro field with the
 # original column name.
+# @!attribute [rw] picos_timestamp_precision
+# @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions::PicosTimestampPrecision]
+# Optional. Set timestamp precision option. If not set, the default precision
+# is microseconds.
 class AvroSerializationOptions
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
+
+# The precision of the timestamp value in the Avro message. This precision
+# will **only** be applied to the column(s) with the `TIMESTAMP_PICOS` type.
+module PicosTimestampPrecision
+# Unspecified timestamp precision. The default precision is microseconds.
+PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0
+
+# Timestamp values returned by Read API will be truncated to microsecond
+# level precision. The value will be encoded as Avro TIMESTAMP type in a
+# 64 bit integer.
+TIMESTAMP_PRECISION_MICROS = 1
+
+# Timestamp values returned by Read API will be truncated to nanosecond
+# level precision. The value will be encoded as Avro TIMESTAMP type in a
+# 64 bit integer.
+TIMESTAMP_PRECISION_NANOS = 2
+
+# Read API will return full precision picosecond value. The value will be
+# encoded as a string which conforms to ISO 8601 format.
+TIMESTAMP_PRECISION_PICOS = 3
+end
 end
 end
 end
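
The Avro options mirror the Arrow ones, except that TIMESTAMP_PRECISION_PICOS returns values as ISO 8601 strings. A sketch of the equivalent Avro read options, with the same caveats:

avro_options = Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions.new(
  enable_display_name_attribute: true,
  # Full picosecond values are returned as ISO 8601 formatted strings.
  picos_timestamp_precision: :TIMESTAMP_PRECISION_PICOS
)

read_options = Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions.new(
  avro_serialization_options: avro_options
)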

data/proto_docs/google/cloud/bigquery/storage/v1/storage.rb
CHANGED
@@ -268,8 +268,7 @@ module Google
 # Note: The following fields are mutually exclusive: `proto_rows`, `arrow_rows`. If a field in that set is populated, all other fields in the set will automatically be cleared.
 # @!attribute [rw] arrow_rows
 # @return [::Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest::ArrowData]
-# Rows in arrow format.
-# allowlisted customers.
+# Rows in arrow format.
 #
 # Note: The following fields are mutually exclusive: `arrow_rows`, `proto_rows`. If a field in that set is populated, all other fields in the set will automatically be cleared.
 # @!attribute [rw] trace_id
@@ -300,8 +299,8 @@ module Google
 # @return [::Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest::MissingValueInterpretation]
 # Optional. Default missing value interpretation for all columns in the
 # table. When a value is specified on an `AppendRowsRequest`, it is applied
-# to all requests
-#
+# to all requests from that point forward, until a subsequent
+# `AppendRowsRequest` sets it to a different value.
 # `missing_value_interpretation` can override
 # `default_missing_value_interpretation`. For example, if you want to write
 # `NULL` instead of using default values for some columns, you can set
@@ -312,8 +311,6 @@ module Google
 extend ::Google::Protobuf::MessageExts::ClassMethods
 
 # Arrow schema and data.
-# Arrow format is an experimental feature only selected for allowlisted
-# customers.
 # @!attribute [rw] writer_schema
 # @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSchema]
 # Optional. Arrow Schema used to serialize the data.
@@ -329,8 +326,8 @@ module Google
 # requests.
 # @!attribute [rw] writer_schema
 # @return [::Google::Cloud::Bigquery::Storage::V1::ProtoSchema]
-# The protocol buffer schema used to serialize the data. Provide
-# whenever:
+# Optional. The protocol buffer schema used to serialize the data. Provide
+# this value whenever:
 #
 # * You send the first request of an RPC connection.
 #
@@ -339,7 +336,7 @@ module Google
 # * You specify a new destination table.
 # @!attribute [rw] rows
 # @return [::Google::Cloud::Bigquery::Storage::V1::ProtoRows]
-# Serialized row data in protobuf message format.
+# Required. Serialized row data in protobuf message format.
 # Currently, the backend expects the serialized rows to adhere to
 # proto2 semantics when appending rows, particularly with respect to
 # how default values are encoded.
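
The doc corrections above (the writer schema is optional after the first request, rows is required, and default_missing_value_interpretation persists until overridden) describe how an AppendRowsRequest is assembled. A minimal sketch, assuming row_descriptor (a Google::Protobuf::DescriptorProto for the row message) and serialized_row (its encoded bytes) are prepared elsewhere:

request = Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest.new(
  write_stream: "projects/PROJECT_ID/datasets/DATASET/tables/TABLE/streams/_default",
  # Applies to this and all later requests on the connection until changed.
  default_missing_value_interpretation: :DEFAULT_VALUE,
  proto_rows: Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest::ProtoData.new(
    writer_schema: Google::Cloud::Bigquery::Storage::V1::ProtoSchema.new(
      proto_descriptor: row_descriptor # assumed: descriptor of the proto2-style row message
    ),
    rows: Google::Cloud::Bigquery::Storage::V1::ProtoRows.new(
      serialized_rows: [serialized_row] # assumed: encoded bytes of one row message
    )
  )
)

write_client = Google::Cloud::Bigquery::Storage::V1::BigQueryWrite::Client.new
write_client.append_rows([request]).each { |response| p response.append_result&.offset }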

data/proto_docs/google/cloud/bigquery/storage/v1/stream.rb
CHANGED
@@ -248,10 +248,10 @@ module Google
 # @!attribute [rw] write_mode
 # @return [::Google::Cloud::Bigquery::Storage::V1::WriteStream::WriteMode]
 # Immutable. Mode of the stream.
-# @!attribute [
+# @!attribute [r] location
 # @return [::String]
-#
-# https://cloud.google.com/bigquery/docs/locations for supported
+# Output only. The geographic location where the stream's dataset resides.
+# See https://cloud.google.com/bigquery/docs/locations for supported
 # locations.
 class WriteStream
 include ::Google::Protobuf::MessageExts

data/proto_docs/google/cloud/bigquery/storage/v1/table.rb
CHANGED
@@ -107,6 +107,15 @@ module Google
 # @return [::String]
 # Optional. A SQL expression to specify the [default value]
 # (https://cloud.google.com/bigquery/docs/default-values) for this field.
+# @!attribute [rw] timestamp_precision
+# @return [::Google::Protobuf::Int64Value]
+# Optional. Precision (maximum number of total digits in base 10) for seconds
+# of TIMESTAMP type.
+#
+# Possible values include:
+#
+# * 6 (Default, for TIMESTAMP type with microsecond precision)
+# * 12 (For TIMESTAMP type with picosecond precision)
 # @!attribute [rw] range_element_type
 # @return [::Google::Cloud::Bigquery::Storage::V1::TableFieldSchema::FieldElementType]
 # Optional. The subtype of the RANGE, if the type of this field is RANGE. If