google-cloud-bigtable-v2 0.10.0 → 0.12.0
- checksums.yaml +4 -4
- data/lib/google/bigtable/v2/bigtable_pb.rb +30 -155
- data/lib/google/bigtable/v2/data_pb.rb +24 -133
- data/lib/google/bigtable/v2/feature_flags_pb.rb +24 -4
- data/lib/google/bigtable/v2/request_stats_pb.rb +25 -19
- data/lib/google/bigtable/v2/response_params_pb.rb +24 -5
- data/lib/google/cloud/bigtable/v2/bigtable/client.rb +18 -3
- data/lib/google/cloud/bigtable/v2/version.rb +1 -1
- data/proto_docs/google/bigtable/v2/bigtable.rb +14 -0
- data/proto_docs/google/bigtable/v2/feature_flags.rb +4 -0
- data/proto_docs/google/protobuf/any.rb +7 -4
- data/proto_docs/google/protobuf/timestamp.rb +1 -3
- metadata +4 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 398c39861ce2c8346fcba6a6ecf4a1273aebd07552483a0c756a8c88eea79585
+  data.tar.gz: b8388fc6e511f2dc523cbbedf97ea970fd81cfa11d8f56940b0043d6b026acd5
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 891cd672d22b4b8c7e9373aa53ea8da5d893c4ecf8821caff1fa7b2754efdb28463363b63e84aa0b11428f8e3514ad5179c591c449976df50462e6a94aa58e24
+  data.tar.gz: 7a0f51f0317bd186a145ea9fd8f1d7437bfa2826c852e0f4fe30c9f84f742b3a5c892f4167781713f4738c1718506be2d81c78cff16b4c55f951f2da4d474189
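The checksum entries above cover the two archives packed inside the published `.gem` file (`metadata.gz` and `data.tar.gz`). As a minimal sketch of how the new SHA256 values could be verified locally — assuming the 0.12.0 gem has already been downloaded and unpacked into the current directory (the file paths are illustrative):

```ruby
require "digest"

# Expected SHA256 values from checksums.yaml in google-cloud-bigtable-v2 0.12.0.
# The paths assume the .gem file (itself a tar archive) has been unpacked here.
{
  "metadata.gz" => "398c39861ce2c8346fcba6a6ecf4a1273aebd07552483a0c756a8c88eea79585",
  "data.tar.gz" => "b8388fc6e511f2dc523cbbedf97ea970fd81cfa11d8f56940b0043d6b026acd5"
}.each do |name, expected|
  actual = Digest::SHA256.file(name).hexdigest
  puts "#{name}: #{actual == expected ? 'OK' : 'MISMATCH'}"
end
```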
data/lib/google/bigtable/v2/bigtable_pb.rb
CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 # Generated by the protocol buffer compiler. DO NOT EDIT!
 # source: google/bigtable/v2/bigtable.proto
 
@@ -15,163 +16,37 @@ require 'google/protobuf/timestamp_pb'
 require 'google/protobuf/wrappers_pb'
 require 'google/rpc/status_pb'
 
-  add_file("google/bigtable/v2/bigtable.proto", :syntax => :proto3) do
-      optional :value_size, :int32, 7
-      oneof :row_status do
-        optional :reset_row, :bool, 8
-        optional :commit_row, :bool, 9
-      end
-    end
-    add_message "google.bigtable.v2.SampleRowKeysRequest" do
-      optional :table_name, :string, 1
-      optional :app_profile_id, :string, 2
-    end
-    add_message "google.bigtable.v2.SampleRowKeysResponse" do
-      optional :row_key, :bytes, 1
-      optional :offset_bytes, :int64, 2
-    end
-    add_message "google.bigtable.v2.MutateRowRequest" do
-      optional :table_name, :string, 1
-      optional :app_profile_id, :string, 4
-      optional :row_key, :bytes, 2
-      repeated :mutations, :message, 3, "google.bigtable.v2.Mutation"
-    end
-    add_message "google.bigtable.v2.MutateRowResponse" do
-    end
-    add_message "google.bigtable.v2.MutateRowsRequest" do
-      optional :table_name, :string, 1
-      optional :app_profile_id, :string, 3
-      repeated :entries, :message, 2, "google.bigtable.v2.MutateRowsRequest.Entry"
-    end
-    add_message "google.bigtable.v2.MutateRowsRequest.Entry" do
-      optional :row_key, :bytes, 1
-      repeated :mutations, :message, 2, "google.bigtable.v2.Mutation"
-    end
-    add_message "google.bigtable.v2.MutateRowsResponse" do
-      repeated :entries, :message, 1, "google.bigtable.v2.MutateRowsResponse.Entry"
-      proto3_optional :rate_limit_info, :message, 3, "google.bigtable.v2.RateLimitInfo"
-    end
-    add_message "google.bigtable.v2.MutateRowsResponse.Entry" do
-      optional :index, :int64, 1
-      optional :status, :message, 2, "google.rpc.Status"
-    end
-    add_message "google.bigtable.v2.RateLimitInfo" do
-      optional :period, :message, 1, "google.protobuf.Duration"
-      optional :factor, :double, 2
-    end
-    add_message "google.bigtable.v2.CheckAndMutateRowRequest" do
-      optional :table_name, :string, 1
-      optional :app_profile_id, :string, 7
-      optional :row_key, :bytes, 2
-      optional :predicate_filter, :message, 6, "google.bigtable.v2.RowFilter"
-      repeated :true_mutations, :message, 4, "google.bigtable.v2.Mutation"
-      repeated :false_mutations, :message, 5, "google.bigtable.v2.Mutation"
-    end
-    add_message "google.bigtable.v2.CheckAndMutateRowResponse" do
-      optional :predicate_matched, :bool, 1
-    end
-    add_message "google.bigtable.v2.PingAndWarmRequest" do
-      optional :name, :string, 1
-      optional :app_profile_id, :string, 2
-    end
-    add_message "google.bigtable.v2.PingAndWarmResponse" do
-    end
-    add_message "google.bigtable.v2.ReadModifyWriteRowRequest" do
-      optional :table_name, :string, 1
-      optional :app_profile_id, :string, 4
-      optional :row_key, :bytes, 2
-      repeated :rules, :message, 3, "google.bigtable.v2.ReadModifyWriteRule"
-    end
-    add_message "google.bigtable.v2.ReadModifyWriteRowResponse" do
-      optional :row, :message, 1, "google.bigtable.v2.Row"
-    end
-    add_message "google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest" do
-      optional :table_name, :string, 1
-      optional :app_profile_id, :string, 2
-    end
-    add_message "google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse" do
-      optional :partition, :message, 1, "google.bigtable.v2.StreamPartition"
-    end
-    add_message "google.bigtable.v2.ReadChangeStreamRequest" do
-      optional :table_name, :string, 1
-      optional :app_profile_id, :string, 2
-      optional :partition, :message, 3, "google.bigtable.v2.StreamPartition"
-      optional :end_time, :message, 5, "google.protobuf.Timestamp"
-      optional :heartbeat_duration, :message, 7, "google.protobuf.Duration"
-      oneof :start_from do
-        optional :start_time, :message, 4, "google.protobuf.Timestamp"
-        optional :continuation_tokens, :message, 6, "google.bigtable.v2.StreamContinuationTokens"
-      end
-    end
-    add_message "google.bigtable.v2.ReadChangeStreamResponse" do
-      oneof :stream_record do
-        optional :data_change, :message, 1, "google.bigtable.v2.ReadChangeStreamResponse.DataChange"
-        optional :heartbeat, :message, 2, "google.bigtable.v2.ReadChangeStreamResponse.Heartbeat"
-        optional :close_stream, :message, 3, "google.bigtable.v2.ReadChangeStreamResponse.CloseStream"
-      end
-    end
-    add_message "google.bigtable.v2.ReadChangeStreamResponse.MutationChunk" do
-      optional :chunk_info, :message, 1, "google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo"
-      optional :mutation, :message, 2, "google.bigtable.v2.Mutation"
-    end
-    add_message "google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo" do
-      optional :chunked_value_size, :int32, 1
-      optional :chunked_value_offset, :int32, 2
-      optional :last_chunk, :bool, 3
-    end
-    add_message "google.bigtable.v2.ReadChangeStreamResponse.DataChange" do
-      optional :type, :enum, 1, "google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type"
-      optional :source_cluster_id, :string, 2
-      optional :row_key, :bytes, 3
-      optional :commit_timestamp, :message, 4, "google.protobuf.Timestamp"
-      optional :tiebreaker, :int32, 5
-      repeated :chunks, :message, 6, "google.bigtable.v2.ReadChangeStreamResponse.MutationChunk"
-      optional :done, :bool, 8
-      optional :token, :string, 9
-      optional :estimated_low_watermark, :message, 10, "google.protobuf.Timestamp"
-    end
-    add_enum "google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type" do
-      value :TYPE_UNSPECIFIED, 0
-      value :USER, 1
-      value :GARBAGE_COLLECTION, 2
-      value :CONTINUATION, 3
-    end
-    add_message "google.bigtable.v2.ReadChangeStreamResponse.Heartbeat" do
-      optional :continuation_token, :message, 1, "google.bigtable.v2.StreamContinuationToken"
-      optional :estimated_low_watermark, :message, 2, "google.protobuf.Timestamp"
-    end
-    add_message "google.bigtable.v2.ReadChangeStreamResponse.CloseStream" do
-      optional :status, :message, 1, "google.rpc.Status"
-      repeated :continuation_tokens, :message, 2, "google.bigtable.v2.StreamContinuationToken"
-      repeated :new_partitions, :message, 3, "google.bigtable.v2.StreamPartition"
+
descriptor_data = "\n!google/bigtable/v2/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x18google/api/routing.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a&google/bigtable/v2/request_stats.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\xa2\x03\n\x0fReadRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\x12P\n\x12request_stats_view\x18\x06 \x01(\x0e\x32\x34.google.bigtable.v2.ReadRowsRequest.RequestStatsView\x12\x10\n\x08reversed\x18\x07 \x01(\x08\"f\n\x10RequestStatsView\x12\"\n\x1eREQUEST_STATS_VIEW_UNSPECIFIED\x10\x00\x12\x16\n\x12REQUEST_STATS_NONE\x10\x01\x12\x16\n\x12REQUEST_STATS_FULL\x10\x02\"\xb1\x03\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x12\x37\n\rrequest_stats\x18\x03 \x01(\x0b\x32 .google.bigtable.v2.RequestStats\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"n\n\x14SampleRowKeysRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"\xb6\x01\n\x10MutateRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\x13\n\x11MutateRowResponse\"\xfe\x01\n\x11MutateRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\xe4\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x12?\n\x0frate_limit_info\x18\x03 \x01(\x0b\x32!.google.bigtable.v2.RateLimitInfoH\x00\x88\x01\x01\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.StatusB\x12\n\x10_rate_limit_info\"J\n\rRateLimitInfo\x12)\n\x06period\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0e\n\x06\x66\x61\x63tor\x18\x02 \x01(\x01\"\xae\x02\n\x18\x43heckAndMutateRowRequest\x12>\n\ntable_name\x18\x01 
\x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"i\n\x12PingAndWarmRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"\x15\n\x13PingAndWarmResponse\"\xc6\x01\n\x19ReadModifyWriteRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row\"\x86\x01\n,GenerateInitialChangeStreamPartitionsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"g\n-GenerateInitialChangeStreamPartitionsResponse\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\"\x9b\x03\n\x17ReadChangeStreamRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x36\n\tpartition\x18\x03 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\x30\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12K\n\x13\x63ontinuation_tokens\x18\x06 \x01(\x0b\x32,.google.bigtable.v2.StreamContinuationTokensH\x00\x12,\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x35\n\x12heartbeat_duration\x18\x07 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0c\n\nstart_from\"\xa9\n\n\x18ReadChangeStreamResponse\x12N\n\x0b\x64\x61ta_change\x18\x01 \x01(\x0b\x32\x37.google.bigtable.v2.ReadChangeStreamResponse.DataChangeH\x00\x12K\n\theartbeat\x18\x02 \x01(\x0b\x32\x36.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatH\x00\x12P\n\x0c\x63lose_stream\x18\x03 \x01(\x0b\x32\x38.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamH\x00\x1a\xf4\x01\n\rMutationChunk\x12X\n\nchunk_info\x18\x01 \x01(\x0b\x32\x44.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo\x12.\n\x08mutation\x18\x02 \x01(\x0b\x32\x1c.google.bigtable.v2.Mutation\x1aY\n\tChunkInfo\x12\x1a\n\x12\x63hunked_value_size\x18\x01 \x01(\x05\x12\x1c\n\x14\x63hunked_value_offset\x18\x02 \x01(\x05\x12\x12\n\nlast_chunk\x18\x03 \x01(\x08\x1a\xc6\x03\n\nDataChange\x12J\n\x04type\x18\x01 \x01(\x0e\x32<.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type\x12\x19\n\x11source_cluster_id\x18\x02 \x01(\t\x12\x0f\n\x07row_key\x18\x03 \x01(\x0c\x12\x34\n\x10\x63ommit_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\ntiebreaker\x18\x05 \x01(\x05\x12J\n\x06\x63hunks\x18\x06 \x03(\x0b\x32:.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk\x12\x0c\n\x04\x64one\x18\x08 \x01(\x08\x12\r\n\x05token\x18\t \x01(\t\x12;\n\x17\x65stimated_low_watermark\x18\n 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\"P\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x08\n\x04USER\x10\x01\x12\x16\n\x12GARBAGE_COLLECTION\x10\x02\x12\x10\n\x0c\x43ONTINUATION\x10\x03\x1a\x91\x01\n\tHeartbeat\x12G\n\x12\x63ontinuation_token\x18\x01 \x01(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\x12;\n\x17\x65stimated_low_watermark\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\xb8\x01\n\x0b\x43loseStream\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12H\n\x13\x63ontinuation_tokens\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\x12;\n\x0enew_partitions\x18\x03 \x03(\x0b\x32#.google.bigtable.v2.StreamPartitionB\x0f\n\rstream_record2\xd7\x18\n\x08\x42igtable\x12\x9b\x02\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"\xc1\x01\x82\xd3\xe4\x93\x02>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xac\x02\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"\xc3\x01\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xc1\x02\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"\xe6\x01\x82\xd3\xe4\x93\x02?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xb3\x02\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"\xd3\x01\x82\xd3\xe4\x93\x02@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xad\x03\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"\xba\x02\x82\xd3\xe4\x93\x02G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\xee\x01\n\x0bPingAndWarm\x12&.google.bigtable.v2.PingAndWarmRequest\x1a\'.google.bigtable.v2.PingAndWarmResponse\"\x8d\x01\x82\xd3\xe4\x93\x02+\"&/v2/{name=projects/*/instances/*}:ping:\x01*\x8a\xd3\xe4\x93\x02\x39\x12%\n\x04name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x04name\xda\x41\x13name,app_profile_id\x12\xdd\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"\xe7\x01\x82\xd3\xe4\x93\x02H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/
*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x12\xbb\x02\n%GenerateInitialChangeStreamPartitions\x12@.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest\x1a\x41.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse\"\x8a\x01\x82\xd3\xe4\x93\x02[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xe6\x01\n\x10ReadChangeStream\x12+.google.bigtable.v2.ReadChangeStreamRequest\x1a,.google.bigtable.v2.ReadChangeStreamResponse\"u\x82\xd3\xe4\x93\x02\x46\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xeb\x02\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2\xea\x41P\n%bigtableadmin.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}\xea\x41\\\n\"bigtableadmin.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3"
+
+pool = Google::Protobuf::DescriptorPool.generated_pool
+
+begin
+  pool.add_serialized_file(descriptor_data)
+rescue TypeError => e
+  # Compatibility code: will be removed in the next major version.
+  require 'google/protobuf/descriptor_pb'
+  parsed = Google::Protobuf::FileDescriptorProto.decode(descriptor_data)
+  parsed.clear_dependency
+  serialized = parsed.class.encode(parsed)
+  file = pool.add_serialized_file(serialized)
+  warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
+  imports = [
+    ["google.bigtable.v2.RowSet", "google/bigtable/v2/data.proto"],
+    ["google.bigtable.v2.RequestStats", "google/bigtable/v2/request_stats.proto"],
+    ["google.protobuf.StringValue", "google/protobuf/wrappers.proto"],
+    ["google.rpc.Status", "google/rpc/status.proto"],
+    ["google.protobuf.Duration", "google/protobuf/duration.proto"],
+    ["google.protobuf.Timestamp", "google/protobuf/timestamp.proto"],
+  ]
+  imports.each do |type_name, expected_filename|
+    import_file = pool.lookup(type_name).file_descriptor
+    if import_file.name != expected_filename
+      warn "- #{file.name} imports #{expected_filename}, but that import was loaded as #{import_file.name}"
     end
   end
+  warn "Each proto file must use a consistent fully-qualified name."
+  warn "This will become an error in the next major version."
 end
 
 module Google
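The regenerated `bigtable_pb.rb` replaces the per-message builder DSL with a serialized file descriptor plus the compatibility fallback shown above, but the classes it registers are unchanged. A minimal sketch, assuming the gem is installed and using placeholder resource names, showing that the generated message classes — including the new `reversed` field on `ReadRowsRequest` — still resolve from the descriptor pool as before:

```ruby
require "google/cloud/bigtable/v2"

# Message classes are still materialized from the generated descriptor pool,
# so code that builds request protos directly keeps working after the switch.
request = Google::Cloud::Bigtable::V2::ReadRowsRequest.new(
  table_name: "projects/my-project/instances/my-instance/tables/my-table",
  rows_limit: 10,
  reversed:   true # field added in this release
)
puts Google::Cloud::Bigtable::V2::ReadRowsRequest.encode(request).bytesize
```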
data/lib/google/bigtable/v2/data_pb.rb
CHANGED
@@ -1,143 +1,34 @@
+# frozen_string_literal: true
 # Generated by the protocol buffer compiler. DO NOT EDIT!
 # source: google/bigtable/v2/data.proto
 
 require 'google/protobuf'
 
-        optional :start_key_closed, :bytes, 1
-        optional :start_key_open, :bytes, 2
-      end
-      oneof :end_key do
-        optional :end_key_open, :bytes, 3
-        optional :end_key_closed, :bytes, 4
-      end
-    end
-    add_message "google.bigtable.v2.RowSet" do
-      repeated :row_keys, :bytes, 1
-      repeated :row_ranges, :message, 2, "google.bigtable.v2.RowRange"
-    end
-    add_message "google.bigtable.v2.ColumnRange" do
-      optional :family_name, :string, 1
-      oneof :start_qualifier do
-        optional :start_qualifier_closed, :bytes, 2
-        optional :start_qualifier_open, :bytes, 3
-      end
-      oneof :end_qualifier do
-        optional :end_qualifier_closed, :bytes, 4
-        optional :end_qualifier_open, :bytes, 5
-      end
-    end
-    add_message "google.bigtable.v2.TimestampRange" do
-      optional :start_timestamp_micros, :int64, 1
-      optional :end_timestamp_micros, :int64, 2
-    end
-    add_message "google.bigtable.v2.ValueRange" do
-      oneof :start_value do
-        optional :start_value_closed, :bytes, 1
-        optional :start_value_open, :bytes, 2
-      end
-      oneof :end_value do
-        optional :end_value_closed, :bytes, 3
-        optional :end_value_open, :bytes, 4
-      end
-    end
-    add_message "google.bigtable.v2.RowFilter" do
-      oneof :filter do
-        optional :chain, :message, 1, "google.bigtable.v2.RowFilter.Chain"
-        optional :interleave, :message, 2, "google.bigtable.v2.RowFilter.Interleave"
-        optional :condition, :message, 3, "google.bigtable.v2.RowFilter.Condition"
-        optional :sink, :bool, 16
-        optional :pass_all_filter, :bool, 17
-        optional :block_all_filter, :bool, 18
-        optional :row_key_regex_filter, :bytes, 4
-        optional :row_sample_filter, :double, 14
-        optional :family_name_regex_filter, :string, 5
-        optional :column_qualifier_regex_filter, :bytes, 6
-        optional :column_range_filter, :message, 7, "google.bigtable.v2.ColumnRange"
-        optional :timestamp_range_filter, :message, 8, "google.bigtable.v2.TimestampRange"
-        optional :value_regex_filter, :bytes, 9
-        optional :value_range_filter, :message, 15, "google.bigtable.v2.ValueRange"
-        optional :cells_per_row_offset_filter, :int32, 10
-        optional :cells_per_row_limit_filter, :int32, 11
-        optional :cells_per_column_limit_filter, :int32, 12
-        optional :strip_value_transformer, :bool, 13
-        optional :apply_label_transformer, :string, 19
-      end
-    end
-    add_message "google.bigtable.v2.RowFilter.Chain" do
-      repeated :filters, :message, 1, "google.bigtable.v2.RowFilter"
-    end
-    add_message "google.bigtable.v2.RowFilter.Interleave" do
-      repeated :filters, :message, 1, "google.bigtable.v2.RowFilter"
-    end
-    add_message "google.bigtable.v2.RowFilter.Condition" do
-      optional :predicate_filter, :message, 1, "google.bigtable.v2.RowFilter"
-      optional :true_filter, :message, 2, "google.bigtable.v2.RowFilter"
-      optional :false_filter, :message, 3, "google.bigtable.v2.RowFilter"
-    end
-    add_message "google.bigtable.v2.Mutation" do
-      oneof :mutation do
-        optional :set_cell, :message, 1, "google.bigtable.v2.Mutation.SetCell"
-        optional :delete_from_column, :message, 2, "google.bigtable.v2.Mutation.DeleteFromColumn"
-        optional :delete_from_family, :message, 3, "google.bigtable.v2.Mutation.DeleteFromFamily"
-        optional :delete_from_row, :message, 4, "google.bigtable.v2.Mutation.DeleteFromRow"
-      end
-    end
-    add_message "google.bigtable.v2.Mutation.SetCell" do
-      optional :family_name, :string, 1
-      optional :column_qualifier, :bytes, 2
-      optional :timestamp_micros, :int64, 3
-      optional :value, :bytes, 4
-    end
-    add_message "google.bigtable.v2.Mutation.DeleteFromColumn" do
-      optional :family_name, :string, 1
-      optional :column_qualifier, :bytes, 2
-      optional :time_range, :message, 3, "google.bigtable.v2.TimestampRange"
-    end
-    add_message "google.bigtable.v2.Mutation.DeleteFromFamily" do
-      optional :family_name, :string, 1
-    end
-    add_message "google.bigtable.v2.Mutation.DeleteFromRow" do
-    end
-    add_message "google.bigtable.v2.ReadModifyWriteRule" do
-      optional :family_name, :string, 1
-      optional :column_qualifier, :bytes, 2
-      oneof :rule do
-        optional :append_value, :bytes, 3
-        optional :increment_amount, :int64, 4
-      end
-    end
-    add_message "google.bigtable.v2.StreamPartition" do
-      optional :row_range, :message, 1, "google.bigtable.v2.RowRange"
-    end
-    add_message "google.bigtable.v2.StreamContinuationTokens" do
-      repeated :tokens, :message, 1, "google.bigtable.v2.StreamContinuationToken"
-    end
-    add_message "google.bigtable.v2.StreamContinuationToken" do
-      optional :partition, :message, 1, "google.bigtable.v2.StreamPartition"
-      optional :token, :string, 2
+
descriptor_data = "\n\x1dgoogle/bigtable/v2/data.proto\x12\x12google.bigtable.v2\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 
\x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04rule\"B\n\x0fStreamPartition\x12/\n\trow_range\x18\x01 \x01(\x0b\x32\x1c.google.bigtable.v2.RowRange\"W\n\x18StreamContinuationTokens\x12;\n\x06tokens\x18\x01 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\"`\n\x17StreamContinuationToken\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\r\n\x05token\x18\x02 \x01(\tB\xb5\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3"
+
+pool = Google::Protobuf::DescriptorPool.generated_pool
+
+begin
+  pool.add_serialized_file(descriptor_data)
+rescue TypeError => e
+  # Compatibility code: will be removed in the next major version.
+  require 'google/protobuf/descriptor_pb'
+  parsed = Google::Protobuf::FileDescriptorProto.decode(descriptor_data)
+  parsed.clear_dependency
+  serialized = parsed.class.encode(parsed)
+  file = pool.add_serialized_file(serialized)
+  warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
+  imports = [
+  ]
+  imports.each do |type_name, expected_filename|
+    import_file = pool.lookup(type_name).file_descriptor
+    if import_file.name != expected_filename
+      warn "- #{file.name} imports #{expected_filename}, but that import was loaded as #{import_file.name}"
     end
   end
+  warn "Each proto file must use a consistent fully-qualified name."
+  warn "This will become an error in the next major version."
 end
 
 module Google
data/lib/google/bigtable/v2/feature_flags_pb.rb
CHANGED
@@ -1,14 +1,34 @@
+# frozen_string_literal: true
 # Generated by the protocol buffer compiler. DO NOT EDIT!
 # source: google/bigtable/v2/feature_flags.proto
 
 require 'google/protobuf'
 
+
descriptor_data = "\n&google/bigtable/v2/feature_flags.proto\x12\x12google.bigtable.v2\"E\n\x0c\x46\x65\x61tureFlags\x12\x15\n\rreverse_scans\x18\x01 \x01(\x08\x12\x1e\n\x16mutate_rows_rate_limit\x18\x03 \x01(\x08\x42\xbd\x01\n\x16\x63om.google.bigtable.v2B\x11\x46\x65\x61tureFlagsProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3"
+
+pool = Google::Protobuf::DescriptorPool.generated_pool
+
+begin
+  pool.add_serialized_file(descriptor_data)
+rescue TypeError => e
+  # Compatibility code: will be removed in the next major version.
+  require 'google/protobuf/descriptor_pb'
+  parsed = Google::Protobuf::FileDescriptorProto.decode(descriptor_data)
+  parsed.clear_dependency
+  serialized = parsed.class.encode(parsed)
+  file = pool.add_serialized_file(serialized)
+  warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
+  imports = [
+  ]
+  imports.each do |type_name, expected_filename|
+    import_file = pool.lookup(type_name).file_descriptor
+    if import_file.name != expected_filename
+      warn "- #{file.name} imports #{expected_filename}, but that import was loaded as #{import_file.name}"
     end
   end
+  warn "Each proto file must use a consistent fully-qualified name."
+  warn "This will become an error in the next major version."
 end
 
 module Google
data/lib/google/bigtable/v2/request_stats_pb.rb
CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 # Generated by the protocol buffer compiler. DO NOT EDIT!
 # source: google/bigtable/v2/request_stats.proto
 
@@ -5,27 +6,32 @@ require 'google/protobuf'
 
 require 'google/protobuf/duration_pb'
 
+
descriptor_data = "\n&google/bigtable/v2/request_stats.proto\x12\x12google.bigtable.v2\x1a\x1egoogle/protobuf/duration.proto\"\x82\x01\n\x12ReadIterationStats\x12\x17\n\x0frows_seen_count\x18\x01 \x01(\x03\x12\x1b\n\x13rows_returned_count\x18\x02 \x01(\x03\x12\x18\n\x10\x63\x65lls_seen_count\x18\x03 \x01(\x03\x12\x1c\n\x14\x63\x65lls_returned_count\x18\x04 \x01(\x03\"Q\n\x13RequestLatencyStats\x12:\n\x17\x66rontend_server_latency\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\xa1\x01\n\x11\x46ullReadStatsView\x12\x44\n\x14read_iteration_stats\x18\x01 \x01(\x0b\x32&.google.bigtable.v2.ReadIterationStats\x12\x46\n\x15request_latency_stats\x18\x02 \x01(\x0b\x32\'.google.bigtable.v2.RequestLatencyStats\"c\n\x0cRequestStats\x12\x45\n\x14\x66ull_read_stats_view\x18\x01 \x01(\x0b\x32%.google.bigtable.v2.FullReadStatsViewH\x00\x42\x0c\n\nstats_viewB\xbd\x01\n\x16\x63om.google.bigtable.v2B\x11RequestStatsProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3"
+
+pool = Google::Protobuf::DescriptorPool.generated_pool
+
+begin
+  pool.add_serialized_file(descriptor_data)
+rescue TypeError => e
+  # Compatibility code: will be removed in the next major version.
+  require 'google/protobuf/descriptor_pb'
+  parsed = Google::Protobuf::FileDescriptorProto.decode(descriptor_data)
+  parsed.clear_dependency
+  serialized = parsed.class.encode(parsed)
+  file = pool.add_serialized_file(serialized)
+  warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
+  imports = [
+    ["google.protobuf.Duration", "google/protobuf/duration.proto"],
+  ]
+  imports.each do |type_name, expected_filename|
+    import_file = pool.lookup(type_name).file_descriptor
+    if import_file.name != expected_filename
+      warn "- #{file.name} imports #{expected_filename}, but that import was loaded as #{import_file.name}"
     end
   end
+  warn "Each proto file must use a consistent fully-qualified name."
+  warn "This will become an error in the next major version."
 end
 
 module Google
data/lib/google/bigtable/v2/response_params_pb.rb
CHANGED
@@ -1,15 +1,34 @@
+# frozen_string_literal: true
 # Generated by the protocol buffer compiler. DO NOT EDIT!
 # source: google/bigtable/v2/response_params.proto
 
 require 'google/protobuf'
 
+
descriptor_data = "\n(google/bigtable/v2/response_params.proto\x12\x12google.bigtable.v2\"Z\n\x0eResponseParams\x12\x14\n\x07zone_id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\ncluster_id\x18\x02 \x01(\tH\x01\x88\x01\x01\x42\n\n\x08_zone_idB\r\n\x0b_cluster_idB\xbf\x01\n\x16\x63om.google.bigtable.v2B\x13ResponseParamsProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3"
+
+pool = Google::Protobuf::DescriptorPool.generated_pool
+
+begin
+  pool.add_serialized_file(descriptor_data)
+rescue TypeError => e
+  # Compatibility code: will be removed in the next major version.
+  require 'google/protobuf/descriptor_pb'
+  parsed = Google::Protobuf::FileDescriptorProto.decode(descriptor_data)
+  parsed.clear_dependency
+  serialized = parsed.class.encode(parsed)
+  file = pool.add_serialized_file(serialized)
+  warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
+  imports = [
+  ]
+  imports.each do |type_name, expected_filename|
+    import_file = pool.lookup(type_name).file_descriptor
+    if import_file.name != expected_filename
+      warn "- #{file.name} imports #{expected_filename}, but that import was loaded as #{import_file.name}"
     end
   end
+  warn "Each proto file must use a consistent fully-qualified name."
+  warn "This will become an error in the next major version."
 end
 
 module Google
data/lib/google/cloud/bigtable/v2/bigtable/client.rb
CHANGED
@@ -142,7 +142,7 @@ module Google
 credentials = @config.credentials
 # Use self-signed JWT if the endpoint is unchanged from default,
 # but only if the default endpoint does not have a region prefix.
-enable_self_signed_jwt = @config.endpoint ==
+enable_self_signed_jwt = @config.endpoint == Configuration::DEFAULT_ENDPOINT &&
 !@config.endpoint.split(".").first.include?("-")
 credentials ||= Credentials.default scope: @config.scope,
 enable_self_signed_jwt: enable_self_signed_jwt
@@ -180,7 +180,7 @@ module Google
 # @param options [::Gapic::CallOptions, ::Hash]
 #   Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
 #
-# @overload read_rows(table_name: nil, app_profile_id: nil, rows: nil, filter: nil, rows_limit: nil, request_stats_view: nil)
+# @overload read_rows(table_name: nil, app_profile_id: nil, rows: nil, filter: nil, rows_limit: nil, request_stats_view: nil, reversed: nil)
 #   Pass arguments to `read_rows` via keyword arguments. Note that at
 #   least one keyword argument is required. To specify no parameters, or to keep all
 #   the default parameter values, pass an empty Hash as a request object (see above).
@@ -203,6 +203,19 @@ module Google
 #     default (zero) is to return all results.
 #   @param request_stats_view [::Google::Cloud::Bigtable::V2::ReadRowsRequest::RequestStatsView]
 #     The view into RequestStats, as described above.
+#   @param reversed [::Boolean]
+#     Experimental API - Please note that this API is currently experimental
+#     and can change in the future.
+#
+#     Return rows in lexiographical descending order of the row keys. The row
+#     contents will not be affected by this flag.
+#
+#     Example result set:
+#
+#         [
+#           {key: "k2", "f:col1": "v1", "f:col2": "v1"},
+#           {key: "k1", "f:col1": "v2", "f:col2": "v2"}
+#         ]
 #
 # @yield [response, operation] Access the result along with the RPC operation
 # @yieldparam response [::Enumerable<::Google::Cloud::Bigtable::V2::ReadRowsResponse>]
@@ -1197,7 +1210,9 @@ module Google
 class Configuration
 extend ::Gapic::Config
 
+DEFAULT_ENDPOINT = "bigtable.googleapis.com"
+
+config_attr :endpoint, DEFAULT_ENDPOINT, ::String
 config_attr :credentials, nil do |value|
 allowed = [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
 allowed += [::GRPC::Core::Channel, ::GRPC::Core::ChannelCredentials] if defined? ::GRPC
|
|
46
46
|
# @!attribute [rw] request_stats_view
|
47
47
|
# @return [::Google::Cloud::Bigtable::V2::ReadRowsRequest::RequestStatsView]
|
48
48
|
# The view into RequestStats, as described above.
|
49
|
+
# @!attribute [rw] reversed
|
50
|
+
# @return [::Boolean]
|
51
|
+
# Experimental API - Please note that this API is currently experimental
|
52
|
+
# and can change in the future.
|
53
|
+
#
|
54
|
+
# Return rows in lexiographical descending order of the row keys. The row
|
55
|
+
# contents will not be affected by this flag.
|
56
|
+
#
|
57
|
+
# Example result set:
|
58
|
+
#
|
59
|
+
# [
|
60
|
+
# {key: "k2", "f:col1": "v1", "f:col2": "v1"},
|
61
|
+
# {key: "k1", "f:col1": "v2", "f:col2": "v2"}
|
62
|
+
# ]
|
49
63
|
class ReadRowsRequest
|
50
64
|
include ::Google::Protobuf::MessageExts
|
51
65
|
extend ::Google::Protobuf::MessageExts::ClassMethods
|
data/proto_docs/google/bigtable/v2/feature_flags.rb
CHANGED
@@ -29,6 +29,10 @@ module Google
 #     HTTP2's HPACK compression, the request overhead will be tiny.
 #     This is an internal implementation detail and should not be used by endusers
 #     directly.
+# @!attribute [rw] reverse_scans
+#   @return [::Boolean]
+#     Notify the server that the client supports reverse scans. The server will
+#     reject ReadRowsRequests with the reverse bit set when this is absent.
 # @!attribute [rw] mutate_rows_rate_limit
 #   @return [::Boolean]
 #     Notify the server that the client enables batch write flow control by
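`FeatureFlags` is the capability message the client library encodes into request metadata (an internal detail, per the comment above). A small sketch, assuming only the installed gem, that sets the new `reverse_scans` flag and round-trips the message through protobuf serialization:

```ruby
require "google/cloud/bigtable/v2"

# Setting the new reverse_scans capability bit and serializing the message;
# how the client actually transmits these flags is an internal detail.
flags = Google::Cloud::Bigtable::V2::FeatureFlags.new(
  reverse_scans:          true,
  mutate_rows_rate_limit: true
)
encoded = Google::Cloud::Bigtable::V2::FeatureFlags.encode(flags)
puts Google::Cloud::Bigtable::V2::FeatureFlags.decode(encoded).reverse_scans # => true
```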
data/proto_docs/google/protobuf/any.rb
CHANGED
@@ -43,8 +43,12 @@ module Google
 #      if (any.is(Foo.class)) {
 #        foo = any.unpack(Foo.class);
 #      }
+#     // or ...
+#      if (any.isSameTypeAs(Foo.getDefaultInstance())) {
+#        foo = any.unpack(Foo.getDefaultInstance());
+#      }
 #
-#
+#     Example 3: Pack and unpack a message in Python.
 #
 #      foo = Foo(...)
 #      any = Any()
@@ -54,7 +58,7 @@ module Google
 #      any.Unpack(foo)
 #      ...
 #
-#
+#     Example 4: Pack and unpack a message in Go
 #
 #      foo := &pb.Foo{...}
 #      any, err := anypb.New(foo)
@@ -73,9 +77,8 @@ module Google
 # in the type URL, for example "foo.bar.com/x/y.z" will yield type
 # name "y.z".
 #
-#
 # JSON
-#
+# ====
 # The JSON representation of an `Any` value uses the regular
 # representation of the deserialized, embedded message, with an
 # additional field `@type` which contains the type URL. Example:
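The reflowed comment adds Java, Python, and Go pack/unpack examples for `google.protobuf.Any`; since this is a Ruby gem, a Ruby analogue may be useful. This is a sketch that assumes the `google-protobuf` runtime's `well_known_types` helpers (`Any.pack`, `Any#is`, `Any#unpack`) and reuses `FeatureFlags` from this gem as the payload message:

```ruby
require "google/cloud/bigtable/v2"
require "google/protobuf/well_known_types"

# Ruby analogue of the Java/Python/Go examples: pack a message into an Any,
# check its type, then unpack it again.
msg = Google::Cloud::Bigtable::V2::FeatureFlags.new(reverse_scans: true)
any = Google::Protobuf::Any.pack(msg)

if any.is(Google::Cloud::Bigtable::V2::FeatureFlags)
  unpacked = any.unpack(Google::Cloud::Bigtable::V2::FeatureFlags)
  puts unpacked.reverse_scans # => true
end
```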
data/proto_docs/google/protobuf/timestamp.rb
CHANGED
@@ -69,7 +69,6 @@ module Google
 #     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
 #         .setNanos((int) ((millis % 1000) * 1000000)).build();
 #
-#
 # Example 5: Compute Timestamp from Java `Instant.now()`.
 #
 #     Instant now = Instant.now();
@@ -78,7 +77,6 @@ module Google
 #         Timestamp.newBuilder().setSeconds(now.getEpochSecond())
 #             .setNanos(now.getNano()).build();
 #
-#
 # Example 6: Compute Timestamp from current time in Python.
 #
 #     timestamp = Timestamp()
@@ -108,7 +106,7 @@ module Google
 # [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
 # the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
 # the Joda Time's [`ISODateTimeFormat.dateTime()`](
-# http://
+# http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
 # ) to obtain a formatter capable of generating timestamps in this format.
 # @!attribute [rw] seconds
 #   @return [::Integer]
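The Timestamp comment keeps its Java and Python examples; a Ruby equivalent built only from plain field assignment (no helper methods assumed) looks like this:

```ruby
require "time"
require "google/protobuf/timestamp_pb"

# Build a google.protobuf.Timestamp from a Ruby Time and convert it back,
# mirroring the Java/Python examples in the comment above.
now = Time.now.utc
ts  = Google::Protobuf::Timestamp.new(seconds: now.to_i, nanos: now.nsec)
puts Time.at(ts.seconds, ts.nanos, :nanosecond).utc.iso8601(9)
```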
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: google-cloud-bigtable-v2
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.12.0
 platform: ruby
 authors:
 - Google LLC
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-
+date: 2023-07-18 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: gapic-common
@@ -16,7 +16,7 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.19.1
     - - "<"
       - !ruby/object:Gem::Version
         version: 2.a
@@ -26,7 +26,7 @@ dependencies:
     requirements:
    - - ">="
      - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.19.1
    - - "<"
      - !ruby/object:Gem::Version
        version: 2.a