google-cloud-bigtable 0.7.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/AUTHENTICATION.md +4 -26
- data/CHANGELOG.md +55 -0
- data/OVERVIEW.md +388 -19
- data/lib/google-cloud-bigtable.rb +0 -1
- data/lib/google/bigtable/admin/v2/bigtable_table_admin_services_pb.rb +1 -1
- data/lib/google/bigtable/v2/bigtable_pb.rb +3 -0
- data/lib/google/bigtable/v2/bigtable_services_pb.rb +1 -1
- data/lib/google/cloud/bigtable/admin.rb +2 -2
- data/lib/google/cloud/bigtable/admin/v2.rb +2 -2
- data/lib/google/cloud/bigtable/admin/v2/bigtable_instance_admin_client.rb +1 -1
- data/lib/google/cloud/bigtable/admin/v2/bigtable_table_admin_client.rb +2 -2
- data/lib/google/cloud/bigtable/admin/v2/credentials.rb +1 -1
- data/lib/google/cloud/bigtable/admin/v2/doc/google/bigtable/admin/v2/bigtable_instance_admin.rb +1 -1
- data/lib/google/cloud/bigtable/admin/v2/doc/google/bigtable/admin/v2/bigtable_table_admin.rb +1 -1
- data/lib/google/cloud/bigtable/admin/v2/doc/google/bigtable/admin/v2/instance.rb +1 -1
- data/lib/google/cloud/bigtable/admin/v2/doc/google/bigtable/admin/v2/table.rb +1 -1
- data/lib/google/cloud/bigtable/admin/v2/doc/google/iam/v1/iam_policy.rb +1 -1
- data/lib/google/cloud/bigtable/admin/v2/doc/google/iam/v1/options.rb +1 -1
- data/lib/google/cloud/bigtable/admin/v2/doc/google/iam/v1/policy.rb +7 -6
- data/lib/google/cloud/bigtable/admin/v2/doc/google/longrunning/operations.rb +2 -2
- data/lib/google/cloud/bigtable/admin/v2/doc/google/protobuf/any.rb +1 -1
- data/lib/google/cloud/bigtable/admin/v2/doc/google/protobuf/duration.rb +1 -1
- data/lib/google/cloud/bigtable/admin/v2/doc/google/protobuf/empty.rb +1 -1
- data/lib/google/cloud/bigtable/admin/v2/doc/google/protobuf/field_mask.rb +1 -1
- data/lib/google/cloud/bigtable/admin/v2/doc/google/protobuf/timestamp.rb +1 -1
- data/lib/google/cloud/bigtable/admin/v2/doc/google/rpc/status.rb +7 -55
- data/lib/google/cloud/bigtable/admin/v2/doc/google/type/expr.rb +1 -1
- data/lib/google/cloud/bigtable/app_profile.rb +13 -13
- data/lib/google/cloud/bigtable/app_profile/list.rb +7 -7
- data/lib/google/cloud/bigtable/chunk_processor.rb +1 -1
- data/lib/google/cloud/bigtable/cluster.rb +7 -7
- data/lib/google/cloud/bigtable/cluster/list.rb +2 -2
- data/lib/google/cloud/bigtable/column_family.rb +3 -9
- data/lib/google/cloud/bigtable/column_family_map.rb +8 -8
- data/lib/google/cloud/bigtable/column_range.rb +6 -6
- data/lib/google/cloud/bigtable/gc_rule.rb +8 -8
- data/lib/google/cloud/bigtable/instance.rb +78 -71
- data/lib/google/cloud/bigtable/instance/cluster_map.rb +8 -6
- data/lib/google/cloud/bigtable/instance/job.rb +2 -2
- data/lib/google/cloud/bigtable/instance/list.rb +4 -4
- data/lib/google/cloud/bigtable/longrunning_job.rb +1 -1
- data/lib/google/cloud/bigtable/mutation_entry.rb +17 -16
- data/lib/google/cloud/bigtable/mutation_operations.rb +13 -49
- data/lib/google/cloud/bigtable/policy.rb +1 -1
- data/lib/google/cloud/bigtable/project.rb +28 -59
- data/lib/google/cloud/bigtable/read_modify_write_rule.rb +6 -6
- data/lib/google/cloud/bigtable/read_operations.rb +15 -19
- data/lib/google/cloud/bigtable/routing_policy.rb +3 -2
- data/lib/google/cloud/bigtable/row.rb +14 -14
- data/lib/google/cloud/bigtable/row_filter.rb +31 -15
- data/lib/google/cloud/bigtable/row_filter/chain_filter.rb +49 -26
- data/lib/google/cloud/bigtable/row_filter/condition_filter.rb +4 -1
- data/lib/google/cloud/bigtable/row_filter/interleave_filter.rb +44 -24
- data/lib/google/cloud/bigtable/row_filter/simple_filter.rb +3 -3
- data/lib/google/cloud/bigtable/rows_mutator.rb +1 -5
- data/lib/google/cloud/bigtable/rows_reader.rb +5 -4
- data/lib/google/cloud/bigtable/sample_row_key.rb +1 -1
- data/lib/google/cloud/bigtable/service.rb +94 -14
- data/lib/google/cloud/bigtable/table.rb +130 -27
- data/lib/google/cloud/bigtable/table/cluster_state.rb +10 -5
- data/lib/google/cloud/bigtable/table/list.rb +2 -2
- data/lib/google/cloud/bigtable/v2.rb +2 -2
- data/lib/google/cloud/bigtable/v2/bigtable_client.rb +13 -13
- data/lib/google/cloud/bigtable/v2/credentials.rb +1 -1
- data/lib/google/cloud/bigtable/v2/doc/google/bigtable/v2/bigtable.rb +17 -14
- data/lib/google/cloud/bigtable/v2/doc/google/bigtable/v2/data.rb +1 -1
- data/lib/google/cloud/bigtable/v2/doc/google/protobuf/any.rb +1 -1
- data/lib/google/cloud/bigtable/v2/doc/google/protobuf/wrappers.rb +1 -1
- data/lib/google/cloud/bigtable/v2/doc/google/rpc/status.rb +7 -55
- data/lib/google/cloud/bigtable/value_range.rb +14 -13
- data/lib/google/cloud/bigtable/version.rb +1 -1
- metadata +42 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: d5a23f16f0330f474a569b573e9651402b1494e0d6549a93bd09a92e74276866
|
4
|
+
data.tar.gz: 2f876607d84147ef39875f8fd476ee278fc0d31bad9e70db70d752eac33bc4fb
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 8cceac32a50ad779a690fe989ba58c6723e876a0125045e9c380740e550781710a4b7c4c79b5954029d9b334dc43a87b29624cd09d1a769c0960fa0fc7e3027e
|
7
|
+
data.tar.gz: afdb9a5c2ef03fd8471cc6fa1746ba2aecd8e96618dd3049a035ee42f6c5fbbc9439367c299101c59edf20731847eb0444b5937c72711a18d1de8c534fdc2759
|
data/AUTHENTICATION.md
CHANGED
@@ -55,32 +55,10 @@ code.
|
|
55
55
|
|
56
56
|
### Google Cloud Platform environments
|
57
57
|
|
58
|
-
|
59
|
-
|
60
|
-
|
61
|
-
should be written as if already authenticated.
|
62
|
-
GCE instance][gce-how-to], you add the correct scopes for the APIs you want to
|
63
|
-
access. For example:
|
64
|
-
|
65
|
-
* **All APIs**
|
66
|
-
* `https://www.googleapis.com/auth/cloud-platform`
|
67
|
-
* `https://www.googleapis.com/auth/cloud-platform.read-only`
|
68
|
-
* **BigQuery**
|
69
|
-
* `https://www.googleapis.com/auth/bigquery`
|
70
|
-
* `https://www.googleapis.com/auth/bigquery.insertdata`
|
71
|
-
* **Compute Engine**
|
72
|
-
* `https://www.googleapis.com/auth/compute`
|
73
|
-
* **Datastore**
|
74
|
-
* `https://www.googleapis.com/auth/datastore`
|
75
|
-
* `https://www.googleapis.com/auth/userinfo.email`
|
76
|
-
* **DNS**
|
77
|
-
* `https://www.googleapis.com/auth/ndev.clouddns.readwrite`
|
78
|
-
* **Pub/Sub**
|
79
|
-
* `https://www.googleapis.com/auth/pubsub`
|
80
|
-
* **Storage**
|
81
|
-
* `https://www.googleapis.com/auth/devstorage.full_control`
|
82
|
-
* `https://www.googleapis.com/auth/devstorage.read_only`
|
83
|
-
* `https://www.googleapis.com/auth/devstorage.read_write`
|
58
|
+
When running on Google Cloud Platform (GCP), including Google Compute Engine (GCE),
|
59
|
+
Google Kubernetes Engine (GKE), Google App Engine (GAE), Google Cloud Functions
|
60
|
+
(GCF) and Cloud Run, the **Project ID** and **Credentials** are discovered
|
61
|
+
automatically. Code should be written as if already authenticated.
|
84
62
|
|
85
63
|
### Environment Variables
|
86
64
|
|
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,60 @@
|
|
1
1
|
# Release History
|
2
2
|
|
3
|
+
### 1.1.0 / 2020-02-10
|
4
|
+
|
5
|
+
#### Features
|
6
|
+
|
7
|
+
* Add Table-level IAM Policy support
|
8
|
+
|
9
|
+
### 1.0.2 / 2020-01-23
|
10
|
+
|
11
|
+
#### Documentation
|
12
|
+
|
13
|
+
* Update copyright year
|
14
|
+
* Update Status documentation
|
15
|
+
|
16
|
+
### 1.0.1 / 2020-01-15
|
17
|
+
|
18
|
+
#### Documentation
|
19
|
+
|
20
|
+
* Update lower-level API documentation
|
21
|
+
|
22
|
+
### 1.0.0 / 2019-12-03
|
23
|
+
|
24
|
+
#### Documentation
|
25
|
+
|
26
|
+
* Update release level to GA
|
27
|
+
* Add OVERVIEW.md guide with samples
|
28
|
+
* Add sample to README.md
|
29
|
+
* Fix samples and copy edit all in-line documentation
|
30
|
+
* Correct error in lower-level API Table IAM documentation
|
31
|
+
* Update lower-level API documentation to indicate attributes as required
|
32
|
+
* Update low-level IAM Policy documentation
|
33
|
+
|
34
|
+
### 0.8.0 / 2019-11-01
|
35
|
+
|
36
|
+
#### ⚠ BREAKING CHANGES
|
37
|
+
|
38
|
+
* The following methods now raise Google::Cloud::Error instead of
|
39
|
+
Google::Gax::GaxError and/or GRPC::BadStatus:
|
40
|
+
* Table#mutate_row
|
41
|
+
* Table#read_modify_write_row
|
42
|
+
* Table#check_and_mutate_row
|
43
|
+
* Table#sample_row_keys
|
44
|
+
|
45
|
+
#### Features
|
46
|
+
|
47
|
+
* Raise Google::Cloud::Error from Table#mutate_row, Table#read_modify_write_row,
|
48
|
+
Table#check_and_mutate_row, and Table#sample_row_keys.
|
49
|
+
|
50
|
+
#### Bug Fixes
|
51
|
+
|
52
|
+
* Update minimum runtime dependencies
|
53
|
+
|
54
|
+
#### Documentation
|
55
|
+
|
56
|
+
* Update the list of GCP environments for automatic authentication
|
57
|
+
|
3
58
|
### 0.7.0 / 2019-10-22
|
4
59
|
|
5
60
|
#### Features
|
data/OVERVIEW.md
CHANGED
@@ -1,31 +1,400 @@
|
|
1
1
|
# Cloud Bigtable
|
2
2
|
|
3
|
-
|
3
|
+
Cloud Bigtable is a petabyte-scale, fully managed NoSQL database service for
|
4
|
+
large analytical and operational workloads. Ideal for ad tech, fintech, and IoT,
|
5
|
+
Cloud Bigtable offers consistent sub-10ms latency. Replication provides higher
|
6
|
+
availability, higher durability, and resilience in the face of zonal failures.
|
7
|
+
Cloud Bigtable is designed with a storage engine for machine learning
|
8
|
+
applications and provides easy integration with open source big data tools.
|
4
9
|
|
5
|
-
|
6
|
-
|
7
|
-
cloud project.
|
8
|
-
- [Product Documentation][]
|
10
|
+
For more information about Cloud Bigtable, read the [Cloud Bigtable
|
11
|
+
Documentation](https://cloud.google.com/bigtable/docs/).
|
9
12
|
|
10
|
-
|
11
|
-
|
12
|
-
|
13
|
+
The goal of google-cloud is to provide an API that is comfortable to Rubyists.
|
14
|
+
Your authentication credentials are detected automatically in Google Cloud
|
15
|
+
Platform (GCP), including Google Compute Engine (GCE), Google Kubernetes Engine
|
16
|
+
(GKE), Google App Engine (GAE), Google Cloud Functions (GCF) and Cloud Run. In
|
17
|
+
other environments you can configure authentication easily, either directly in
|
18
|
+
your code or via environment variables. Read more about the options for
|
19
|
+
connecting in the {file:AUTHENTICATION.md Authentication Guide}.
|
13
20
|
|
14
|
-
|
15
|
-
2. [Enable billing for your project.](https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project)
|
16
|
-
3. [Enable the Cloud Bigtable API.](https://console.cloud.google.com/apis/library/bigtable.googleapis.com)
|
17
|
-
4. [Setup Authentication.](https://googleapis.dev/ruby/google-cloud-bigtable/latest/file.AUTHENTICATION.html)
|
21
|
+
## Creating instances and clusters
|
18
22
|
|
19
|
-
|
20
|
-
|
21
|
-
|
22
|
-
|
23
|
-
|
23
|
+
When you first use Cloud Bigtable, you must create an instance, which is an
|
24
|
+
allocation of resources that are used by Cloud Bigtable. When you create an
|
25
|
+
instance, you must specify at least one cluster. Clusters describe where your
|
26
|
+
data is stored and how many nodes are used for your data.
|
27
|
+
|
28
|
+
Use {Google::Cloud::Bigtable::Project#create_instance Project#create_instance}
|
29
|
+
to create an instance. The following example creates a production instance with
|
30
|
+
one cluster and three nodes:
|
31
|
+
|
32
|
+
```ruby
|
33
|
+
require "google/cloud/bigtable"
|
34
|
+
|
35
|
+
bigtable = Google::Cloud::Bigtable.new
|
36
|
+
|
37
|
+
job = bigtable.create_instance(
|
38
|
+
"my-instance",
|
39
|
+
display_name: "Instance for user data",
|
40
|
+
labels: { "env" => "dev"}
|
41
|
+
) do |clusters|
|
42
|
+
clusters.add("test-cluster", "us-east1-b", nodes: 3, storage_type: :SSD)
|
43
|
+
end
|
44
|
+
|
45
|
+
job.done? #=> false
|
46
|
+
|
47
|
+
# To block until the operation completes.
|
48
|
+
job.wait_until_done!
|
49
|
+
job.done? #=> true
|
50
|
+
|
51
|
+
if job.error?
|
52
|
+
status = job.error
|
53
|
+
else
|
54
|
+
instance = job.instance
|
55
|
+
end
|
56
|
+
```
|
57
|
+
|
58
|
+
You can also create a low-cost development instance for development and testing,
|
59
|
+
with performance limited to the equivalent of a one-node cluster. There are no
|
60
|
+
monitoring or throughput guarantees; replication is not available; and the SLA
|
61
|
+
does not apply. When creating a development instance, you do not specify `nodes`
|
62
|
+
for your clusters:
|
63
|
+
|
64
|
+
```ruby
|
65
|
+
require "google/cloud/bigtable"
|
66
|
+
|
67
|
+
bigtable = Google::Cloud::Bigtable.new
|
68
|
+
|
69
|
+
job = bigtable.create_instance(
|
70
|
+
"my-instance",
|
71
|
+
display_name: "Instance for user data",
|
72
|
+
type: :DEVELOPMENT,
|
73
|
+
labels: { "env" => "dev"}
|
74
|
+
) do |clusters|
|
75
|
+
clusters.add("test-cluster", "us-east1-b") # nodes not allowed
|
76
|
+
end
|
77
|
+
|
78
|
+
job.done? #=> false
|
79
|
+
|
80
|
+
# Reload job until completion.
|
81
|
+
job.wait_until_done!
|
82
|
+
job.done? #=> true
|
83
|
+
|
84
|
+
if job.error?
|
85
|
+
status = job.error
|
86
|
+
else
|
87
|
+
instance = job.instance
|
88
|
+
end
|
89
|
+
```
|
90
|
+
|
91
|
+
You can upgrade a development instance to a production instance at any time.
|
92
|
+
|
93
|
+
## Creating tables
|
94
|
+
|
95
|
+
Cloud Bigtable stores data in massively scalable tables, each of which is a
|
96
|
+
sorted key/value map. The table is composed of rows, each of which typically
|
97
|
+
describes a single entity, and columns, which contain individual values for each
|
98
|
+
row. Each row is indexed by a single row key, and columns that are related to
|
99
|
+
one another are typically grouped together into a column family. Each column is
|
100
|
+
identified by a combination of the column family and a column qualifier, which
|
101
|
+
is a unique name within the column family.
|
102
|
+
|
103
|
+
Each row/column intersection can contain multiple cells, or versions, at
|
104
|
+
different timestamps, providing a record of how the stored data has been altered
|
105
|
+
over time. Cloud Bigtable tables are sparse; if a cell does not contain any
|
106
|
+
data, it does not take up any space.
|
107
|
+
|
108
|
+
Use {Google::Cloud::Bigtable::Project#create_table Project#create_table} or
|
109
|
+
{Google::Cloud::Bigtable::Instance#create_table Instance#create_table} to
|
110
|
+
create a table:
|
111
|
+
|
112
|
+
```ruby
|
113
|
+
require "google/cloud/bigtable"
|
114
|
+
|
115
|
+
bigtable = Google::Cloud::Bigtable.new
|
116
|
+
|
117
|
+
table = bigtable.create_table("my-instance", "my-table")
|
118
|
+
puts table.name
|
119
|
+
```
|
120
|
+
|
121
|
+
When you create a table, you may specify the column families to use in the
|
122
|
+
table, as well as a list of row keys that will be used to initially split the
|
123
|
+
table into several tablets (tablets are similar to HBase regions):
|
124
|
+
|
125
|
+
```ruby
|
126
|
+
require "google/cloud/bigtable"
|
127
|
+
|
128
|
+
bigtable = Google::Cloud::Bigtable.new
|
129
|
+
|
130
|
+
initial_splits = ["user-00001", "user-100000", "others"]
|
131
|
+
table = bigtable.create_table("my-instance", "my-table", initial_splits: initial_splits) do |cfm|
|
132
|
+
cfm.add('cf1', gc_rule: Google::Cloud::Bigtable::GcRule.max_versions(5))
|
133
|
+
cfm.add('cf2', gc_rule: Google::Cloud::Bigtable::GcRule.max_age(600))
|
134
|
+
|
135
|
+
gc_rule = Google::Cloud::Bigtable::GcRule.union(
|
136
|
+
Google::Cloud::Bigtable::GcRule.max_age(1800),
|
137
|
+
Google::Cloud::Bigtable::GcRule.max_versions(3)
|
138
|
+
)
|
139
|
+
cfm.add('cf3', gc_rule: gc_rule)
|
140
|
+
end
|
141
|
+
|
142
|
+
puts table
|
143
|
+
```
|
144
|
+
|
145
|
+
You may also add, update, and delete column families later by passing a block to
|
146
|
+
{Google::Cloud::Bigtable::Table#column_families Table#column_families}:
|
147
|
+
|
148
|
+
```ruby
|
149
|
+
require "google/cloud/bigtable"
|
150
|
+
|
151
|
+
bigtable = Google::Cloud::Bigtable.new
|
152
|
+
|
153
|
+
table = bigtable.table("my-instance", "my-table", perform_lookup: true)
|
154
|
+
|
155
|
+
table.column_families do |cfm|
|
156
|
+
cfm.add "cf4", gc_rule: Google::Cloud::Bigtable::GcRule.max_age(600)
|
157
|
+
cfm.add "cf5", gc_rule: Google::Cloud::Bigtable::GcRule.max_versions(5)
|
158
|
+
|
159
|
+
rule_1 = Google::Cloud::Bigtable::GcRule.max_versions(3)
|
160
|
+
rule_2 = Google::Cloud::Bigtable::GcRule.max_age(600)
|
161
|
+
rule_union = Google::Cloud::Bigtable::GcRule.union(rule_1, rule_2)
|
162
|
+
cfm.update "cf2", gc_rule: rule_union
|
163
|
+
|
164
|
+
cfm.delete "cf3"
|
165
|
+
end
|
166
|
+
|
167
|
+
puts table.column_families["cf3"] #=> nil
|
168
|
+
```
|
169
|
+
|
170
|
+
## Writing data
|
171
|
+
|
172
|
+
The {Google::Cloud::Bigtable::Table Table} class allows you to perform the
|
173
|
+
following types of writes:
|
174
|
+
|
175
|
+
* Simple writes
|
176
|
+
* Increments and appends
|
177
|
+
* Conditional writes
|
178
|
+
* Batch writes
|
179
|
+
|
180
|
+
See [Cloud Bigtable writes](https://cloud.google.com/bigtable/docs/writes) for
|
181
|
+
detailed information about writing data.
|
182
|
+
|
183
|
+
### Simple writes
|
184
|
+
|
185
|
+
Use {Google::Cloud::Bigtable::Table#mutate_row Table#mutate_row} to make
|
186
|
+
one or more mutations to a single row:
|
187
|
+
|
188
|
+
```ruby
|
189
|
+
require "google/cloud/bigtable"
|
190
|
+
|
191
|
+
bigtable = Google::Cloud::Bigtable.new
|
192
|
+
|
193
|
+
table = bigtable.table("my-instance", "my-table")
|
194
|
+
|
195
|
+
entry = table.new_mutation_entry("user-1")
|
196
|
+
entry.set_cell(
|
197
|
+
"cf-1",
|
198
|
+
"field-1",
|
199
|
+
"XYZ",
|
200
|
+
timestamp: (Time.now.to_f * 1000000).round(-3) # microseconds
|
201
|
+
).delete_cells("cf2", "field02")
|
202
|
+
|
203
|
+
table.mutate_row(entry)
|
204
|
+
```
|
205
|
+
|
206
|
+
### Increments and appends
|
207
|
+
|
208
|
+
If you want to append data to an existing value or increment an existing numeric
|
209
|
+
value, use
|
210
|
+
{Google::Cloud::Bigtable::Table#read_modify_write_row Table#read_modify_write_row}:
|
211
|
+
|
212
|
+
```ruby
|
213
|
+
require "google/cloud/bigtable"
|
214
|
+
|
215
|
+
bigtable = Google::Cloud::Bigtable.new
|
216
|
+
table = bigtable.table("my-instance", "my-table")
|
217
|
+
|
218
|
+
rule_1 = table.new_read_modify_write_rule("cf", "field01")
|
219
|
+
rule_1.append("append-xyz")
|
220
|
+
|
221
|
+
rule_2 = table.new_read_modify_write_rule("cf", "field01")
|
222
|
+
rule_2.increment(1)
|
223
|
+
|
224
|
+
row = table.read_modify_write_row("user01", [rule_1, rule_2])
|
225
|
+
|
226
|
+
puts row.cells
|
227
|
+
```
|
228
|
+
|
229
|
+
Do not use `read_modify_write_row` if you are using an app profile that has
|
230
|
+
multi-cluster routing. (See
|
231
|
+
{Google::Cloud::Bigtable::AppProfile#routing_policy AppProfile#routing_policy}.)
|
232
|
+
|
233
|
+
### Conditional writes
|
234
|
+
|
235
|
+
To check a row for a condition and then, depending on the result, write data to
|
236
|
+
that row, use
|
237
|
+
{Google::Cloud::Bigtable::Table#check_and_mutate_row Table#check_and_mutate_row}:
|
238
|
+
|
239
|
+
```ruby
|
240
|
+
require "google/cloud/bigtable"
|
241
|
+
|
242
|
+
bigtable = Google::Cloud::Bigtable.new
|
243
|
+
table = bigtable.table("my-instance", "my-table")
|
244
|
+
|
245
|
+
predicate_filter = Google::Cloud::Bigtable::RowFilter.key("user-10")
|
246
|
+
on_match_mutations = Google::Cloud::Bigtable::MutationEntry.new
|
247
|
+
on_match_mutations.set_cell(
|
248
|
+
"cf-1",
|
249
|
+
"field-1",
|
250
|
+
"XYZ",
|
251
|
+
timestamp: (Time.now.to_f * 1000000).round(-3) # microseconds
|
252
|
+
).delete_cells("cf2", "field02")
|
253
|
+
|
254
|
+
otherwise_mutations = Google::Cloud::Bigtable::MutationEntry.new
|
255
|
+
otherwise_mutations.delete_from_family("cf3")
|
256
|
+
|
257
|
+
predicate_matched = table.check_and_mutate_row(
|
258
|
+
"user01",
|
259
|
+
predicate_filter,
|
260
|
+
on_match: on_match_mutations,
|
261
|
+
otherwise: otherwise_mutations
|
262
|
+
)
|
263
|
+
|
264
|
+
if predicate_matched
|
265
|
+
puts "All predicates matched"
|
266
|
+
end
|
267
|
+
```
|
268
|
+
|
269
|
+
Do not use `check_and_mutate_row` if you are using an app profile that has
|
270
|
+
multi-cluster routing. (See
|
271
|
+
{Google::Cloud::Bigtable::AppProfile#routing_policy AppProfile#routing_policy}.)
|
272
|
+
|
273
|
+
### Batch writes
|
274
|
+
|
275
|
+
You can write more than one row in a single RPC using
|
276
|
+
{Google::Cloud::Bigtable::Table#mutate_rows Table#mutate_rows}:
|
277
|
+
|
278
|
+
```ruby
|
279
|
+
require "google/cloud/bigtable"
|
280
|
+
|
281
|
+
bigtable = Google::Cloud::Bigtable.new
|
282
|
+
|
283
|
+
table = bigtable.table("my-instance", "my-table")
|
284
|
+
|
285
|
+
entries = []
|
286
|
+
entries << table.new_mutation_entry("row-1").set_cell("cf1", "field1", "XYZ")
|
287
|
+
entries << table.new_mutation_entry("row-2").set_cell("cf1", "field1", "ABC")
|
288
|
+
responses = table.mutate_rows(entries)
|
289
|
+
|
290
|
+
responses.each do |response|
|
291
|
+
puts response.status.description
|
292
|
+
end
|
293
|
+
```
|
294
|
+
|
295
|
+
Each entry in the request is atomic, but the request as a whole is not. As shown
|
296
|
+
above, Cloud Bigtable returns a list of responses corresponding to the entries.
|
297
|
+
|
298
|
+
## Reading data
|
299
|
+
|
300
|
+
The {Google::Cloud::Bigtable::Table Table} class also enables you to read data.
|
301
|
+
|
302
|
+
Use {Google::Cloud::Bigtable::Table#read_row Table#read_row} to read a single
|
303
|
+
row by key:
|
304
|
+
|
305
|
+
```ruby
|
306
|
+
require "google/cloud/bigtable"
|
307
|
+
|
308
|
+
bigtable = Google::Cloud::Bigtable.new
|
309
|
+
table = bigtable.table("my-instance", "my-table")
|
310
|
+
|
311
|
+
row = table.read_row("user-1")
|
312
|
+
```
|
313
|
+
|
314
|
+
If desired, you can apply a filter:
|
315
|
+
|
316
|
+
```ruby
|
317
|
+
require "google/cloud/bigtable"
|
318
|
+
|
319
|
+
bigtable = Google::Cloud::Bigtable.new
|
320
|
+
table = bigtable.table("my-instance", "my-table")
|
321
|
+
|
322
|
+
filter = Google::Cloud::Bigtable::RowFilter.cells_per_row(3)
|
323
|
+
|
324
|
+
row = table.read_row("user-1", filter: filter)
|
325
|
+
```
|
326
|
+
|
327
|
+
For multiple rows, the
|
328
|
+
{Google::Cloud::Bigtable::Table#read_rows Table#read_rows} method streams back
|
329
|
+
the contents of all requested rows in key order:
|
330
|
+
|
331
|
+
```ruby
|
332
|
+
require "google/cloud/bigtable"
|
333
|
+
|
334
|
+
bigtable = Google::Cloud::Bigtable.new
|
335
|
+
table = bigtable.table("my-instance", "my-table")
|
336
|
+
|
337
|
+
table.read_rows(keys: ["user-1", "user-2"]).each do |row|
|
338
|
+
puts row
|
339
|
+
end
|
340
|
+
```
|
341
|
+
|
342
|
+
Instead of specifying individual keys (or a range), you can often just use a
|
343
|
+
filter:
|
344
|
+
|
345
|
+
```ruby
|
346
|
+
require "google/cloud/bigtable"
|
347
|
+
|
348
|
+
bigtable = Google::Cloud::Bigtable.new
|
349
|
+
table = bigtable.table("my-instance", "my-table")
|
350
|
+
|
351
|
+
filter = table.filter.key("user-*")
|
352
|
+
# OR
|
353
|
+
# filter = Google::Cloud::Bigtable::RowFilter.key("user-*")
|
354
|
+
|
355
|
+
table.read_rows(filter: filter).each do |row|
|
356
|
+
puts row
|
357
|
+
end
|
358
|
+
```
|
359
|
+
|
360
|
+
## Deleting rows, tables, and instances
|
361
|
+
|
362
|
+
Use {Google::Cloud::Bigtable::Table#drop_row_range Table#drop_row_range} to
|
363
|
+
delete some or all of the rows in a table:
|
364
|
+
|
365
|
+
```ruby
|
366
|
+
require "google/cloud/bigtable"
|
367
|
+
|
368
|
+
bigtable = Google::Cloud::Bigtable.new
|
369
|
+
|
370
|
+
table = bigtable.table("my-instance", "my-table")
|
371
|
+
|
372
|
+
# Delete rows using row key prefix.
|
373
|
+
table.drop_row_range(row_key_prefix: "user-100")
|
374
|
+
|
375
|
+
# Delete all data with a timeout
|
376
|
+
table.drop_row_range(delete_all_data: true, timeout: 120) # 120 seconds.
|
377
|
+
```
|
378
|
+
|
379
|
+
Delete tables and instances using
|
380
|
+
{Google::Cloud::Bigtable::Table#delete Table#delete} and
|
381
|
+
{Google::Cloud::Bigtable::Instance#delete Instance#delete}, respectively:
|
382
|
+
|
383
|
+
```ruby
|
384
|
+
require "google/cloud/bigtable"
|
385
|
+
|
386
|
+
bigtable = Google::Cloud::Bigtable.new
|
387
|
+
|
388
|
+
instance = bigtable.instance("my-instance")
|
389
|
+
table = instance.table("my-table")
|
390
|
+
|
391
|
+
table.delete
|
392
|
+
|
393
|
+
instance.delete
|
394
|
+
```
|
24
395
|
|
25
396
|
## Additional information
|
26
397
|
|
27
398
|
Google Bigtable can be configured to use an emulator or to enable gRPC's
|
28
399
|
logging. To learn more, see the {file:EMULATOR.md Emulator guide} and
|
29
400
|
{file:LOGGING.md Logging guide}.
|
30
|
-
|
31
|
-
[Product Documentation]: https://cloud.google.com/bigtable
|