google-cloud-firestore 2.7.2 → 2.15.0
- checksums.yaml +4 -4
- data/AUTHENTICATION.md +8 -26
- data/CHANGELOG.md +69 -0
- data/LOGGING.md +1 -1
- data/lib/google/cloud/firestore/aggregate_query.rb +285 -0
- data/lib/google/cloud/firestore/aggregate_query_snapshot.rb +145 -0
- data/lib/google/cloud/firestore/bulk_commit_batch.rb +73 -0
- data/lib/google/cloud/firestore/bulk_writer.rb +558 -0
- data/lib/google/cloud/firestore/bulk_writer_exception.rb +40 -0
- data/lib/google/cloud/firestore/bulk_writer_operation.rb +126 -0
- data/lib/google/cloud/firestore/bulk_writer_scheduler.rb +164 -0
- data/lib/google/cloud/firestore/client.rb +161 -10
- data/lib/google/cloud/firestore/collection_group.rb +20 -4
- data/lib/google/cloud/firestore/collection_reference.rb +17 -2
- data/lib/google/cloud/firestore/collection_reference_list.rb +4 -3
- data/lib/google/cloud/firestore/convert.rb +6 -7
- data/lib/google/cloud/firestore/document_reference/list.rb +5 -3
- data/lib/google/cloud/firestore/document_reference.rb +20 -3
- data/lib/google/cloud/firestore/document_snapshot.rb +1 -1
- data/lib/google/cloud/firestore/errors.rb +60 -0
- data/lib/google/cloud/firestore/filter.rb +326 -0
- data/lib/google/cloud/firestore/promise/future.rb +97 -0
- data/lib/google/cloud/firestore/query.rb +112 -89
- data/lib/google/cloud/firestore/rate_limiter.rb +80 -0
- data/lib/google/cloud/firestore/service.rb +74 -23
- data/lib/google/cloud/firestore/transaction.rb +57 -4
- data/lib/google/cloud/firestore/version.rb +1 -1
- data/lib/google/cloud/firestore.rb +17 -7
- data/lib/google-cloud-firestore.rb +45 -8
- metadata +17 -146
data/lib/google/cloud/firestore/bulk_writer_operation.rb
@@ -0,0 +1,126 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require "concurrent"
+require "google/cloud/firestore/bulk_writer_exception"
+
+
+module Google
+  module Cloud
+    module Firestore
+      ##
+      #
+      # @private A BulkWriterOperation object refers to a write operation and contains
+      # all the necessary information for a specific write task, including meta
+      # information like the current number of attempts.
+      #
+      class BulkWriterOperation
+        attr_reader :retry_time
+        attr_reader :result
+        attr_reader :completion_event
+        attr_reader :write
+
+        ##
+        # Initialize the object
+        def initialize write, retries
+          @write = write
+          @failed_attempts = 0
+          @retries = retries
+          @retry_time = Time.now
+          @completion_event = Concurrent::Event.new
+        end
+
+        ##
+        # Processing to be done when the response is a success.
+        # Updates the result and sets the completion event.
+        #
+        # @param [Google::Cloud::Firestore::V1::WriteResult] result The result returned in the response.
+        def on_success result
+          begin
+            @result = WriteResult.new result
+          rescue StandardError => e
+            raise BulkWriterOperationError, e
+          ensure
+            @completion_event.set
+          end
+        end
+
+        ##
+        # Processing to be done when the response is a failure.
+        # Updates the count of failed attempts. If the retry count reaches
+        # the upper threshold, the operation is marked
+        # as a failure and the completion event is set.
+        #
+        # @param [Google::Rpc::Status] status The status received in the response.
+        #
+        def on_failure status
+          @failed_attempts += 1
+          if @failed_attempts == @retries + 1
+            begin
+              @result = BulkWriterException.new status
+            rescue StandardError => e
+              raise BulkWriterOperationError, e
+            ensure
+              @completion_event.set
+            end
+          else
+            backoff_duration
+          end
+        end
+
+        ##
+        # Increases the waiting time for retry quadratically
+        # with the number of failed attempts.
+        #
+        def backoff_duration
+          @retry_time = Time.now + (@failed_attempts**2)
+        end
+
+        ##
+        # Represents the result of applying a write.
+        #
+        # @example
+        #   require "google/cloud/firestore"
+        #
+        #   firestore = Google::Cloud::Firestore.new
+        #   bw = firestore.bulk_writer
+        #
+        #   # Set the data for NYC
+        #   result = bw.set("cities/NYC", { name: "New York City" })
+        #
+        #   result.wait!
+        #
+        #   puts result.value
+        #
+        class WriteResult
+          ##
+          # The last update time of the document after applying the write. Set to
+          # nil for a +delete+ mutation.
+          #
+          # If the write did not actually change the document, this will be
+          # the previous update_time.
+          #
+          # @return [Time] The last update time.
+          attr_reader :update_time
+
+          ##
+          # @private
+          def initialize result
+            @update_time = Convert.timestamp_to_time result.update_time
+          end
+        end
+      end
+    end
+  end
+end
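For orientation, `backoff_duration` above spaces retries out quadratically: attempt n becomes eligible again n² seconds after it fails. A standalone sketch (not gem code) of the resulting schedule:

```ruby
# Quadratic retry spacing, mirroring BulkWriterOperation#backoff_duration.
failed_attempts = 0
3.times do
  failed_attempts += 1
  delay = failed_attempts**2
  puts "attempt #{failed_attempts} failed; retryable again in #{delay}s"
end
# attempt 1 failed; retryable again in 1s
# attempt 2 failed; retryable again in 4s
# attempt 3 failed; retryable again in 9s
```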
data/lib/google/cloud/firestore/bulk_writer_scheduler.rb
@@ -0,0 +1,164 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require "concurrent"
+require "google/cloud/firestore/errors"
+require "google/cloud/firestore/bulk_writer_operation"
+require "google/cloud/firestore/rate_limiter"
+require "google/cloud/firestore/bulk_commit_batch"
+require "google/cloud/firestore/bulk_writer_exception"
+
+
+module Google
+  module Cloud
+    module Firestore
+      ##
+      #
+      # @private Accumulates BulkWriterOperations from the BulkWriter, schedules
+      # them in accordance with the 500/50/5 ramp-up rule, and retries failed
+      # operations from the BulkCommitBatch.
+      #
+      class BulkWriterScheduler
+        MAX_BATCH_SIZE = 20
+        BATCH_THREAD_COUNT = 4
+
+        ##
+        # Initialize the attributes and start the schedule_operations job
+        #
+        def initialize client, service, batch_threads
+          @client = client
+          @service = service
+          @rate_limiter = RateLimiter.new
+          @buffered_operations = []
+          @batch_threads = (batch_threads || BATCH_THREAD_COUNT).to_i
+          @batch_thread_pool = Concurrent::ThreadPoolExecutor.new max_threads: @batch_threads,
+                                                                  max_queue: 0,
+                                                                  auto_terminate: true
+          @retry_operations = []
+          @mutex = Mutex.new
+          start_scheduling_operations
+        end
+
+        def start_scheduling_operations
+          Concurrent::Promises.future_on @batch_thread_pool do
+            begin
+              schedule_operations
+            rescue StandardError
+              # TODO: Log the error when logging is available
+              retry
+            end
+          end
+        end
+
+        def add_operation operation
+          @mutex.synchronize { @buffered_operations << operation }
+        end
+
+        ##
+        # Closes the scheduler object.
+        # Waits for the enqueued tasks to complete
+        # before closing down.
+        #
+        # @return [nil]
+        def close
+          @mutex.synchronize do
+            @batch_thread_pool.shutdown
+            @batch_thread_pool.wait_for_termination 1
+            @batch_thread_pool.kill unless @batch_thread_pool.shutdown?
+          end
+        end
+
+        private
+
+        ##
+        # @private Adds failed operations to the retry heap.
+        #
+        def post_commit_batch bulk_commit_batch
+          @mutex.synchronize do
+            bulk_commit_batch.operations.each do |operation|
+              unless operation.completion_event.set?
+                @retry_operations << operation
+              end
+            end
+            @retry_operations.sort_by!(&:retry_time)
+          end
+        end
+
+        ##
+        # @private Commits a batch of scheduled operations.
+        # Batch size <= 20 to match the constraint of request size < 9.8 MB
+        #
+        # @return [nil]
+        def commit_batch bulk_commit_batch
+          begin
+            Concurrent::Promises.future_on @batch_thread_pool, bulk_commit_batch do |batch|
+              begin
+                batch.commit
+              rescue StandardError
+                # TODO: Log the errors while committing a batch
+              ensure
+                post_commit_batch bulk_commit_batch
+              end
+            end
+          rescue StandardError => e
+            post_commit_batch bulk_commit_batch
+            raise BulkWriterSchedulerError, e.message
+          end
+        end
+
+        ##
+        # @private Schedules the enqueued operations in batches.
+        #
+        # @return [nil]
+        def schedule_operations
+          loop do
+            break if @batch_thread_pool.shuttingdown?
+            dequeue_retry_operations
+            batch_size = [MAX_BATCH_SIZE, @buffered_operations.length].min
+            if batch_size.zero?
+              sleep 0.001
+              next
+            end
+            @rate_limiter.wait_for_tokens batch_size
+            operations = dequeue_buffered_operations batch_size
+            commit_batch BulkCommitBatch.new(@service, operations)
+          end
+        end
+
+        ##
+        # @private Removes BulkWriterOperations from the buffered queue to be
+        # scheduled in the current batch
+        #
+        def dequeue_buffered_operations size
+          @mutex.synchronize do
+            @buffered_operations.shift size
+          end
+        end
+
+        ##
+        # @private Removes BulkWriterOperations from the retry queue to be
+        # scheduled in the current batch
+        #
+        def dequeue_retry_operations
+          @mutex.synchronize do
+            while @retry_operations.length.positive? && @retry_operations.first.retry_time <= Time.now
+              @buffered_operations << @retry_operations.shift
+            end
+          end
+        end
+      end
+    end
+  end
+end
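The scheduler's concurrency building blocks come from concurrent-ruby: a bounded `ThreadPoolExecutor` plus `Concurrent::Promises.future_on` to push work off the calling thread, the same pattern `commit_batch` uses above. A self-contained sketch of that pattern (values illustrative, not gem code):

```ruby
require "concurrent"

# Bounded pool, as in BulkWriterScheduler#initialize.
pool = Concurrent::ThreadPoolExecutor.new max_threads: 4,
                                          max_queue: 0,
                                          auto_terminate: true

# Run work on the pool and rendezvous on the result.
future = Concurrent::Promises.future_on(pool) do
  sleep 0.1 # stand-in for BulkCommitBatch#commit
  :committed
end
puts future.value! # blocks until the block has run; => :committed

# Orderly shutdown, as in BulkWriterScheduler#close.
pool.shutdown
pool.wait_for_termination 1
```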
data/lib/google/cloud/firestore/client.rb
@@ -23,6 +23,8 @@ require "google/cloud/firestore/document_snapshot"
 require "google/cloud/firestore/collection_group"
 require "google/cloud/firestore/batch"
 require "google/cloud/firestore/transaction"
+require "google/cloud/firestore/bulk_writer"
+require "google/cloud/firestore/filter"
 
 module Google
   module Cloud
@@ -69,7 +71,7 @@ module Google
       #
       # @return [String] database identifier.
       def database_id
-        "(default)"
+        service.database
       end
 
       ##
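The `database_id` change above makes the client report the database configured on the service rather than a fixed value; this release line added support for non-default databases. A sketch, assuming the `database_id` option on the constructor:

```ruby
require "google/cloud/firestore"

# database_id here is an assumption based on the multi-database
# support added in this release line.
firestore = Google::Cloud::Firestore.new database_id: "my-secondary-db"
puts firestore.database_id # => "my-secondary-db"
```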
@@ -85,6 +87,9 @@ module Google
       ##
       # Retrieves an enumerator for the root collections.
       #
+      # @param [Time] read_time Reads documents as they were at the given time.
+      #   This may not be older than 270 seconds. Optional.
+      #
       # @yield [collections] The block for accessing the collections.
       # @yieldparam [CollectionReference] collection A collection reference object.
       #
@@ -101,10 +106,21 @@ module Google
       #     puts col.collection_id
       #   end
       #
-      def cols &block
+      # @example
+      #   require "google/cloud/firestore"
+      #
+      #   firestore = Google::Cloud::Firestore.new
+      #   read_time = Time.now
+      #
+      #   # Get the root collections
+      #   firestore.cols(read_time: read_time).each do |col|
+      #     puts col.collection_id
+      #   end
+      #
+      def cols read_time: nil, &block
         ensure_service!
-        grpc = service.list_collections "#{path}/documents"
-        cols_enum = CollectionReferenceList.from_grpc(grpc, self, "#{path}/documents").all
+        grpc = service.list_collections "#{path}/documents", read_time: read_time
+        cols_enum = CollectionReferenceList.from_grpc(grpc, self, "#{path}/documents", read_time: read_time).all
         cols_enum.each(&block) if block_given?
         cols_enum
       end
@@ -171,6 +187,56 @@ module Google
       end
       alias collection_group col_group
 
+      ##
+      # Creates a filter object.
+      #
+      # @param field [FieldPath, String, Symbol] A field path to filter
+      #   results with.
+      #   If a {FieldPath} object is not provided then the field will be
+      #   treated as a dotted string, meaning the string represents individual
+      #   fields joined by ".". Fields containing `~`, `*`, `/`, `[`, `]`, and
+      #   `.` cannot be in a dotted string, and should be provided using a
+      #   {FieldPath} object instead.
+      #
+      # @param operator [String, Symbol] The operation to compare the field
+      #   to. Acceptable values include:
+      #   * less than: `<`, `lt`
+      #   * less than or equal: `<=`, `lte`
+      #   * greater than: `>`, `gt`
+      #   * greater than or equal: `>=`, `gte`
+      #   * equal: `=`, `==`, `eq`, `eql`, `is`
+      #   * not equal: `!=`
+      #   * in: `in`
+      #   * not in: `not-in`, `not_in`
+      #   * array contains: `array-contains`, `array_contains`
+      #
+      # @param value [Object] The value to compare the property to.
+      #   Possible values are:
+      #   * Integer
+      #   * Float/BigDecimal
+      #   * String
+      #   * Boolean
+      #   * Array
+      #   * Date/Time
+      #   * StringIO
+      #   * Google::Cloud::Datastore::Key
+      #   * Google::Cloud::Datastore::Entity
+      #   * nil
+      #
+      # @return [Google::Cloud::Firestore::Filter] New filter object.
+      #
+      # @example
+      #   require "google/cloud/firestore"
+      #
+      #   firestore = Google::Cloud::Firestore.new
+      #
+      #   # Create a filter
+      #   filter = firestore.filter(:population, :>=, 1000000)
+      #
+      def filter field, operator, value
+        Filter.new field, operator, value
+      end
+
       ##
       # Retrieves a document reference.
       #
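The `Filter` objects created by the new `filter` method compose before being attached to a query. A hedged sketch of that flow, assuming `Query#where` accepts a `Filter` and filters expose `and`/`or` combinators in this release (field names illustrative):

```ruby
require "google/cloud/firestore"

firestore = Google::Cloud::Firestore.new

big     = firestore.filter(:population, :>=, 1_000_000)
coastal = firestore.filter(:region, :==, "west_coast")

# Combine the filters and hand the composite to the query.
firestore.col(:cities).where(big.and(coastal)).get do |city|
  puts city.document_id
end
```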
@@ -217,6 +283,8 @@ module Google
       #   individual fields joined by ".". Fields containing `~`, `*`, `/`,
       #   `[`, `]`, and `.` cannot be in a dotted string, and should provided
       #   using a {FieldPath} object instead. (See {#field_path}.)
+      # @param [Time] read_time Reads documents as they were at the given time.
+      #   This may not be older than 270 seconds. Optional.
       #
       # @yield [documents] The block for accessing the document snapshots.
       # @yieldparam [DocumentSnapshot] document A document snapshot.
@@ -245,11 +313,24 @@ module Google
       #     puts "#{city.document_id} has #{city[:population]} residents."
       #   end
       #
-      def get_all *docs, field_mask: nil
+      # @example Get docs using a read_time:
+      #   require "google/cloud/firestore"
+      #
+      #   firestore = Google::Cloud::Firestore.new
+      #
+      #   read_time = Time.now
+      #
+      #   # Get and print city documents
+      #   cities = ["cities/NYC", "cities/SF", "cities/LA"]
+      #   firestore.get_all(cities, read_time: read_time).each do |city|
+      #     puts "#{city.document_id} has #{city[:population]} residents."
+      #   end
+      #
+      def get_all *docs, field_mask: nil, read_time: nil
         ensure_service!
 
         unless block_given?
-          return enum_for :get_all, *docs, field_mask: field_mask
+          return enum_for :get_all, *docs, field_mask: field_mask, read_time: read_time
         end
 
         doc_paths = Array(docs).flatten.map do |doc_path|
@@ -264,7 +345,7 @@ module Google
         end
         mask = nil if mask.empty?
 
-        results = service.get_documents doc_paths, mask: mask
+        results = service.get_documents doc_paths, mask: mask, read_time: read_time
         results.each do |result|
           next if result.result.nil?
           yield DocumentSnapshot.from_batch_result result, self
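Because `read_time` is now threaded through `cols`, `get_all`, and `list_documents` alike, passing one timestamp to several calls pins them all to the same snapshot (subject to the 270-second limit noted above). A sketch:

```ruby
require "google/cloud/firestore"

firestore = Google::Cloud::Firestore.new
snapshot_at = Time.now - 60 # one minute ago, within the 270 s window

# Both calls observe the database as of snapshot_at.
firestore.get_all(["cities/NYC", "cities/SF"], read_time: snapshot_at) do |doc|
  puts doc.document_id
end
firestore.cols(read_time: snapshot_at) { |col| puts col.collection_id }
```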
@@ -668,13 +749,83 @@ module Google
         end
       end
 
+      ##
+      # Create a transaction to perform multiple reads that are
+      # executed atomically at a single logical point in time in a database.
+      #
+      # Reads within the block are executed against a consistent snapshot
+      # of the database; no writes can be performed in a read-only
+      # transaction. See {Transaction}.
+      #
+      # @see https://firebase.google.com/docs/firestore/manage-data/transactions
+      #   Transactions and Batched Writes
+      #
+      # @param [Time] read_time Reads documents as they were at the given time.
+      #   This may not be older than 270 seconds. Optional.
+      #
+      # @yield [transaction] The block for reading data.
+      # @yieldparam [Transaction] transaction The transaction object for
+      #   making changes.
+      #
+      # @return [Object] The return value of the provided
+      #   yield block.
+      #
+      # @example Read-only transaction with read time
+      #   require "google/cloud/firestore"
+      #
+      #   firestore = Google::Cloud::Firestore.new
+      #
+      #   # Get a document reference
+      #   nyc_ref = firestore.doc "cities/NYC"
+      #
+      #   read_time = Time.now
+      #
+      #   firestore.read_only_transaction(read_time: read_time) do |tx|
+      #     # Get a document snapshot
+      #     nyc_snap = tx.get nyc_ref
+      #   end
+      #
+      def read_only_transaction read_time: nil
+        transaction = Transaction.from_client self, read_time: read_time, read_only: true
+        yield transaction
+      end
+
+      ##
+      # Create a bulk writer to perform multiple writes that are
+      # executed in parallel.
+      #
+      # @param [Integer] request_threads The number of threads used for handling
+      #   requests. Default is 2. Optional.
+      # @param [Integer] batch_threads The number of threads used for processing
+      #   batches. Default is 4. Optional.
+      # @param [Integer] retries The number of times a failed write request will
+      #   be retried (with exponential delay) before being marked as failure. The
+      #   maximum number of attempts is 15. Optional.
+      #
+      # @return [Google::Cloud::Firestore::BulkWriter] The bulk writer object.
+      #
+      # @example Initializing a BulkWriter with all the configurations.
+      #   require "google/cloud/firestore"
+      #
+      #   firestore = Google::Cloud::Firestore.new
+      #
+      #   bw = firestore.bulk_writer request_threads: 4, batch_threads: 10, retries: 10
+      #
+      #   bulk_write_result = bw.create "cities/NYC", { name: "New York City" }
+      #
+      def bulk_writer request_threads: nil, batch_threads: nil, retries: nil
+        BulkWriter.new self, @service, request_threads: request_threads,
+                       batch_threads: batch_threads, retries: retries
+      end
+
       # @!endgroup
 
       # @private
-      def list_documents parent, collection_id, token: nil, max: nil
+      def list_documents parent, collection_id, token: nil, max: nil, read_time: nil
         ensure_service!
-        grpc = service.list_documents parent, collection_id, token: token, max: max
-        DocumentReference::List.from_grpc grpc, self, parent, collection_id
+        grpc = service.list_documents parent, collection_id, token: token, max: max, read_time: read_time
+        DocumentReference::List.from_grpc grpc, self, parent, collection_id, read_time: read_time
       end
 
       protected
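Putting the new pieces together: each `BulkWriter` write is enqueued through the scheduler shown earlier and returns a future that resolves to a `WriteResult`. A hedged sketch of the lifecycle, assuming the `flush`/`close` methods this release adds to `BulkWriter`:

```ruby
require "google/cloud/firestore"

firestore = Google::Cloud::Firestore.new
bw = firestore.bulk_writer batch_threads: 4, retries: 5

# Each write returns a future rather than blocking.
futures = %w[NYC SF LA].map do |id|
  bw.set "cities/#{id}", { name: id }
end

bw.flush # block until everything enqueued so far is committed
futures.each { |future| puts future.value.update_time }
bw.close # reject further writes and shut the scheduler down
```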
data/lib/google/cloud/firestore/collection_group.rb
@@ -48,6 +48,8 @@ module Google
       #
       # @param [Integer] partition_count The desired maximum number of partition points. The number must be strictly
       #   positive. The actual number of partitions returned may be fewer.
+      # @param [Time] read_time Reads documents as they were at the given time.
+      #   This may not be older than 270 seconds. Optional.
       #
       # @return [Array<QueryPartition>] An ordered array of query partitions.
       #
@@ -62,7 +64,20 @@ module Google
       #
       #   queries = partitions.map(&:to_query)
       #
-      def partitions partition_count
+      # @example Partition with read time
+      #   require "google/cloud/firestore"
+      #
+      #   firestore = Google::Cloud::Firestore.new
+      #
+      #   col_group = firestore.col_group "cities"
+      #
+      #   read_time = Time.now
+      #
+      #   partitions = col_group.partitions 3, read_time: read_time
+      #
+      #   queries = partitions.map(&:to_query)
+      #
+      def partitions partition_count, read_time: nil
         ensure_service!
 
         raise ArgumentError, "partition_count must be > 0" unless partition_count.positive?
@@ -75,7 +90,7 @@ module Google
 
         grpc_partitions = if partition_count.positive?
                             # Retrieve all pages, since cursor order is not guaranteed and they must be sorted.
-                            list_all partition_count, query_with_default_order
+                            list_all partition_count, query_with_default_order, read_time
                           else
                             [] # Ensure that a single, empty QueryPartition is returned.
                           end
@@ -118,11 +133,12 @@ module Google
 
       protected
 
-      def list_all partition_count, query_with_default_order
+      def list_all partition_count, query_with_default_order, read_time
         grpc_partitions = []
        token = nil
        loop do
-          grpc = service.partition_query parent_path, query_with_default_order.query, partition_count, token: token
+          grpc = service.partition_query parent_path, query_with_default_order.query, partition_count,
+                                         token: token, read_time: read_time
          grpc_partitions += Array(grpc.partitions)
          token = grpc.next_page_token
          token = nil if token == ""
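`partitions` exists so a large collection-group query can be split and run in parallel, and with `read_time` every partition can now observe the same snapshot. A sketch of the fan-out (the 3-way split and thread-per-partition approach are illustrative):

```ruby
require "google/cloud/firestore"

firestore = Google::Cloud::Firestore.new
snapshot_at = Time.now

partitions = firestore.col_group("cities").partitions 3, read_time: snapshot_at

# Run each partition's query on its own thread.
threads = partitions.map(&:to_query).map do |query|
  Thread.new { query.get.to_a }
end
docs = threads.flat_map(&:value)
puts docs.size
```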
data/lib/google/cloud/firestore/collection_reference.rb
@@ -147,6 +147,8 @@ module Google
       # @param [String] token A previously-returned page token representing
       #   part of the larger set of results to view.
       # @param [Integer] max Maximum number of results to return.
+      # @param [Time] read_time Reads documents as they were at the given time.
+      #   This may not be older than 270 seconds. Optional.
       #
       # @return [Array<DocumentReference>] An array of document references.
       #
@@ -161,10 +163,23 @@ module Google
       #     puts doc_ref.document_id
       #   end
       #
-      def list_documents token: nil, max: nil
+      # @example List documents with read time
+      #   require "google/cloud/firestore"
+      #
+      #   firestore = Google::Cloud::Firestore.new
+      #
+      #   read_time = Time.now
+      #
+      #   col = firestore.col "cities"
+      #
+      #   col.list_documents(read_time: read_time).each do |doc_ref|
+      #     puts doc_ref.document_id
+      #   end
+      #
+      def list_documents token: nil, max: nil, read_time: nil
         ensure_client!
         client.list_documents \
-          parent_path, collection_id, token: token, max: max
+          parent_path, collection_id, token: token, max: max, read_time: read_time
       end
 
       ##
data/lib/google/cloud/firestore/collection_reference_list.rb
@@ -52,8 +52,8 @@ module Google
       def next
         return nil unless next?
         ensure_service!
-        grpc = @client.service.list_collections @parent, token: token, max: @max
-        self.class.from_grpc grpc, @client, @parent, max: @max
+        grpc = @client.service.list_collections @parent, token: token, max: @max, read_time: @read_time
+        self.class.from_grpc grpc, @client, @parent, max: @max, read_time: @read_time
       end
 
       ##
@@ -110,7 +110,7 @@ module Google
       ##
       # @private New CollectionReference::List from a `Google::Cloud::Firestore::V1::ListCollectionIdsResponse`
       # object.
-      def self.from_grpc grpc, client, parent, max: nil
+      def self.from_grpc grpc, client, parent, max: nil, read_time: nil
         raise ArgumentError, "parent is required" unless parent
         cols = CollectionReferenceList.new(Array(grpc.collection_ids).map do |collection_id|
           CollectionReference.from_path "#{parent}/#{collection_id}", client
@@ -121,6 +121,7 @@ module Google
         cols.instance_variable_set :@client, client
         cols.instance_variable_set :@parent, parent
         cols.instance_variable_set :@max, max
+        cols.instance_variable_set :@read_time, read_time
         cols
       end
 
data/lib/google/cloud/firestore/convert.rb
@@ -365,7 +365,7 @@ module Google
           write.current_document = \
             Google::Cloud::Firestore::V1::Precondition.new({
              exists: exists, update_time: time_to_timestamp(update_time)
-            }.delete_if { |_k, v| v.nil? })
+            }.compact)
         end
 
         write
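The change above swaps `delete_if { |_k, v| v.nil? }` for `Hash#compact`, which drops nil-valued keys and reads more directly:

```ruby
h = { exists: true, update_time: nil }
h.compact                          # => { exists: true }
h.dup.delete_if { |_k, v| v.nil? } # => { exists: true } (same result; delete_if mutates)
```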
@@ -381,12 +381,12 @@ module Google
             return val if val
           end
         when Hash
-          obj.each do |_k, v|
+          obj.each_value do |v|
             val = field_value_nested? v, field_value_type
             return val if val
           end
         end
-        nil
+        nil # rubocop:disable Style/ReturnNilInPredicateMethodDefinition
       end
 
       def remove_field_value_from obj, field_value_type = nil
@@ -498,7 +498,6 @@ module Google
           dup_hash = dup_hash[field]
         end
         prev_hash[last_field] = dup_hash
-        prev_hash.delete_if { |_k, v| v.nil? }
         ret_hash
       end
 
@@ -536,13 +535,13 @@ module Google
       ESCAPED_FIELD_PATH = /\A`(.*)`\z/.freeze
 
       def build_hash_from_field_paths_and_values pairs
-        pairs.each do |field_path, _value|
-          raise ArgumentError unless field_path.is_a? FieldPath
+        pairs.each do |pair|
+          raise ArgumentError unless pair.first.is_a? FieldPath
         end
 
         dup_hash = {}
 
-        pairs.each do |field_path, value|
+        pairs.each do |(field_path, value)|
           tmp_dup = dup_hash
           last_field = nil
           field_path.fields.map(&:to_sym).each do |field|
|