google-cloud-firestore 2.12.0 → 2.13.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: c8d082b21e20a784bbc4ded66f836f6d04bd36b9925791edabe11e8d134427b4
4
- data.tar.gz: f7d0f0cc23591434e68a92fee5132860fa6128cdaa3fbe4d3143911eb0c5f147
3
+ metadata.gz: 99ee040593be1625729cd2eae0d3a8bda03185c53692bec5fdfe18a32c2021f1
4
+ data.tar.gz: 26ff29bf53e07708590e92e9db03955ce1c1c60d671f7cf7fbd4dad52b3b8eab
5
5
  SHA512:
6
- metadata.gz: 19c611f16d93bedede808df8f049f9e6ec500a49e1b883b4d7e658d3c7f940c7afdf05eb4407615b80e91ee849eea6091662a886bf442b0a12f7887837578c8b
7
- data.tar.gz: 9948560560760181151c1ec60f0c005a84e6ec1ae29c05f40efcca77d6be159f60592eafed854c69e7ab75a075c19b3786316782dccd5b98efaa814354f7d7e1
6
+ metadata.gz: 56a3248b0d03ec14935cc78aa2e0ca6fcf63b810f1800dc96ba3d19b56c1546e914e7449c30cf56b8460d81c1b2e2210bdfaaf035405cf8c7e9909d05d7a9fba
7
+ data.tar.gz: 3cb3a0a89e2ef1d3707e4f30097e2019d62fee21354e6f1575cf29b5bd61121415945a2ff23735c8611af849c5399b6f30f01e58bd1c03f591baa55359f7cd1f
data/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
1
1
  # Release History
2
2
 
3
+ ### 2.13.0 (2023-05-10)
4
+
5
+ #### Features
6
+
7
+ * Added support for bulk writer ([#21426](https://github.com/googleapis/google-cloud-ruby/issues/21426))
8
+
3
9
  ### 2.12.0 (2023-04-20)
4
10
 
5
11
  #### Features
@@ -0,0 +1,73 @@
1
+ # Copyright 2023 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # https://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ module Google
17
+ module Cloud
18
+ module Firestore
19
+ ##
20
+ #
21
+ # @private Accumulates write operations to be sent in a batch. Use this for higher
22
+ # volumes (e.g., via `BulkWriter`) and when the order of operations
23
+ # within a given batch is unimportant.
24
+ #
25
+ # Because the order in which individual write operations are applied to the database
26
+ # is not guaranteed, `batch_write` RPCs can never contain multiple operations
27
+ # to the same document. In practice, the BulkWriter class handles this case.
28
+ #
29
+ class BulkCommitBatch
30
+ attr_reader :operations
31
+
32
+ ##
33
+ # Initialize the object
34
+ def initialize service, operations
35
+ @service = service
36
+ @operations = operations
37
+ end
38
+
39
+ ##
40
+ # Updates the operation based on the result received from the API request.
41
+ #
42
+ # @param [Google::Cloud::Firestore::V1::BatchWriteResponse] responses
43
+ #
44
+ # @return [nil]
45
+ #
46
+ def parse_results responses
47
+ @operations.zip responses.write_results, responses.status do |operation, write_result, status|
48
+ begin
49
+ status&.code&.zero? ? operation.on_success(write_result) : operation.on_failure(status)
50
+ rescue StandardError
51
+ # TODO: Log the error while parsing response
52
+ end
53
+ end
54
+ end
55
+
56
+ ##
57
+ # Makes the BatchWrite API request with all the operations in the batch and
58
+ # parses the results for each operation.
59
+ #
60
+ # @return [nil]
61
+ #
62
+ def commit
63
+ begin
64
+ responses = @service.batch_write @operations.map(&:write)
65
+ parse_results responses
66
+ rescue StandardError => e
67
+ raise BulkCommitBatchError, e
68
+ end
69
+ end
70
+ end
71
+ end
72
+ end
73
+ end
@@ -0,0 +1,558 @@
1
+ # Copyright 2023 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # https://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ require "concurrent"
17
+ require "google/cloud/firestore/rate_limiter"
18
+ require "google/cloud/firestore/bulk_commit_batch"
19
+ require "google/cloud/firestore/promise/future"
20
+ require "google/cloud/firestore/bulk_writer_operation"
21
+ require "google/cloud/firestore/bulk_writer_exception"
22
+ require "google/cloud/firestore/bulk_writer_scheduler"
23
+
24
+
25
+ module Google
26
+ module Cloud
27
+ module Firestore
28
+ ##
29
+ # # BulkWriter
30
+ #
31
+ # Accumulates and efficiently sends large amounts of document write
32
+ # operations to the server.
33
+ #
34
+ # BulkWriter can handle large data migrations or updates, buffering records
35
+ # in memory and submitting them to the server in batches of 20.
36
+ #
37
+ # The submission of batches is internally parallelized with a ThreadPoolExecutor.
38
+ #
39
+ # @example Create a BulkWriter and add a write request:
40
+ # require "google/cloud/firestore"
41
+ #
42
+ # firestore = Google::Cloud::Firestore.new
43
+ # bw = firestore.bulk_writer
44
+ #
45
+ # bw.create("cities/NYC", { name: "New York City" })
46
+ #
47
+ # bw.flush
48
+ # bw.close
49
+ #
50
+ class BulkWriter
51
+ MAX_RETRY_ATTEMPTS = 10
52
+
53
+ ##
54
+ # Initialize the attributes and start the schedule_operations job
55
+ #
56
+ def initialize client, service,
57
+ request_threads: nil,
58
+ batch_threads: nil,
59
+ retries: nil
60
+ @client = client
61
+ @service = service
62
+ @closed = false
63
+ @flush = false
64
+ @request_threads = (request_threads || 2).to_i
65
+ @write_thread_pool = Concurrent::ThreadPoolExecutor.new max_threads: @request_threads,
66
+ max_queue: 0
67
+ @mutex = Mutex.new
68
+ @scheduler = BulkWriterScheduler.new client, service, batch_threads
69
+ @doc_refs = Set.new
70
+ @retries = [retries || MAX_RETRY_ATTEMPTS, MAX_RETRY_ATTEMPTS].min
71
+ @request_results = []
72
+ end
73
+
74
+ ##
75
+ # Creates a document with the provided data (fields and values).
76
+ #
77
+ # The operation will fail if the document already exists.
78
+ #
79
+ # @param [String, DocumentReference] doc A string representing the
80
+ # path of the document, or a document reference object.
81
+ # @param [Hash] data The document's fields and values.
82
+ #
83
+ # @return [Google::Cloud::Firestore::Promise::Future] Denoting the future value of
84
+ # write operation.
85
+ #
86
+ # @example Create a document using a document path:
87
+ # require "google/cloud/firestore"
88
+ #
89
+ # firestore = Google::Cloud::Firestore.new
90
+ # bw = firestore.bulk_writer
91
+ #
92
+ # bw.create("cities/NYC", { name: "New York City" })
93
+ #
94
+ # @example Create a document using a document reference:
95
+ # require "google/cloud/firestore"
96
+ #
97
+ # firestore = Google::Cloud::Firestore.new
98
+ # bw = firestore.bulk_writer
99
+ #
100
+ # # Get a document reference
101
+ # nyc_ref = firestore.doc "cities/NYC"
102
+ #
103
+ # bw.create(nyc_ref, { name: "New York City" })
104
+ #
105
+ # @example Create a document and set a field to server_time:
106
+ # require "google/cloud/firestore"
107
+ #
108
+ # firestore = Google::Cloud::Firestore.new
109
+ # bw = firestore.bulk_writer
110
+ #
111
+ # # Get a document reference
112
+ # nyc_ref = firestore.doc "cities/NYC"
113
+ #
114
+ # bw.create(nyc_ref, { name: "New York City",
115
+ # updated_at: firestore.field_server_time })
116
+ #
117
+ # @example Get the value of write operation:
118
+ # require "google/cloud/firestore"
119
+ #
120
+ # firestore = Google::Cloud::Firestore.new
121
+ # bw = firestore.bulk_writer
122
+ #
123
+ # # Get a document reference
124
+ # nyc_ref = firestore.doc "cities/NYC"
125
+ #
126
+ # result = bw.create(nyc_ref, { name: "New York City",
127
+ # updated_at: firestore.field_server_time })
128
+ #
129
+ # bw.close
130
+ #
131
+ # puts result.value
132
+ #
133
+ def create doc, data
134
+ doc_path = coalesce_doc_path_argument doc
135
+ pre_add_operation doc_path
136
+
137
+ write = Convert.write_for_create doc_path, data
138
+
139
+ create_and_enqueue_operation write
140
+ end
141
+
142
+ ##
143
+ # Writes the provided data (fields and values) to the provided document.
144
+ # If the document does not exist, it will be created. By default, the
145
+ # provided data overwrites existing data, but the provided data can be
146
+ # merged into the existing document using the `merge` argument.
147
+ #
148
+ # If you're not sure whether the document exists, use the `merge`
149
+ # argument to merge the new data with any existing document data to
150
+ # avoid overwriting entire documents.
151
+ #
152
+ # @param [String, DocumentReference] doc A string representing the
153
+ # path of the document, or a document reference object.
154
+ # @param [Hash] data The document's fields and values.
155
+ # @param [Boolean, FieldPath, String, Symbol] merge When
156
+ # `true`, all provided data is merged with the existing document data.
157
+ # When the argument is one or more field path, only the data for
158
+ # fields in this argument is merged with the existing document data.
159
+ # The default is to not merge, but to instead overwrite the existing
160
+ # document data.
161
+ #
162
+ # @return [Google::Cloud::Firestore::Promise::Future] Denoting the future value of
163
+ # write operation.
164
+ #
165
+ # @example Set a document using a document path:
166
+ # require "google/cloud/firestore"
167
+ #
168
+ # firestore = Google::Cloud::Firestore.new
169
+ # bw = firestore.bulk_writer
170
+ #
171
+ # # Update a document
172
+ # bw.set("cities/NYC", { name: "New York City" })
173
+ #
174
+ # @example Create a document using a document reference:
175
+ # require "google/cloud/firestore"
176
+ #
177
+ # bw = firestore.bulk_writer
178
+ #
179
+ # # Get a document reference
180
+ # nyc_ref = firestore.doc "cities/NYC"
181
+ #
182
+ # # Update a document
183
+ # bw.set(nyc_ref, { name: "New York City" })
184
+ #
185
+ # @example Set a document and merge all data:
186
+ # require "google/cloud/firestore"
187
+ #
188
+ # firestore = Google::Cloud::Firestore.new
189
+ # bw = firestore.bulk_writer
190
+ #
191
+ # bw.set("cities/NYC", { name: "New York City" }, merge: true)
192
+ #
193
+ # @example Set a document and merge only name:
194
+ # require "google/cloud/firestore"
195
+ #
196
+ # firestore = Google::Cloud::Firestore.new
197
+ # bw = firestore.bulk_writer
198
+ #
199
+ # bw.set("cities/NYC", { name: "New York City" }, merge: :name)
200
+ #
201
+ # @example Set a document and deleting a field using merge:
202
+ # require "google/cloud/firestore"
203
+ #
204
+ # firestore = Google::Cloud::Firestore.new
205
+ # bw = firestore.bulk_writer
206
+ #
207
+ # # Get a document reference
208
+ # nyc_ref = firestore.doc "cities/NYC"
209
+ #
210
+ # nyc_data = { name: "New York City",
211
+ # trash: firestore.field_delete }
212
+ #
213
+ # bw.set(nyc_ref, nyc_data, merge: true)
214
+ #
215
+ # @example Set a document and set a field to server_time:
216
+ # require "google/cloud/firestore"
217
+ #
218
+ # firestore = Google::Cloud::Firestore.new
219
+ # bw = firestore.bulk_writer
220
+ #
221
+ # # Get a document reference
222
+ # nyc_ref = firestore.doc "cities/NYC"
223
+ #
224
+ # nyc_data = { name: "New York City",
225
+ # updated_at: firestore.field_server_time }
226
+ #
227
+ # bw.set(nyc_ref, nyc_data, merge: true)
228
+ #
229
+ # @example Get the value of write operation:
230
+ # require "google/cloud/firestore"
231
+ #
232
+ # firestore = Google::Cloud::Firestore.new
233
+ # bw = firestore.bulk_writer
234
+ #
235
+ # # Get a document reference
236
+ # nyc_ref = firestore.doc "cities/NYC"
237
+ #
238
+ # nyc_data = { name: "New York City",
239
+ # updated_at: firestore.field_server_time }
240
+ #
241
+ # result = bw.set(nyc_ref, nyc_data)
242
+ #
243
+ # bw.close
244
+ #
245
+ # puts result.value
246
+ #
247
+ def set doc, data, merge: nil
248
+ doc_path = coalesce_doc_path_argument doc
249
+ pre_add_operation doc_path
250
+
251
+ write = Convert.write_for_set doc_path, data, merge: merge
252
+
253
+ create_and_enqueue_operation write
254
+ end
255
+
256
+ ##
257
+ # Updates the document with the provided data (fields and values). The
258
+ # provided data is merged into the existing document data.
259
+ #
260
+ # The operation will fail if the document does not exist.
261
+ #
262
+ # @param [String, DocumentReference] doc A string representing the
263
+ # path of the document, or a document reference object.
264
+ # @param [Hash<FieldPath|String|Symbol, Object>] data The document's
265
+ # fields and values.
266
+ #
267
+ # The top-level keys in the data hash are considered field paths, and
268
+ # can either be a FieldPath object, or a string representing the
269
+ # nested fields. In other words the string represents individual
270
+ # fields joined by ".". Fields containing `~`, `*`, `/`, `[`, `]`, and
271
+ # `.` cannot be in a dotted string, and should provided using a
272
+ # {FieldPath} object instead.
273
+ # @param [Time] update_time When set, the document must have been last
274
+ # updated at that time. Optional.
275
+ #
276
+ # @return [Google::Cloud::Firestore::Promise::Future] Denoting the future value of
277
+ # write operation.
278
+ #
279
+ # @example Update a document using a document path:
280
+ # require "google/cloud/firestore"
281
+ #
282
+ # firestore = Google::Cloud::Firestore.new
283
+ # bw = firestore.bulk_writer
284
+ #
285
+ # bw.update("cities/NYC", { name: "New York City" })
286
+ #
287
+ # @example Directly update a deeply-nested field with a `FieldPath`:
288
+ # require "google/cloud/firestore"
289
+ #
290
+ # firestore = Google::Cloud::Firestore.new
291
+ # bw = firestore.bulk_writer
292
+ #
293
+ # nested_field_path = firestore.field_path :favorites, :food
294
+ #
295
+ # bw.update("users/frank", { nested_field_path => "Pasta" })
296
+ #
297
+ # @example Update a document using a document reference:
298
+ # require "google/cloud/firestore"
299
+ #
300
+ # firestore = Google::Cloud::Firestore.new
301
+ # bw = firestore.bulk_writer
302
+ #
303
+ # # Get a document reference
304
+ # nyc_ref = firestore.doc "cities/NYC"
305
+ #
306
+ # bw.update(nyc_ref, { name: "New York City" })
307
+ #
308
+ # @example Update a document using the `update_time` precondition:
309
+ # require "google/cloud/firestore"
310
+ #
311
+ # firestore = Google::Cloud::Firestore.new
312
+ # bw = firestore.bulk_writer
313
+ #
314
+ # last_updated_at = Time.now - 42 # 42 seconds ago
315
+ #
316
+ # bw.update("cities/NYC", { name: "New York City" },
317
+ # update_time: last_updated_at)
318
+ #
319
+ # @example Update a document and deleting a field:
320
+ # require "google/cloud/firestore"
321
+ #
322
+ # firestore = Google::Cloud::Firestore.new
323
+ # bw = firestore.bulk_writer
324
+ #
325
+ # # Get a document reference
326
+ # nyc_ref = firestore.doc "cities/NYC"
327
+ #
328
+ # nyc_data = { name: "New York City",
329
+ # trash: firestore.field_delete }
330
+ #
331
+ # bw.update(nyc_ref, nyc_data)
332
+ #
333
+ # @example Update a document and set a field to server_time:
334
+ # require "google/cloud/firestore"
335
+ #
336
+ # firestore = Google::Cloud::Firestore.new
337
+ # bw = firestore.bulk_writer
338
+ #
339
+ # # Get a document reference
340
+ # nyc_ref = firestore.doc "cities/NYC"
341
+ #
342
+ # nyc_data = { name: "New York City",
343
+ # updated_at: firestore.field_server_time }
344
+ #
345
+ # bw.update(nyc_ref, nyc_data)
346
+ #
347
+ # @example Get the value of write operation:
348
+ # require "google/cloud/firestore"
349
+ #
350
+ # firestore = Google::Cloud::Firestore.new
351
+ # bw = firestore.bulk_writer
352
+ #
353
+ # # Get a document reference
354
+ # nyc_ref = firestore.doc "cities/NYC"
355
+ #
356
+ # nyc_data = { name: "New York City",
357
+ # updated_at: firestore.field_server_time }
358
+ #
359
+ # result = bw.update(nyc_ref, nyc_data)
360
+ #
361
+ # bw.close
362
+ #
363
+ # puts result.value
364
+ #
365
+ def update doc, data, update_time: nil
366
+ doc_path = coalesce_doc_path_argument doc
367
+ pre_add_operation doc_path
368
+
369
+ write = Convert.write_for_update doc_path, data, update_time: update_time
370
+
371
+ create_and_enqueue_operation write
372
+ end
373
+
374
+ ##
375
+ # Deletes a document from the database.
376
+ #
377
+ # @param [String, DocumentReference] doc A string representing the
378
+ # path of the document, or a document reference object.
379
+ # @param [Boolean] exists Whether the document must exist. When `true`,
380
+ # the document must exist or an error is raised. Default is `false`.
381
+ # Optional.
382
+ # @param [Time] update_time When set, the document must have been last
383
+ # updated at that time. Optional.
384
+ #
385
+ # @return [Google::Cloud::Firestore::Promise::Future] Denoting the future value of
386
+ # write operation.
387
+ #
388
+ # @example Delete a document using a document path:
389
+ # require "google/cloud/firestore"
390
+ #
391
+ # firestore = Google::Cloud::Firestore.new
392
+ # bw = firestore.bulk_writer
393
+ #
394
+ # # Delete a document
395
+ # bw.delete "cities/NYC"
396
+ #
397
+ # @example Delete a document using a document reference:
398
+ # require "google/cloud/firestore"
399
+ #
400
+ # firestore = Google::Cloud::Firestore.new
401
+ # bw = firestore.bulk_writer
402
+ #
403
+ # # Get a document reference
404
+ # nyc_ref = firestore.doc "cities/NYC"
405
+ #
406
+ # # Delete a document
407
+ # bw.delete nyc_ref
408
+ #
409
+ # @example Delete a document using `exists`:
410
+ # require "google/cloud/firestore"
411
+ #
412
+ # firestore = Google::Cloud::Firestore.new
413
+ # bw = firestore.bulk_writer
414
+ #
415
+ # # Delete a document
416
+ # bw.delete "cities/NYC", exists: true
417
+ #
418
+ # @example Delete a document using the `update_time` precondition:
419
+ # require "google/cloud/firestore"
420
+ #
421
+ # firestore = Google::Cloud::Firestore.new
422
+ # bw = firestore.bulk_writer
423
+ #
424
+ # last_updated_at = Time.now - 42 # 42 seconds ago
425
+ #
426
+ # # Delete a document
427
+ # bw.delete "cities/NYC", update_time: last_updated_at
428
+ #
429
+ # @example Get the value of write operation:
430
+ # require "google/cloud/firestore"
431
+ #
432
+ # firestore = Google::Cloud::Firestore.new
433
+ # bw = firestore.bulk_writer
434
+ #
435
+ # last_updated_at = Time.now - 42 # 42 seconds ago
436
+ #
437
+ # # Delete a document
438
+ # result = bw.delete "cities/NYC", update_time: last_updated_at
439
+ #
440
+ # bw.close
441
+ #
442
+ # puts result.value
443
+ #
444
+ def delete doc, exists: nil, update_time: nil
445
+ doc_path = coalesce_doc_path_argument doc
446
+ pre_add_operation doc_path
447
+
448
+ write = Convert.write_for_delete doc_path, exists: exists, update_time: update_time
449
+
450
+ create_and_enqueue_operation write
451
+ end
452
+
453
+ ##
454
+ # Flushes all the current operations before enqueuing new operations.
455
+ #
456
+ # @return [nil]
457
+ def flush
458
+ @mutex.synchronize { @flush = true }
459
+ @request_results.each do |result|
460
+ begin
461
+ result.wait!
462
+ rescue StandardError
463
+ # Ignored
464
+ end
465
+ end
466
+ @mutex.synchronize do
467
+ @doc_refs = Set.new
468
+ @flush = false
469
+ end
470
+ end
471
+
472
+ ##
473
+ # Closes the BulkWriter object for new operations.
474
+ # Existing operations will be flushed and the threadpool will shutdown.
475
+ #
476
+ # @return [nil]
477
+ def close
478
+ @mutex.synchronize { @closed = true }
479
+ flush
480
+ @mutex.synchronize do
481
+ @write_thread_pool.shutdown
482
+ @scheduler.close
483
+ end
484
+ end
485
+
486
+ private
487
+
488
+ ##
489
+ # @private The client the Cloud Firestore BulkWriter belongs to.
490
+ #
491
+ # @return [Client] firestore client.
492
+ def firestore
493
+ @client
494
+ end
495
+ alias client firestore
496
+
497
+ ##
498
+ # @private Checks if the BulkWriter is accepting write requests
499
+ def accepting_request?
500
+ unless @closed || @flush
501
+ return true
502
+ end
503
+ false
504
+ end
505
+
506
+ ##
507
+ # @private Sanity checks before adding a write request in the BulkWriter
508
+ def pre_add_operation doc_path
509
+ @mutex.synchronize do
510
+ unless accepting_request?
511
+ raise BulkWriterError, "Not accepting responses for now. Either closed or in flush state"
512
+ end
513
+ if @doc_refs.include? doc_path
514
+ raise BulkWriterError, "Already contains mutations for this document"
515
+ end
516
+ @doc_refs.add doc_path
517
+ end
518
+ end
519
+
520
+ ##
521
+ # @private Creates a BulkWriterOperation
522
+ #
523
+ def create_operation write
524
+ BulkWriterOperation.new write, @retries
525
+ end
526
+
527
+ ##
528
+ # @private Adds a BulkWriterOperation to the scheduler.
529
+ def enqueue_operation operation
530
+ @mutex.synchronize { @scheduler.add_operation operation }
531
+ end
532
+
533
+ ##
534
+ # @private Creates a BulkWriterOperation and adds it in the scheduler.
535
+ #
536
+ def create_and_enqueue_operation write
537
+ operation = create_operation write
538
+ enqueue_operation operation
539
+ future = Concurrent::Promises.future_on @write_thread_pool, operation do |bulk_writer_operation|
540
+ bulk_writer_operation.completion_event.wait
541
+ raise bulk_writer_operation.result if bulk_writer_operation.result.is_a? BulkWriterException
542
+ bulk_writer_operation.result
543
+ end
544
+ @mutex.synchronize { @request_results << future }
545
+ Promise::Future.new future
546
+ end
547
+
548
+ ##
549
+ # @private
550
+ def coalesce_doc_path_argument doc_path
551
+ return doc_path.path if doc_path.respond_to? :path
552
+
553
+ client.doc(doc_path).path
554
+ end
555
+ end
556
+ end
557
+ end
558
+ end
@@ -0,0 +1,40 @@
1
+ # Copyright 2023 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # https://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ module Google
17
+ module Cloud
18
+ module Firestore
19
+ ##
20
+ # # BulkWriterException
21
+ #
22
+ # A BulkWriterException object refers to the error that will be thrown
23
+ # in case a BulkWriterOperation fails in all the attempts.
24
+ #
25
+ class BulkWriterException < StandardError
26
+ attr_reader :status
27
+ attr_reader :message
28
+ attr_reader :details
29
+
30
+ def initialize status
31
+ @status = status.code
32
+ @message = status.message
33
+ @details = status.details
34
+
35
+ super status.message
36
+ end
37
+ end
38
+ end
39
+ end
40
+ end
@@ -0,0 +1,126 @@
1
+ # Copyright 2023 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # https://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ require "concurrent"
17
+ require "google/cloud/firestore/bulk_writer_exception"
18
+
19
+
20
+ module Google
21
+ module Cloud
22
+ module Firestore
23
+ ##
24
+ #
25
+ # @private A BulkWriterOperation object refers to a write operation and contains
26
+ # all the necessary information for a specific write task, including meta
27
+ # information like the current number of attempts
28
+ #
29
+ class BulkWriterOperation
30
+ attr_reader :retry_time
31
+ attr_reader :result
32
+ attr_reader :completion_event
33
+ attr_reader :write
34
+
35
+ ##
36
+ # Initialize the object
37
+ def initialize write, retries
38
+ @write = write
39
+ @failed_attempts = 0
40
+ @retries = retries
41
+ @retry_time = Time.now
42
+ @completion_event = Concurrent::Event.new
43
+ end
44
+
45
+ ##
46
+ # Processing to be done when the response is a success.
47
+ # Updates the result and set the completion event.
48
+ #
49
+ # @param [Google::Cloud::Firestore::V1::WriteResult] result The result returned in the response.
50
+ def on_success result
51
+ begin
52
+ @result = WriteResult.new result
53
+ rescue StandardError => e
54
+ raise BulkWriterOperationError, e
55
+ ensure
56
+ @completion_event.set
57
+ end
58
+ end
59
+
60
+ ##
61
+ # Processing to be done when the response is a failure.
62
+ # Updates the failure attempts. If the retry count reaches
63
+ # the upper threshold, the operation will be marked
64
+ # as failed and the completion event will be set.
65
+ #
66
+ # @param [Google::Rpc::Status] status The status received in the response.
67
+ #
68
+ def on_failure status
69
+ @failed_attempts += 1
70
+ if @failed_attempts == @retries + 1
71
+ begin
72
+ @result = BulkWriterException.new status
73
+ rescue StandardError => e
74
+ raise BulkWriterOperationError, e
75
+ ensure
76
+ @completion_event.set
77
+ end
78
+ else
79
+ backoff_duration
80
+ end
81
+ end
82
+
83
+ ##
84
+ # Exponentially increases the waiting time for retry.
85
+ #
86
+ def backoff_duration
87
+ @retry_time = Time.now + (@failed_attempts**2)
88
+ end
89
+
90
+ ##
91
+ # Represents the result of applying a write.
92
+ #
93
+ # @example
94
+ # require "google/cloud/firestore"
95
+ #
96
+ # firestore = Google::Cloud::Firestore.new
97
+ # bw = firestore.bulk_writer
98
+ #
99
+ # # Set the data for NYC
100
+ # result = bw.set("cities/NYC", { name: "New York City" })
101
+ #
102
+ # result.wait!
103
+ #
104
+ # puts result.value
105
+ #
106
+ class WriteResult
107
+ ##
108
+ # The last update time of the document after applying the write. Set to
109
+ # nil for a +delete+ mutation.
110
+ #
111
+ # If the write did not actually change the document, this will be
112
+ # the previous update_time.
113
+ #
114
+ # @return [Time] The last update time.
115
+ attr_reader :update_time
116
+
117
+ ##
118
+ # @private
119
+ def initialize result
120
+ @update_time = Convert.timestamp_to_time result.update_time
121
+ end
122
+ end
123
+ end
124
+ end
125
+ end
126
+ end
@@ -0,0 +1,164 @@
1
+ # Copyright 2023 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # https://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ require "concurrent"
17
+ require "google/cloud/firestore/errors"
18
+ require "google/cloud/firestore/bulk_writer_operation"
19
+ require "google/cloud/firestore/rate_limiter"
20
+ require "google/cloud/firestore/bulk_commit_batch"
21
+ require "google/cloud/firestore/bulk_writer_exception"
22
+ require "google/cloud/firestore/bulk_writer_scheduler"
23
+
24
+
25
+ module Google
26
+ module Cloud
27
+ module Firestore
28
+ ##
29
+ #
30
+ # @private Accumulates BulkWriterOperations from the BulkWriter, schedules them
31
+ # in accordance with the 500/50/5 rule, and retries the failed operations from the BulkCommitBatch.
32
+ #
33
+ class BulkWriterScheduler
34
+ MAX_BATCH_SIZE = 20
35
+ BATCH_THREAD_COUNT = 4
36
+
37
+ ##
38
+ # Initialize the attributes and start the schedule_operations job
39
+ #
40
+ def initialize client, service, batch_threads
41
+ @client = client
42
+ @service = service
43
+ @rate_limiter = RateLimiter.new
44
+ @buffered_operations = []
45
+ @batch_threads = (batch_threads || BATCH_THREAD_COUNT).to_i
46
+ @batch_thread_pool = Concurrent::ThreadPoolExecutor.new max_threads: @batch_threads,
47
+ max_queue: 0,
48
+ auto_terminate: true
49
+ @retry_operations = []
50
+ @mutex = Mutex.new
51
+ start_scheduling_operations
52
+ end
53
+
54
+ def start_scheduling_operations
55
+ Concurrent::Promises.future_on @batch_thread_pool do
56
+ begin
57
+ schedule_operations
58
+ rescue StandardError
59
+ # TODO: Log the error when logging is available
60
+ retry
61
+ end
62
+ end
63
+ end
64
+
65
+ def add_operation operation
66
+ @mutex.synchronize { @buffered_operations << operation }
67
+ end
68
+
69
+ ##
70
+ # Closes the scheduler object.
71
+ # Waits for the enqueued tasks to complete
72
+ # before closing down.
73
+ #
74
+ # @return [nil]
75
+ def close
76
+ @mutex.synchronize do
77
+ @batch_thread_pool.shutdown
78
+ @batch_thread_pool.wait_for_termination 1
79
+ @batch_thread_pool.kill unless @batch_thread_pool.shutdown?
80
+ end
81
+ end
82
+
83
+ private
84
+
85
+ ##
86
+ # @private Adds failed operations in the retry heap.
87
+ #
88
+ def post_commit_batch bulk_commit_batch
89
+ @mutex.synchronize do
90
+ bulk_commit_batch.operations.each do |operation|
91
+ unless operation.completion_event.set?
92
+ @retry_operations << operation
93
+ end
94
+ end
95
+ @retry_operations.sort_by!(&:retry_time)
96
+ end
97
+ end
98
+
99
+ ##
100
+ # @private Commits a batch of scheduled operations.
101
+ # Batch size <= 20 to match the constraint of request size < 9.8 MB
102
+ #
103
+ # @return [nil]
104
+ def commit_batch bulk_commit_batch
105
+ begin
106
+ Concurrent::Promises.future_on @batch_thread_pool, bulk_commit_batch do |batch|
107
+ begin
108
+ batch.commit
109
+ rescue StandardError
110
+ # TODO: Log the errors while committing a batch
111
+ ensure
112
+ post_commit_batch bulk_commit_batch
113
+ end
114
+ end
115
+ rescue StandardError => e
116
+ post_commit_batch bulk_commit_batch
117
+ raise BulkWriterSchedulerError, e.message
118
+ end
119
+ end
120
+
121
+ ##
122
+ # @private Schedule the enqueued operations in batches.
123
+ #
124
+ # @return [nil]
125
+ def schedule_operations
126
+ loop do
127
+ break if @batch_thread_pool.shuttingdown?
128
+ dequeue_retry_operations
129
+ batch_size = [MAX_BATCH_SIZE, @buffered_operations.length].min
130
+ if batch_size.zero?
131
+ sleep 0.001
132
+ next
133
+ end
134
+ @rate_limiter.wait_for_tokens batch_size
135
+ operations = dequeue_buffered_operations batch_size
136
+ commit_batch BulkCommitBatch.new(@service, operations)
137
+ end
138
+ end
139
+
140
+ ##
141
+ # @private Removes BulkWriterOperations from the buffered queue to be scheduled in
142
+ # the current batch
143
+ #
144
+ def dequeue_buffered_operations size
145
+ @mutex.synchronize do
146
+ @buffered_operations.shift size
147
+ end
148
+ end
149
+
150
+ ##
151
+ # @private Removes BulkWriterOperations from the retry queue to be scheduled in
152
+ # the current batch
153
+ #
154
+ def dequeue_retry_operations
155
+ @mutex.synchronize do
156
+ while @retry_operations.length.positive? && @retry_operations.first.retry_time <= Time.now
157
+ @buffered_operations << @retry_operations.shift
158
+ end
159
+ end
160
+ end
161
+ end
162
+ end
163
+ end
164
+ end
@@ -23,6 +23,7 @@ require "google/cloud/firestore/document_snapshot"
23
23
  require "google/cloud/firestore/collection_group"
24
24
  require "google/cloud/firestore/batch"
25
25
  require "google/cloud/firestore/transaction"
26
+ require "google/cloud/firestore/bulk_writer"
26
27
  require "google/cloud/firestore/filter"
27
28
 
28
29
  module Google
@@ -789,6 +790,35 @@ module Google
789
790
  yield transaction
790
791
  end
791
792
 
793
##
# Create a bulk writer to perform multiple writes that are
# executed in parallel.
#
# @param [Integer] request_threads The number of threads used for handling
#   requests. Default is 2. Optional.
# @param [Integer] batch_threads The number of threads used for processing
#   batches. Default is 4. Optional.
# @param [Integer] retries The number of times a failed write request will
#   be retried (with exponential delay) before being marked as failure. Max
#   attempts are 15. Optional
#
# @return [Google::Cloud::Firestore::BulkWriter] Returns an object of
#   bulk writer.
#
# @example Initializing a BulkWriter with all the configurations.
#   require "google/cloud/firestore"
#
#   firestore = Google::Cloud::Firestore.new
#
#   # The threading/retry options belong to bulk_writer, not to the
#   # individual write calls.
#   bw = firestore.bulk_writer request_threads: 4, batch_threads: 10, retries: 10
#
#   bulk_write_result = bw.create "doc_ref"
#
def bulk_writer request_threads: nil, batch_threads: nil, retries: nil
  BulkWriter.new self, @service, request_threads: request_threads,
                 batch_threads: batch_threads, retries: retries
end
821
+
792
822
  # @!endgroup
793
823
 
794
824
  # @private
@@ -0,0 +1,60 @@
1
+ # Copyright 2023 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # https://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ require "google/cloud/errors"
16
+
17
+ module Google
18
+ module Cloud
19
+ module Firestore
20
##
# Indicates that an error was reported while scheduling
# BulkWriter operations.
#
class BulkWriterSchedulerError < Google::Cloud::Error
  ##
  # @param [String] message Description of the underlying failure; it is
  #   prefixed with the error class name in the final message.
  def initialize message
    super "BulkWriterSchedulerError : #{message}"
  end
end
29
+
30
##
# Indicates that an error was reported while committing a
# batch of operations.
#
class BulkCommitBatchError < Google::Cloud::Error
  ##
  # @param [String] message Description of the underlying failure; it is
  #   prefixed with the error class name in the final message.
  def initialize message
    super "BulkCommitBatchError : #{message}"
  end
end
39
+
40
##
# Indicates that an error was reported while parsing response for
# BulkWriterOperation.
#
class BulkWriterOperationError < Google::Cloud::Error
  ##
  # @param [String] message Description of the underlying failure; it is
  #   prefixed with the error class name in the final message.
  def initialize message
    super "BulkWriterOperationError : #{message}"
  end
end
49
+
50
##
# Indicates that an error was reported in BulkWriter.
#
class BulkWriterError < Google::Cloud::Error
  ##
  # @param [String] message Description of the underlying failure; it is
  #   prefixed with the error class name in the final message.
  def initialize message
    super "BulkWriterError : #{message}"
  end
end
58
+ end
59
+ end
60
+ end
@@ -0,0 +1,97 @@
1
+ # Copyright 2023 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # https://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ module Google
16
+ module Cloud
17
+ module Firestore
18
+ module Promise
19
##
# # Future
#
# Represents a value that will become available at some later time, or a
# rejection reason instead, e.g. when the underlying task raises an
# exception. All queries are forwarded to the wrapped future object.
#
class Future
  ##
  # Wraps the delegate future this instance forwards every call to.
  #
  def initialize future
    @delegate = future
  end

  # Whether the future has resolved successfully.
  #
  # @return [Boolean]
  def fulfilled?
    @delegate.fulfilled?
  end

  # Whether the future has resolved with a failure.
  #
  # @return [Boolean]
  def rejected?
    @delegate.rejected?
  end

  ##
  # Waits up to the timeout duration, then returns the fulfilled value,
  # the timeout value on timeout, or nil on rejection.
  #
  # @param [Integer] timeout the maximum time in seconds to wait
  # @param [Object] timeout_value a value returned by the method when it times out
  # @return [Object, nil, timeout_value] the value of the Future when fulfilled,
  #   timeout_value on timeout, nil on rejection.
  def value timeout = nil, timeout_value = nil
    @delegate.value timeout, timeout_value
  end

  # Returns the reason the future was rejected.
  #
  # @return [Object, timeout_value] the reason, or timeout_value on timeout,
  #   or nil on fulfillment.
  def reason timeout = nil, timeout_value = nil
    @delegate.reason timeout, timeout_value
  end

  ##
  # Waits up to the timeout duration, raising on rejection.
  #
  # @param [Integer] timeout the maximum time in seconds to wait
  def wait! timeout = nil
    @delegate.wait! timeout
  end

  ##
  # Chains the task to run synchronously after fulfillment. The task is
  # skipped on rejection, though the chained future still resolves,
  # triggering any dependent futures.
  #
  # @return [Future]
  # @yield [value, *args] to the task.
  def then(*args, &task)
    Future.new @delegate.then(*args, &task)
  end

  # Chains the task to run synchronously on the executor after rejection.
  # The task is skipped on fulfillment, though the chained future still
  # resolves, triggering any dependent futures.
  #
  # @return [Future]
  # @yield [reason, *args] to the task.
  def rescue(*args, &task)
    Future.new @delegate.rescue(*args, &task)
  end
end
94
+ end
95
+ end
96
+ end
97
+ end
@@ -0,0 +1,80 @@
1
+ # Copyright 2023 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # https://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ module Google
17
+ module Cloud
18
+ module Firestore
19
##
# @private Implements 5/5/5 ramp-up via Token Bucket algorithm.
#
# 5/5/5 is a ramp up strategy that starts with a budget of 500 operations per
# second. Additionally, every 5 minutes, the maximum budget can increase by
# 50%. Thus, at 5:01 into a long bulk-writing process, the maximum budget
# becomes 750 operations per second. At 10:01, the budget becomes 1,125
# operations per second.
#
class RateLimiter
  DEFAULT_STARTING_MAXIMUM_OPS_PER_SECOND = 500.0
  DEFAULT_PHASE_LENGTH = 300.0

  # Current operations-per-second budget.
  attr_reader :bandwidth

  ##
  # Initialize the object
  #
  # @param [Numeric, nil] starting_ops Initial operations-per-second budget.
  #   Defaults to 500.
  # @param [Numeric, nil] phase_length Seconds between ramp-up increases.
  #   Defaults to 300 (5 minutes).
  def initialize starting_ops: nil, phase_length: nil
    @start_time = time
    @last_fetched = time
    # Remember the configured base so ramp-up scales from it (see
    # increase_bandwidth); previously a custom starting_ops was discarded
    # after the first wait_for_tokens call.
    @starting_ops = (starting_ops || DEFAULT_STARTING_MAXIMUM_OPS_PER_SECOND).to_f
    @bandwidth = @starting_ops
    @phase_length = phase_length || DEFAULT_PHASE_LENGTH
  end

  ##
  # Wait till the number of tokens is available
  # Assumes that the bandwidth is distributed evenly across the entire second.
  #
  # Example - If the limit is 500 qps, then it has been further broken down to 2e+6 nsec
  # per query
  #
  # @param [Integer] size Number of tokens (operations) requested.
  # @return [nil]
  def wait_for_tokens size
    available_time = @last_fetched + (size / @bandwidth)
    waiting_time = [0, available_time - time].max
    sleep waiting_time
    @last_fetched = time
    increase_bandwidth
  end

  private

  ##
  # Returns time elapsed since epoch.
  #
  # @return [Float] Float denoting time elapsed since epoch
  def time
    Time.now.to_f
  end

  ##
  # Increase the bandwidth as per 555 rule.
  #
  # Scales from the configured starting budget rather than always from the
  # 500-ops default, so a custom starting_ops is honored across phases.
  #
  # @return [nil]
  def increase_bandwidth
    intervals = (time - @start_time) / @phase_length
    @bandwidth = (@starting_ops * (1.5**intervals.floor)).to_f
  end
end
+ end
78
+ end
79
+ end
80
+ end
@@ -186,6 +186,18 @@ module Google
186
186
  )
187
187
  end
188
188
 
189
##
# Makes the BatchWrite API call. Contains the list of write operations to be processed.
#
# @param [Array] writes The write operations to submit in a single
#   BatchWrite request.
# @return [::Google::Cloud::Firestore::V1::BatchWriteResponse]
def batch_write writes
  request = {
    database: database_path,
    writes: writes
  }
  firestore.batch_write request, call_options(parent: database_path)
end
200
+
189
201
  def database_path project_id: project, database_id: database
190
202
  # Originally used V1::FirestoreClient.database_root_path until it was removed in #5405.
191
203
  "projects/#{project_id}/databases/#{database_id}"
@@ -16,7 +16,7 @@
16
16
  module Google
17
17
  module Cloud
18
18
  module Firestore
19
- VERSION = "2.12.0".freeze
19
+ VERSION = "2.13.0".freeze
20
20
  end
21
21
  end
22
22
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: google-cloud-firestore
3
3
  version: !ruby/object:Gem::Version
4
- version: 2.12.0
4
+ version: 2.13.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Google Inc
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2023-04-20 00:00:00.000000000 Z
11
+ date: 2023-05-15 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: google-cloud-core
@@ -228,6 +228,11 @@ files:
228
228
  - lib/google/cloud/firestore/aggregate_query.rb
229
229
  - lib/google/cloud/firestore/aggregate_query_snapshot.rb
230
230
  - lib/google/cloud/firestore/batch.rb
231
+ - lib/google/cloud/firestore/bulk_commit_batch.rb
232
+ - lib/google/cloud/firestore/bulk_writer.rb
233
+ - lib/google/cloud/firestore/bulk_writer_exception.rb
234
+ - lib/google/cloud/firestore/bulk_writer_operation.rb
235
+ - lib/google/cloud/firestore/bulk_writer_scheduler.rb
231
236
  - lib/google/cloud/firestore/client.rb
232
237
  - lib/google/cloud/firestore/collection_group.rb
233
238
  - lib/google/cloud/firestore/collection_reference.rb
@@ -240,14 +245,17 @@ files:
240
245
  - lib/google/cloud/firestore/document_reference.rb
241
246
  - lib/google/cloud/firestore/document_reference/list.rb
242
247
  - lib/google/cloud/firestore/document_snapshot.rb
248
+ - lib/google/cloud/firestore/errors.rb
243
249
  - lib/google/cloud/firestore/field_path.rb
244
250
  - lib/google/cloud/firestore/field_value.rb
245
251
  - lib/google/cloud/firestore/filter.rb
246
252
  - lib/google/cloud/firestore/generate.rb
253
+ - lib/google/cloud/firestore/promise/future.rb
247
254
  - lib/google/cloud/firestore/query.rb
248
255
  - lib/google/cloud/firestore/query_listener.rb
249
256
  - lib/google/cloud/firestore/query_partition.rb
250
257
  - lib/google/cloud/firestore/query_snapshot.rb
258
+ - lib/google/cloud/firestore/rate_limiter.rb
251
259
  - lib/google/cloud/firestore/resource_path.rb
252
260
  - lib/google/cloud/firestore/service.rb
253
261
  - lib/google/cloud/firestore/transaction.rb