google-cloud-firestore 2.11.0 → 2.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +12 -0
- data/lib/google/cloud/firestore/bulk_commit_batch.rb +73 -0
- data/lib/google/cloud/firestore/bulk_writer.rb +558 -0
- data/lib/google/cloud/firestore/bulk_writer_exception.rb +40 -0
- data/lib/google/cloud/firestore/bulk_writer_operation.rb +126 -0
- data/lib/google/cloud/firestore/bulk_writer_scheduler.rb +164 -0
- data/lib/google/cloud/firestore/client.rb +81 -0
- data/lib/google/cloud/firestore/errors.rb +60 -0
- data/lib/google/cloud/firestore/filter.rb +326 -0
- data/lib/google/cloud/firestore/promise/future.rb +97 -0
- data/lib/google/cloud/firestore/query.rb +68 -86
- data/lib/google/cloud/firestore/rate_limiter.rb +80 -0
- data/lib/google/cloud/firestore/service.rb +12 -0
- data/lib/google/cloud/firestore/version.rb +1 -1
- metadata +11 -2
@@ -0,0 +1,126 @@
|
|
1
|
+
# Copyright 2023 Google LLC
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# https://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
|
16
|
+
require "concurrent"
|
17
|
+
require "google/cloud/firestore/bulk_writer_exception"
|
18
|
+
|
19
|
+
|
20
|
+
module Google
  module Cloud
    module Firestore
      ##
      #
      # @private Wraps a single pending write for the BulkWriter, together
      # with the bookkeeping needed to schedule it: how many attempts have
      # failed so far, when it may next be retried, and an event that is
      # set once the operation reaches a terminal state.
      #
      class BulkWriterOperation
        # @return [Time] retry_time - earliest time this operation may be retried.
        # @return [WriteResult, BulkWriterException] result - terminal outcome.
        # @return [Concurrent::Event] completion_event - set once the operation
        #   has succeeded or permanently failed.
        # @return [Object] write - the underlying write request.
        attr_reader :retry_time, :result, :completion_event, :write

        ##
        # Creates a pending operation.
        #
        # @param write [Object] The write to be committed.
        # @param retries [Integer] Number of retries allowed before the
        #   operation is marked as a permanent failure.
        def initialize write, retries
          @write = write
          @retries = retries
          @failed_attempts = 0
          @retry_time = Time.now
          @completion_event = Concurrent::Event.new
        end

        ##
        # Records a successful response: stores the WriteResult and signals
        # completion. The completion event is set even when building the
        # result raises.
        #
        # @param [Google::Cloud::Firestore::V1::WriteResult] result The result returned in the response.
        def on_success result
          @result = WriteResult.new result
        rescue StandardError => e
          raise BulkWriterOperationError, e
        ensure
          @completion_event.set
        end

        ##
        # Records a failed response. While retries remain, only the next
        # retry time is pushed back; once the attempt budget is exhausted,
        # the failure becomes the terminal result and completion is signalled.
        #
        # @param [Google::Rpc::Status] status The status received in the response.
        #
        def on_failure status
          @failed_attempts += 1
          return backoff_duration unless @failed_attempts == @retries + 1
          begin
            @result = BulkWriterException.new status
          rescue StandardError => e
            raise BulkWriterOperationError, e
          ensure
            @completion_event.set
          end
        end

        ##
        # Pushes the retry time further into the future. The delay grows
        # quadratically with the number of failed attempts (1s, 4s, 9s, ...).
        #
        def backoff_duration
          @retry_time = Time.now + (@failed_attempts * @failed_attempts)
        end

        ##
        # Represents the result of applying a write.
        #
        # @example
        #   require "google/cloud/firestore"
        #
        #   firestore = Google::Cloud::Firestore.new
        #   bw = firestore.bulk_writer
        #
        #   # Set the data for NYC
        #   result = bw.set("cities/NYC", { name: "New York City" })
        #
        #   result.wait!
        #
        #   puts result.value
        #
        class WriteResult
          ##
          # The last update time of the document after applying the write. Set to
          # nil for a +delete+ mutation.
          #
          # If the write did not actually change the document, this will be
          # the previous update_time.
          #
          # @return [Time] The last update time.
          attr_reader :update_time

          ##
          # @private
          def initialize result
            @update_time = Convert.timestamp_to_time result.update_time
          end
        end
      end
    end
  end
end
|
@@ -0,0 +1,164 @@
|
|
1
|
+
# Copyright 2023 Google LLC
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# https://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
|
16
|
+
require "concurrent"
|
17
|
+
require "google/cloud/firestore/errors"
|
18
|
+
require "google/cloud/firestore/bulk_writer_operation"
|
19
|
+
require "google/cloud/firestore/rate_limiter"
|
20
|
+
require "google/cloud/firestore/bulk_commit_batch"
|
21
|
+
require "google/cloud/firestore/bulk_writer_exception"
|
22
|
+
require "google/cloud/firestore/bulk_writer_scheduler"
|
23
|
+
|
24
|
+
|
25
|
+
module Google
  module Cloud
    module Firestore
      ##
      #
      # @private Accumulate BulkWriterOperations from the BulkWriter, schedules them
      # in accordance with 555 rule and retry the failed operations from the BulkCommitBatch.
      #
      class BulkWriterScheduler
        # Upper bound on operations per commit; keeps each BatchWrite request
        # comfortably under the documented request-size limit (see commit_batch).
        MAX_BATCH_SIZE = 20
        # Default thread-pool size when the caller does not supply batch_threads.
        BATCH_THREAD_COUNT = 4

        ##
        # Initialize the attributes and start the schedule_operations job
        #
        # @param client [Object] The Firestore client owning this scheduler.
        # @param service [Object] The service used to send commit RPCs.
        # @param batch_threads [Integer, nil] Pool size; falls back to
        #   BATCH_THREAD_COUNT when nil.
        def initialize client, service, batch_threads
          @client = client
          @service = service
          @rate_limiter = RateLimiter.new
          @buffered_operations = []
          @batch_threads = (batch_threads || BATCH_THREAD_COUNT).to_i
          @batch_thread_pool = Concurrent::ThreadPoolExecutor.new max_threads: @batch_threads,
                                                                  max_queue: 0,
                                                                  auto_terminate: true
          @retry_operations = []
          # Guards @buffered_operations and @retry_operations (and close).
          @mutex = Mutex.new
          start_scheduling_operations
        end

        ##
        # Kicks off the scheduling loop on the batch thread pool. Any error
        # escaping schedule_operations restarts the loop via retry, so the
        # scheduler keeps running until the pool shuts down.
        def start_scheduling_operations
          Concurrent::Promises.future_on @batch_thread_pool do
            begin
              schedule_operations
            rescue StandardError
              # TODO: Log the error when logging is available
              retry
            end
          end
        end

        # Enqueues a BulkWriterOperation for inclusion in a future batch.
        def add_operation operation
          @mutex.synchronize { @buffered_operations << operation }
        end

        ##
        # Closes the scheduler object.
        # Waits for the enqueued tasks to complete
        # before closing down.
        #
        # NOTE(review): the mutex is held across shutdown, so worker code
        # blocked on the same mutex cannot finish within the 1-second grace
        # period and will be killed — confirm this is the intended teardown.
        #
        # @return [nil]
        def close
          @mutex.synchronize do
            @batch_thread_pool.shutdown
            @batch_thread_pool.wait_for_termination 1
            @batch_thread_pool.kill unless @batch_thread_pool.shutdown?
          end
        end

        private

        ##
        # @private Adds failed operations in the retry heap.
        #
        # An operation whose completion_event is not yet set has neither
        # succeeded nor permanently failed, so it is re-enqueued; the retry
        # list is kept sorted by retry_time (earliest first).
        #
        def post_commit_batch bulk_commit_batch
          @mutex.synchronize do
            bulk_commit_batch.operations.each do |operation|
              unless operation.completion_event.set?
                @retry_operations << operation
              end
            end
            @retry_operations.sort_by!(&:retry_time)
          end
        end

        ##
        # @private Commits a batch of scheduled operations.
        # Batch size <= 20 to match the constraint of request size < 9.8 MB
        #
        # The commit runs asynchronously on the pool; whether it succeeds or
        # raises, post_commit_batch re-enqueues any still-incomplete
        # operations. If even scheduling the future fails, the batch is
        # requeued and a BulkWriterSchedulerError is raised.
        #
        # @return [nil]
        def commit_batch bulk_commit_batch
          begin
            Concurrent::Promises.future_on @batch_thread_pool, bulk_commit_batch do |batch|
              begin
                batch.commit
              rescue StandardError
                # TODO: Log the errors while committing a batch
              ensure
                post_commit_batch bulk_commit_batch
              end
            end
          rescue StandardError => e
            post_commit_batch bulk_commit_batch
            raise BulkWriterSchedulerError, e.message
          end
        end

        ##
        # @private Schedule the enqueued operations in batches.
        #
        # Loops until the pool begins shutting down: promotes due retries,
        # sleeps briefly when there is nothing to send, waits on the rate
        # limiter for batch_size tokens, then commits up to MAX_BATCH_SIZE
        # operations.
        #
        # @return [nil]
        def schedule_operations
          loop do
            break if @batch_thread_pool.shuttingdown?
            dequeue_retry_operations
            batch_size = [MAX_BATCH_SIZE, @buffered_operations.length].min
            if batch_size.zero?
              sleep 0.001
              next
            end
            @rate_limiter.wait_for_tokens batch_size
            operations = dequeue_buffered_operations batch_size
            commit_batch BulkCommitBatch.new(@service, operations)
          end
        end

        ##
        # @private Removes BulkWriterOperations from the buffered queue to scheduled in
        # the current batch
        #
        def dequeue_buffered_operations size
          @mutex.synchronize do
            @buffered_operations.shift size
          end
        end

        ##
        # @private Removes BulkWriterOperations from the retry queue to scheduled in
        # the current batch
        #
        # Only operations whose retry_time has arrived are promoted; the list
        # is sorted by retry_time, so the loop stops at the first future one.
        #
        def dequeue_retry_operations
          @mutex.synchronize do
            while @retry_operations.length.positive? && @retry_operations.first.retry_time <= Time.now
              @buffered_operations << @retry_operations.shift
            end
          end
        end
      end
    end
  end
end
|
@@ -23,6 +23,8 @@ require "google/cloud/firestore/document_snapshot"
|
|
23
23
|
require "google/cloud/firestore/collection_group"
|
24
24
|
require "google/cloud/firestore/batch"
|
25
25
|
require "google/cloud/firestore/transaction"
|
26
|
+
require "google/cloud/firestore/bulk_writer"
|
27
|
+
require "google/cloud/firestore/filter"
|
26
28
|
|
27
29
|
module Google
|
28
30
|
module Cloud
|
@@ -185,6 +187,56 @@ module Google
|
|
185
187
|
end
|
186
188
|
alias collection_group col_group
|
187
189
|
|
190
|
+
##
# Creates a filter object.
#
# @param field [FieldPath, String, Symbol] A field path to filter
#   results with.
#   If a {FieldPath} object is not provided then the field will be
#   treated as a dotted string, meaning the string represents individual
#   fields joined by ".". Fields containing `~`, `*`, `/`, `[`, `]`, and
#   `.` cannot be in a dotted string, and should be provided using a
#   {FieldPath} object instead.
#
# @param operator [String, Symbol] The operation to compare the field
#   to. Acceptable values include:
#   * less than: `<`, `lt`
#   * less than or equal: `<=`, `lte`
#   * greater than: `>`, `gt`
#   * greater than or equal: `>=`, `gte`
#   * equal: `=`, `==`, `eq`, `eql`, `is`
#   * not equal: `!=`
#   * in: `in`
#   * not in: `not-in`, `not_in`
#   * array contains: `array-contains`, `array_contains`
#
# @param value [Object] The value to compare the property to.
#   Possible values are:
#   * Integer
#   * Float/BigDecimal
#   * String
#   * Boolean
#   * Array
#   * Date/Time
#   * StringIO
#   * Google::Cloud::Datastore::Key
#   * Google::Cloud::Datastore::Entity
#   * nil
#
# @return [Google::Cloud::Firestore::Filter] New filter object.
#
# @example
#   require "google/cloud/firestore"
#
#   firestore = Google::Cloud::Firestore.new
#
#   # Create a filter
#   filter = firestore.filter(:population, :>=, 1000000)
#
def filter field, operator, value
  Filter.new field, operator, value
end
|
239
|
+
|
188
240
|
##
|
189
241
|
# Retrieves a document reference.
|
190
242
|
#
|
@@ -738,6 +790,35 @@ module Google
|
|
738
790
|
yield transaction
|
739
791
|
end
|
740
792
|
|
793
|
+
##
# Create a bulk writer to perform multiple writes that are
# executed in parallel.
#
# @param [Integer] request_threads The number of threads used for handling
#   requests. Default is 2. Optional.
# @param [Integer] batch_threads The number of threads used for processing
#   batches. Default is 4. Optional.
# @param [Integer] retries The number of times a failed write request will
#   be retried (with backoff delay) before being marked as failure. Max
#   attempts are 15. Optional
#
# @return [Google::Cloud::Firestore::BulkWriter] Returns an object of
#   bulk writer.
#
# @example Initializing a BulkWriter with all the configurations.
#   require "google/cloud/firestore"
#
#   firestore = Google::Cloud::Firestore.new
#
#   # The threading/retry options are passed to bulk_writer itself.
#   bw = firestore.bulk_writer request_threads: 4, batch_threads: 10, retries: 10
#
#   bulk_write_result = bw.create "doc_ref", { name: "value" }
#
def bulk_writer request_threads: nil, batch_threads: nil, retries: nil
  BulkWriter.new self, @service, request_threads: request_threads,
                 batch_threads: batch_threads, retries: retries
end
|
821
|
+
|
741
822
|
# @!endgroup
|
742
823
|
|
743
824
|
# @private
|
@@ -0,0 +1,60 @@
|
|
1
|
+
# Copyright 2023 Google LLC
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# https://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
require "google/cloud/errors"
|
16
|
+
|
17
|
+
module Google
  module Cloud
    module Firestore
      ##
      # Raised when an error is reported while scheduling
      # BulkWriter operations.
      #
      class BulkWriterSchedulerError < Google::Cloud::Error
        # @param message [String] Description of the underlying failure.
        def initialize(message)
          super("BulkWriterSchedulerError : #{message}")
        end
      end

      ##
      # Raised when an error is reported while committing a
      # batch of operations.
      #
      class BulkCommitBatchError < Google::Cloud::Error
        # @param message [String] Description of the underlying failure.
        def initialize(message)
          super("BulkCommitBatchError : #{message}")
        end
      end

      ##
      # Raised when an error is reported while parsing the response for
      # a BulkWriterOperation.
      #
      class BulkWriterOperationError < Google::Cloud::Error
        # @param message [String] Description of the underlying failure.
        def initialize(message)
          super("BulkWriterOperationError : #{message}")
        end
      end

      ##
      # Raised when an error is reported in BulkWriter.
      #
      class BulkWriterError < Google::Cloud::Error
        # @param message [String] Description of the underlying failure.
        def initialize(message)
          super("BulkWriterError : #{message}")
        end
      end
    end
  end
end
|