google-cloud-firestore 2.12.0 → 2.13.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/AUTHENTICATION.md +8 -26
- data/CHANGELOG.md +12 -0
- data/lib/google/cloud/firestore/bulk_commit_batch.rb +73 -0
- data/lib/google/cloud/firestore/bulk_writer.rb +558 -0
- data/lib/google/cloud/firestore/bulk_writer_exception.rb +40 -0
- data/lib/google/cloud/firestore/bulk_writer_operation.rb +126 -0
- data/lib/google/cloud/firestore/bulk_writer_scheduler.rb +164 -0
- data/lib/google/cloud/firestore/client.rb +30 -0
- data/lib/google/cloud/firestore/convert.rb +1 -1
- data/lib/google/cloud/firestore/errors.rb +60 -0
- data/lib/google/cloud/firestore/promise/future.rb +97 -0
- data/lib/google/cloud/firestore/rate_limiter.rb +80 -0
- data/lib/google/cloud/firestore/service.rb +13 -1
- data/lib/google/cloud/firestore/version.rb +1 -1
- metadata +10 -2
data/lib/google/cloud/firestore/bulk_writer_operation.rb ADDED
@@ -0,0 +1,126 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require "concurrent"
+require "google/cloud/firestore/bulk_writer_exception"
+
+
+module Google
+  module Cloud
+    module Firestore
+      ##
+      #
+      # @private A BulkWriterOperation object refers to a write operation and contains
+      # all the necessary information for a specific write task, including meta
+      # information like the current number of attempts
+      #
+      class BulkWriterOperation
+        attr_reader :retry_time
+        attr_reader :result
+        attr_reader :completion_event
+        attr_reader :write
+
+        ##
+        # Initialize the object
+        def initialize write, retries
+          @write = write
+          @failed_attempts = 0
+          @retries = retries
+          @retry_time = Time.now
+          @completion_event = Concurrent::Event.new
+        end
+
+        ##
+        # Processing to be done when the response is a success.
+        # Updates the result and set the completion event.
+        #
+        # @param [Google::Cloud::Firestore::V1::WriteResult] result The result returned in the response.
+        def on_success result
+          begin
+            @result = WriteResult.new result
+          rescue StandardError => e
+            raise BulkWriterOperationError, e
+          ensure
+            @completion_event.set
+          end
+        end
+
+        ##
+        # Processing to be done when the response is a failure.
+        # Updates the failure attempts. If the retry count reaches
+        # the upper threshold, operations will be marked
+        # as failure and the completion event will be set.
+        #
+        # @param [Google::Rpc::Status] status The status received in the response.
+        #
+        def on_failure status
+          @failed_attempts += 1
+          if @failed_attempts == @retries + 1
+            begin
+              @result = BulkWriterException.new status
+            rescue StandardError => e
+              raise BulkWriterOperationError, e
+            ensure
+              @completion_event.set
+            end
+          else
+            backoff_duration
+          end
+        end
+
+        ##
+        # Exponentially increases the waiting time for retry.
+        #
+        def backoff_duration
+          @retry_time = Time.now + (@failed_attempts**2)
+        end
+
+        ##
+        # Represents the result of applying a write.
+        #
+        # @example
+        #   require "google/cloud/firestore"
+        #
+        #   firestore = Google::Cloud::Firestore.new
+        #   bw = firestore.bulk_writer
+        #
+        #   # Set the data for NYC
+        #   result = bw.set("cities/NYC", { name: "New York City" })
+        #
+        #   result.wait!
+        #
+        #   puts result.value
+        #
+        class WriteResult
+          ##
+          # The last update time of the document after applying the write. Set to
+          # nil for a +delete+ mutation.
+          #
+          # If the write did not actually change the document, this will be
+          # the previous update_time.
+          #
+          # @return [Time] The last update time.
+          attr_reader :update_time
+
+          ##
+          # @private
+          def initialize result
+            @update_time = Convert.timestamp_to_time result.update_time
+          end
+        end
+      end
+    end
+  end
+end
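The retry bookkeeping above is the part worth pausing on: `on_failure` counts attempts, gives up after `retries + 1` failures, and otherwise pushes `retry_time` out quadratically. The following is a rough, dependency-free sketch of that accounting; the lambda and variable names are illustrative only and are not the gem's API.

```ruby
# Standalone illustration of BulkWriterOperation's backoff accounting:
# each failed attempt delays the next retry by attempts**2 seconds until
# the retry budget is exhausted.
retries = 3
failed_attempts = 0
retry_time = Time.now

on_failure = lambda do
  failed_attempts += 1
  if failed_attempts == retries + 1
    :failed_permanently                            # the real class stores a BulkWriterException
  else
    retry_time = Time.now + (failed_attempts**2)   # +1s, +4s, +9s, ...
    :will_retry
  end
end

3.times { puts on_failure.call }   # will_retry (1s), will_retry (4s), will_retry (9s)
puts on_failure.call               # failed_permanently
```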
data/lib/google/cloud/firestore/bulk_writer_scheduler.rb ADDED
@@ -0,0 +1,164 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License")
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require "concurrent"
+require "google/cloud/firestore/errors"
+require "google/cloud/firestore/bulk_writer_operation"
+require "google/cloud/firestore/rate_limiter"
+require "google/cloud/firestore/bulk_commit_batch"
+require "google/cloud/firestore/bulk_writer_exception"
+require "google/cloud/firestore/bulk_writer_scheduler"
+
+
+module Google
+  module Cloud
+    module Firestore
+      ##
+      #
+      # @private Accumulate BulkWriterOperations from the BulkWriter, schedules them
+      # in accordance with 555 rule and retry the failed operations from the BulkCommitBatch.
+      #
+      class BulkWriterScheduler
+        MAX_BATCH_SIZE = 20
+        BATCH_THREAD_COUNT = 4
+
+        ##
+        # Initialize the attributes and start the schedule_operations job
+        #
+        def initialize client, service, batch_threads
+          @client = client
+          @service = service
+          @rate_limiter = RateLimiter.new
+          @buffered_operations = []
+          @batch_threads = (batch_threads || BATCH_THREAD_COUNT).to_i
+          @batch_thread_pool = Concurrent::ThreadPoolExecutor.new max_threads: @batch_threads,
+                                                                  max_queue: 0,
+                                                                  auto_terminate: true
+          @retry_operations = []
+          @mutex = Mutex.new
+          start_scheduling_operations
+        end
+
+        def start_scheduling_operations
+          Concurrent::Promises.future_on @batch_thread_pool do
+            begin
+              schedule_operations
+            rescue StandardError
+              # TODO: Log the error when logging is available
+              retry
+            end
+          end
+        end
+
+        def add_operation operation
+          @mutex.synchronize { @buffered_operations << operation }
+        end
+
+        ##
+        # Closes the scheduler object.
+        # Waits for the enqueued tasks to complete
+        # before closing down.
+        #
+        # @return [nil]
+        def close
+          @mutex.synchronize do
+            @batch_thread_pool.shutdown
+            @batch_thread_pool.wait_for_termination 1
+            @batch_thread_pool.kill unless @batch_thread_pool.shutdown?
+          end
+        end
+
+        private
+
+        ##
+        # @private Adds failed operations in the retry heap.
+        #
+        def post_commit_batch bulk_commit_batch
+          @mutex.synchronize do
+            bulk_commit_batch.operations.each do |operation|
+              unless operation.completion_event.set?
+                @retry_operations << operation
+              end
+            end
+            @retry_operations.sort_by!(&:retry_time)
+          end
+        end
+
+        ##
+        # @private Commits a batch of scheduled operations.
+        # Batch size <= 20 to match the constraint of request size < 9.8 MB
+        #
+        # @return [nil]
+        def commit_batch bulk_commit_batch
+          begin
+            Concurrent::Promises.future_on @batch_thread_pool, bulk_commit_batch do |batch|
+              begin
+                batch.commit
+              rescue StandardError
+                # TODO: Log the errors while committing a batch
+              ensure
+                post_commit_batch bulk_commit_batch
+              end
+            end
+          rescue StandardError => e
+            post_commit_batch bulk_commit_batch
+            raise BulkWriterSchedulerError, e.message
+          end
+        end
+
+        ##
+        # @private Schedule the enqueued operations in batches.
+        #
+        # @return [nil]
+        def schedule_operations
+          loop do
+            break if @batch_thread_pool.shuttingdown?
+            dequeue_retry_operations
+            batch_size = [MAX_BATCH_SIZE, @buffered_operations.length].min
+            if batch_size.zero?
+              sleep 0.001
+              next
+            end
+            @rate_limiter.wait_for_tokens batch_size
+            operations = dequeue_buffered_operations batch_size
+            commit_batch BulkCommitBatch.new(@service, operations)
+          end
+        end
+
+        ##
+        # @private Removes BulkWriterOperations from the buffered queue to scheduled in
+        # the current batch
+        #
+        def dequeue_buffered_operations size
+          @mutex.synchronize do
+            @buffered_operations.shift size
+          end
+        end
+
+        ##
+        # @private Removes BulkWriterOperations from the retry queue to scheduled in
+        # the current batch
+        #
+        def dequeue_retry_operations
+          @mutex.synchronize do
+            while @retry_operations.length.positive? && @retry_operations.first.retry_time <= Time.now
+              @buffered_operations << @retry_operations.shift
+            end
+          end
+        end
+      end
+    end
+  end
+end
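The core of `schedule_operations` is easier to follow as a toy: operations accumulate in a buffer, each pass takes at most `MAX_BATCH_SIZE` of them, waits for rate-limiter tokens, and commits the slice as one batch. Below is a dependency-free sketch of just that slicing behaviour, with no threads, mutex, or Firestore calls; the strings stand in for BulkWriterOperations.

```ruby
# Toy version of the scheduling loop: drain a buffer in slices of at most
# MAX_BATCH_SIZE, the way BulkWriterScheduler hands slices to BulkCommitBatch.
MAX_BATCH_SIZE = 20

buffered_operations = Array.new(45) { |i| "op-#{i}" }   # stand-ins for real operations

until buffered_operations.empty?
  batch_size = [MAX_BATCH_SIZE, buffered_operations.length].min
  batch = buffered_operations.shift(batch_size)         # mirrors dequeue_buffered_operations
  puts "commit batch of #{batch.size}"                  # commit_batch would run this on the pool
end
# prints: commit batch of 20 / 20 / 5
```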
data/lib/google/cloud/firestore/client.rb CHANGED
@@ -23,6 +23,7 @@ require "google/cloud/firestore/document_snapshot"
 require "google/cloud/firestore/collection_group"
 require "google/cloud/firestore/batch"
 require "google/cloud/firestore/transaction"
+require "google/cloud/firestore/bulk_writer"
 require "google/cloud/firestore/filter"
 
 module Google
@@ -789,6 +790,35 @@ module Google
           yield transaction
       end
 
+      ##
+      # Create a bulk writer to perform multiple writes that are
+      # executed parallely.
+      #
+      # @param [Integer] request_threads The number of threads used for handling
+      #   requests. Default is 2. Optional.
+      # @param [Integer] batch_threads The number of threads used for processing
+      #   batches. Default is 4. Optional.
+      # @param [Integer] retries The number of times a failed write request will
+      #   be retried (with exponential delay) before being marked as failure. Max
+      #   attempts are 15. Optional
+      #
+      # @return [Google::Cloud::Firestore::BulkWriter] Returns an object of
+      #   bulk writer.
+      #
+      # @example Initializing a BulkWriter with all the configurations.
+      #   require "google/cloud/firestore"
+      #
+      #   firestore = Google::Cloud::Firestore.new
+      #
+      #   bw = firestore.bulk_writer
+      #
+      #   bulk_write_result = bw.create "doc_ref", request_threads: 4, batch_threads: 10, retries: 10
+      #
+      def bulk_writer request_threads: nil, batch_threads: nil, retries: nil
+        BulkWriter.new self, @service, request_threads: request_threads,
+                       batch_threads: batch_threads, retries: retries
+      end
+
       # @!endgroup
 
       # @private
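Taken together with the `WriteResult` example earlier in this diff, the new surface looks roughly like the sketch below. The `set` call returning a future, plus `wait!` and `value`, come from this release's own doc comments; `close` on the BulkWriter is an assumption here, since `bulk_writer.rb` itself is not reproduced in this excerpt.

```ruby
# Hedged usage sketch of the new Client#bulk_writer entry point.
require "google/cloud/firestore"

firestore = Google::Cloud::Firestore.new
bw = firestore.bulk_writer batch_threads: 4, retries: 5

result = bw.set "cities/NYC", { name: "New York City" }  # returns a Promise::Future
result.wait!
puts result.value.update_time   # WriteResult#update_time, per the class above

bw.close   # assumed: waits for pending writes, then shuts the thread pools down
```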
data/lib/google/cloud/firestore/errors.rb ADDED
@@ -0,0 +1,60 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require "google/cloud/errors"
+
+module Google
+  module Cloud
+    module Firestore
+      ##
+      # Indicates that the an error was reported while scheduling
+      # BulkWriter operations.
+      #
+      class BulkWriterSchedulerError < Google::Cloud::Error
+        def initialize message
+          super "BulkWriterSchedulerError : #{message}"
+        end
+      end
+
+      ##
+      # Indicates that the an error was reported while committing a
+      # batch of operations.
+      #
+      class BulkCommitBatchError < Google::Cloud::Error
+        def initialize message
+          super "BulkCommitBatchError : #{message}"
+        end
+      end
+
+      ##
+      # Indicates that the an error was reported while parsing response for
+      # BulkWriterOperation.
+      #
+      class BulkWriterOperationError < Google::Cloud::Error
+        def initialize message
+          super "BulkWriterOperationError : #{message}"
+        end
+      end
+
+      ##
+      # Indicates that the an error was reported in BulkWriter.
+      #
+      class BulkWriterError < Google::Cloud::Error
+        def initialize message
+          super "BulkWriterError : #{message}"
+        end
+      end
+    end
+  end
+end
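All four classes subclass `Google::Cloud::Error` and prefix their own name onto the message, so callers can rescue them narrowly or as a group. A minimal, self-contained check (the raise is simulated; in practice these errors surface from the BulkWriter internals):

```ruby
require "google/cloud/firestore/errors"

begin
  # Simulated failure for illustration only.
  raise Google::Cloud::Firestore::BulkWriterError, "queue is closed"
rescue Google::Cloud::Firestore::BulkWriterError => e
  warn e.message                      # => "BulkWriterError : queue is closed"
rescue Google::Cloud::Error => e
  warn "other Cloud error: #{e.message}"
end
```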
data/lib/google/cloud/firestore/promise/future.rb ADDED
@@ -0,0 +1,97 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+module Google
+  module Cloud
+    module Firestore
+      module Promise
+        ##
+        # # Future
+        #
+        # A Future object represents a value which will become available in future.
+        # May reject with a reason instead, e.g. when the tasks raises an exception.
+        #
+        class Future
+          ##
+          # Initialize the future object
+          #
+          def initialize future
+            @future = future
+          end
+
+          # Is it in fulfilled state?
+          #
+          # @return [Boolean]
+          def fulfilled?
+            @future.fulfilled?
+          end
+
+          # Is it in rejected state?
+          #
+          # @return [Boolean]
+          def rejected?
+            @future.rejected?
+          end
+
+          ##
+          # Method waits for the timeout duration and return the value of the future if
+          # fulfilled, timeout value in case of timeout and nil in case of rejection.
+          #
+          # @param [Integer] timeout the maximum time in seconds to wait
+          # @param [Object] timeout_value a value returned by the method when it times out
+          # @return [Object, nil, timeout_value] the value of the Future when fulfilled,
+          #   timeout_value on timeout, nil on rejection.
+          def value timeout = nil, timeout_value = nil
+            @future.value timeout, timeout_value
+          end
+
+          # Returns reason of future's rejection.
+          #
+          # @return [Object, timeout_value] the reason, or timeout_value on timeout, or nil on fulfillment.
+          def reason timeout = nil, timeout_value = nil
+            @future.reason timeout, timeout_value
+          end
+
+          ##
+          # Method waits for the timeout duration and raise exception on rejection
+          #
+          # @param [Integer] timeout the maximum time in seconds to wait
+          def wait! timeout = nil
+            @future.wait! timeout
+          end
+
+          ##
+          # Chains the task to be executed synchronously after it fulfills. Does not run
+          # the task if it rejects. It will resolve though, triggering any dependent futures.
+          #
+          # @return [Future]
+          # @yield [reason, *args] to the task.
+          def then *args, &task
+            Future.new @future.then(*args, &task)
+          end
+
+          # Chains the task to be executed synchronously on executor after it rejects. Does
+          # not run the task if it fulfills. It will resolve though, triggering any
+          # dependent futures.
+          #
+          # @return [Future]
+          # @yield [reason, *args] to the task.
+          def rescue *args, &task
+            Future.new @future.rescue(*args, &task)
+          end
+        end
+      end
+    end
+  end
+end
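The wrapper above delegates to a concurrent-ruby future, so its behaviour can be previewed without touching Firestore at all. A small sketch, assuming the gem (and therefore its `concurrent-ruby` dependency) is installed:

```ruby
require "concurrent"
require "google/cloud/firestore/promise/future"

inner  = Concurrent::Promises.future { 21 * 2 }
future = Google::Cloud::Firestore::Promise::Future.new inner

puts future.value(5)       # => 42  (waits up to 5 seconds)
puts future.fulfilled?     # => true

# then/rescue chain exactly like the underlying concurrent-ruby future:
chained = future.then { |v| v + 1 }.rescue { |reason| warn reason }
puts chained.value         # => 43
```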
data/lib/google/cloud/firestore/rate_limiter.rb ADDED
@@ -0,0 +1,80 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+module Google
+  module Cloud
+    module Firestore
+      ##
+      # @private Implements 5/5/5 ramp-up via Token Bucket algorithm.
+      #
+      # 5/5/5 is a ramp up strategy that starts with a budget of 500 operations per
+      # second. Additionally, every 5 minutes, the maximum budget can increase by
+      # 50%. Thus, at 5:01 into a long bulk-writing process, the maximum budget
+      # becomes 750 operations per second. At 10:01, the budget becomes 1,125
+      # operations per second.
+      #
+      class RateLimiter
+        DEFAULT_STARTING_MAXIMUM_OPS_PER_SECOND = 500.0
+        DEFAULT_PHASE_LENGTH = 300.0
+
+        attr_reader :bandwidth
+
+        ##
+        # Initialize the object
+        def initialize starting_ops: nil, phase_length: nil
+          @start_time = time
+          @last_fetched = time
+          @bandwidth = (starting_ops || DEFAULT_STARTING_MAXIMUM_OPS_PER_SECOND).to_f
+          @phase_length = phase_length || DEFAULT_PHASE_LENGTH
+        end
+
+        ##
+        # Wait till the number of tokens is available
+        # Assumes that the bandwidth is distributed evenly across the entire second.
+        #
+        # Example - If the limit is 500 qps, then it has been further broken down to 2e+6 nsec
+        # per query
+        #
+        # @return [nil]
+        def wait_for_tokens size
+          available_time = @last_fetched + (size / @bandwidth)
+          waiting_time = [0, available_time - time].max
+          sleep waiting_time
+          @last_fetched = time
+          increase_bandwidth
+        end
+
+        private
+
+        ##
+        # Returns time elapsed since epoch.
+        #
+        # @return [Float] Float denoting time elapsed since epoch
+        def time
+          Time.now.to_f
+        end
+
+        ##
+        # Increase the bandwidth as per 555 rule
+        #
+        # @return [nil]
+        def increase_bandwidth
+          intervals = (time - @start_time) / @phase_length
+          @bandwidth = (DEFAULT_STARTING_MAXIMUM_OPS_PER_SECOND * (1.5**intervals.floor)).to_f
+        end
+      end
+    end
+  end
+end
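The `increase_bandwidth` formula is the whole 5/5/5 story: the budget is 500 ops/sec multiplied by 1.5 for every full five-minute phase that has elapsed. A quick, dependency-free check of the numbers quoted in the class comment:

```ruby
# bandwidth = 500 * 1.5 ** floor(elapsed / 300), as in RateLimiter#increase_bandwidth.
STARTING_OPS  = 500.0
PHASE_SECONDS = 300          # 5 minutes

[0, 299, 301, 601, 901].each do |elapsed|
  phases    = elapsed / PHASE_SECONDS          # integer division stands in for .floor
  bandwidth = STARTING_OPS * (1.5**phases)
  puts format("%4ds elapsed -> %7.1f ops/sec", elapsed, bandwidth)
end
# 0s and 299s -> 500.0, 301s -> 750.0, 601s -> 1125.0, 901s -> 1687.5
```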
data/lib/google/cloud/firestore/service.rb CHANGED
@@ -186,6 +186,18 @@ module Google
           )
         end
 
+        ##
+        # Makes the BatchWrite API call. Contains the list of write operations to be processed.
+        #
+        # @return [::Google::Cloud::Firestore::V1::BatchWriteResponse]
+        def batch_write writes
+          batch_write_req = {
+            database: database_path,
+            writes: writes
+          }
+          firestore.batch_write batch_write_req, call_options(parent: database_path)
+        end
+
         def database_path project_id: project, database_id: database
           # Originally used V1::FirestoreClient.database_root_path until it was removed in #5405.
           "projects/#{project_id}/databases/#{database_id}"
@@ -222,7 +234,7 @@ module Google
           Gapic::CallOptions.new(**{
            metadata: default_headers(parent),
            page_token: token
-          }.
+          }.compact)
         end
 
         def document_mask mask
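For context, `Service#batch_write` simply forwards a `database` path plus an array of `Google::Cloud::Firestore::V1::Write` messages to the generated V1 client. The hash below is an illustrative, unverified sketch of that request shape using the Write message's `delete` field; the project name is made up, and the commented line shows where the helper hands the request off.

```ruby
# Illustrative request shape for the new Service#batch_write helper.
database = "projects/my-project/databases/(default)"

batch_write_req = {
  database: database,
  writes: [
    # one Write message in hash form; `delete` names the document to remove
    { delete: "#{database}/documents/cities/NYC" }
  ]
}

# Inside the gem this becomes roughly:
#   firestore.batch_write batch_write_req, call_options(parent: database_path)
puts batch_write_req[:writes].length   # => 1
```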
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: google-cloud-firestore
 version: !ruby/object:Gem::Version
-  version: 2.12.0
+  version: 2.13.1
 platform: ruby
 authors:
 - Google Inc
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-
+date: 2023-06-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: google-cloud-core
@@ -228,6 +228,11 @@ files:
 - lib/google/cloud/firestore/aggregate_query.rb
 - lib/google/cloud/firestore/aggregate_query_snapshot.rb
 - lib/google/cloud/firestore/batch.rb
+- lib/google/cloud/firestore/bulk_commit_batch.rb
+- lib/google/cloud/firestore/bulk_writer.rb
+- lib/google/cloud/firestore/bulk_writer_exception.rb
+- lib/google/cloud/firestore/bulk_writer_operation.rb
+- lib/google/cloud/firestore/bulk_writer_scheduler.rb
 - lib/google/cloud/firestore/client.rb
 - lib/google/cloud/firestore/collection_group.rb
 - lib/google/cloud/firestore/collection_reference.rb
@@ -240,14 +245,17 @@ files:
 - lib/google/cloud/firestore/document_reference.rb
 - lib/google/cloud/firestore/document_reference/list.rb
 - lib/google/cloud/firestore/document_snapshot.rb
+- lib/google/cloud/firestore/errors.rb
 - lib/google/cloud/firestore/field_path.rb
 - lib/google/cloud/firestore/field_value.rb
 - lib/google/cloud/firestore/filter.rb
 - lib/google/cloud/firestore/generate.rb
+- lib/google/cloud/firestore/promise/future.rb
 - lib/google/cloud/firestore/query.rb
 - lib/google/cloud/firestore/query_listener.rb
 - lib/google/cloud/firestore/query_partition.rb
 - lib/google/cloud/firestore/query_snapshot.rb
+- lib/google/cloud/firestore/rate_limiter.rb
 - lib/google/cloud/firestore/resource_path.rb
 - lib/google/cloud/firestore/service.rb
 - lib/google/cloud/firestore/transaction.rb