google-cloud-firestore 0.22.0 → 0.23.0
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
- checksums.yaml +4 -4
- data/.yardopts +1 -0
- data/README.md +8 -8
- data/lib/google-cloud-firestore.rb +1 -1
- data/lib/google/cloud/firestore.rb +46 -0
- data/lib/google/cloud/firestore/batch.rb +1 -1
- data/lib/google/cloud/firestore/client.rb +18 -13
- data/lib/google/cloud/firestore/convert.rb +78 -35
- data/lib/google/cloud/firestore/credentials.rb +2 -12
- data/lib/google/cloud/firestore/document_change.rb +124 -0
- data/lib/google/cloud/firestore/document_listener.rb +125 -0
- data/lib/google/cloud/firestore/document_reference.rb +35 -0
- data/lib/google/cloud/firestore/document_snapshot.rb +91 -9
- data/lib/google/cloud/firestore/field_path.rb +23 -13
- data/lib/google/cloud/firestore/query.rb +513 -69
- data/lib/google/cloud/firestore/query_listener.rb +118 -0
- data/lib/google/cloud/firestore/query_snapshot.rb +121 -0
- data/lib/google/cloud/firestore/service.rb +8 -0
- data/lib/google/cloud/firestore/transaction.rb +2 -2
- data/lib/google/cloud/firestore/v1beta1.rb +62 -37
- data/lib/google/cloud/firestore/v1beta1/credentials.rb +41 -0
- data/lib/google/cloud/firestore/v1beta1/doc/google/firestore/v1beta1/common.rb +1 -1
- data/lib/google/cloud/firestore/v1beta1/doc/google/firestore/v1beta1/document.rb +5 -4
- data/lib/google/cloud/firestore/v1beta1/doc/google/firestore/v1beta1/firestore.rb +1 -12
- data/lib/google/cloud/firestore/v1beta1/doc/google/firestore/v1beta1/query.rb +4 -1
- data/lib/google/cloud/firestore/v1beta1/doc/google/firestore/v1beta1/write.rb +37 -8
- data/lib/google/cloud/firestore/v1beta1/doc/google/protobuf/any.rb +1 -1
- data/lib/google/cloud/firestore/v1beta1/doc/google/protobuf/empty.rb +28 -0
- data/lib/google/cloud/firestore/v1beta1/doc/google/protobuf/timestamp.rb +1 -1
- data/lib/google/cloud/firestore/v1beta1/doc/google/protobuf/wrappers.rb +1 -1
- data/lib/google/cloud/firestore/v1beta1/doc/google/rpc/status.rb +1 -1
- data/lib/google/cloud/firestore/v1beta1/firestore_client.rb +124 -56
- data/lib/google/cloud/firestore/v1beta1/firestore_client_config.json +2 -2
- data/lib/google/cloud/firestore/version.rb +1 -1
- data/lib/google/cloud/firestore/watch/enumerator_queue.rb +47 -0
- data/lib/google/cloud/firestore/watch/inventory.rb +280 -0
- data/lib/google/cloud/firestore/watch/listener.rb +298 -0
- data/lib/google/cloud/firestore/watch/order.rb +98 -0
- data/lib/google/firestore/v1beta1/firestore_services_pb.rb +2 -4
- data/lib/google/firestore/v1beta1/query_pb.rb +1 -0
- data/lib/google/firestore/v1beta1/write_pb.rb +2 -0
- metadata +40 -3
- data/lib/google/cloud/firestore/v1beta1/doc/overview.rb +0 -53
data/lib/google/cloud/firestore/v1beta1/firestore_client_config.json

```diff
@@ -80,12 +80,12 @@
         "retry_params_name": "default"
       },
       "Write": {
-        "timeout_millis":
+        "timeout_millis": 86400000,
         "retry_codes_name": "non_idempotent",
         "retry_params_name": "streaming"
       },
       "Listen": {
-        "timeout_millis":
+        "timeout_millis": 86400000,
        "retry_codes_name": "idempotent",
        "retry_params_name": "streaming"
      },
```
data/lib/google/cloud/firestore/watch/enumerator_queue.rb (new file)

```diff
@@ -0,0 +1,47 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require "thread"
+
+module Google
+  module Cloud
+    module Firestore
+      # @private
+      module Watch
+        # @private
+        class EnumeratorQueue
+          def initialize sentinel = nil
+            @queue = Queue.new
+            @sentinel = sentinel
+          end
+
+          def push obj
+            @queue.push obj
+          end
+
+          def each
+            return enum_for(:each) unless block_given?
+
+            loop do
+              obj = @queue.pop
+              break if obj.equal? @sentinel
+              yield obj
+            end
+          end
+        end
+      end
+    end
+  end
+end
```
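This queue is how the listener feeds requests into the bidirectional `Listen` stream: requests are pushed onto the queue, the gRPC call consumes them lazily through `#each`, and pushing the configured sentinel object ends the enumeration (and therefore the request stream). A minimal standalone sketch, assuming this version of the gem is installed; the string payloads are placeholders, not real `ListenRequest` protos (the `Listener` class below uses itself as the sentinel):

```ruby
require "google/cloud/firestore/watch/enumerator_queue"

# Use a plain symbol as the sentinel for illustration.
queue = Google::Cloud::Firestore::Watch::EnumeratorQueue.new :stop

queue.push "request 1"   # placeholder payloads, not real ListenRequest objects
queue.push "request 2"
queue.push :stop         # sentinel: enumeration ends when this is popped

# #each lazily yields everything pushed before the sentinel.
queue.each { |req| puts req }
# => request 1
# => request 2
```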
data/lib/google/cloud/firestore/watch/inventory.rb (new file)

```diff
@@ -0,0 +1,280 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require "google/cloud/firestore/v1beta1"
+require "google/cloud/firestore/convert"
+require "google/cloud/firestore/document_reference"
+require "google/cloud/firestore/document_snapshot"
+require "google/cloud/firestore/document_change"
+require "google/cloud/firestore/query_snapshot"
+require "google/cloud/firestore/watch/order"
+require "rbtree"
+
+module Google
+  module Cloud
+    module Firestore
+      ##
+      # @private
+      module Watch
+        # @private Collects changes and produces a QuerySnapshot.
+        # Uses RBTree to hold a sorted list of DocumentSnapshot objects and to
+        # make inserting and removing objects much more efficent.
+        class Inventory
+          attr_accessor :current
+          attr_reader :resume_token, :read_time
+
+          def initialize client, query
+            @client = client
+            @query = query
+            @pending = {
+              add: [],
+              delete: []
+            }
+            @current = nil
+            @resume_token = nil
+            @read_time = nil
+            @tree = RBTree.new
+            @tree.readjust(&method(:query_comparison_proc))
+            @old_order = nil
+
+            # TODO: Remove this when done benchmarking
+            @comp_proc_counter = 0
+          end
+
+          def current?
+            @current
+          end
+
+          def add doc_grpc
+            @pending[:add] << doc_grpc
+          end
+
+          def delete doc_path
+            @pending[:delete] << doc_path
+          end
+
+          def pending?
+            @pending[:add].any? || @pending[:delete].any?
+          end
+
+          def clear_pending
+            @pending[:add].clear
+            @pending[:delete].clear
+          end
+
+          def size
+            @tree.size
+          end
+          alias count size
+
+          def size_with_pending
+            count_with_pending_tree = @tree.dup
+            apply_pending_changes_to_tree @pending, count_with_pending_tree
+            count_with_pending_tree.size
+          end
+          alias count_with_pending size_with_pending
+
+          def restart
+            # clears all but query, resume token, read time, and old order
+            clear_pending
+
+            @current = nil
+
+            @tree.clear
+          end
+
+          def reset
+            restart
+
+            # clears the resume token and read time, but not query and old order
+            @resume_token = nil
+            @read_time = nil
+          end
+
+          # TODO: Remove this when done benchmarking
+          def reset_comp_proc_counter!
+            old_count = @comp_proc_counter
+            @comp_proc_counter = 0
+            old_count
+          end
+
+          def persist resume_token, read_time
+            @resume_token = resume_token
+            @read_time = read_time
+
+            apply_pending_changes_to_tree @pending, @tree
+            clear_pending
+          end
+
+          def changes?
+            # Act like there are changes if we have never run before
+            return true if @old_order.nil?
+            added_paths, deleted_paths, changed_paths = \
+              change_paths current_order, @old_order
+            added_paths.any? || deleted_paths.any? || changed_paths.any?
+          end
+
+          def current_docs
+            @tree.keys
+          end
+
+          def order_for docs
+            Hash[docs.map { |doc| [doc.path, doc.updated_at] }]
+          end
+
+          def current_order
+            order_for current_docs
+          end
+
+          def build_query_snapshot
+            # If this is the first time building, set to empty hash
+            @old_order ||= {}
+
+            # Get the new set of documents, changes, order
+            docs = current_docs
+            new_order = order_for docs
+            changes = build_changes new_order, @old_order
+            @old_order = new_order
+
+            QuerySnapshot.from_docs @query, docs, changes, @read_time
+          end
+
+          protected
+
+          def query_comparison_proc a, b
+            # TODO: Remove this when done benchmarking
+            @comp_proc_counter += 1
+
+            return Order.compare_field_values a.ref, b.ref if @query.nil?
+
+            @directions ||= @query.query.order_by.map(&:direction)
+
+            a_comps = a.query_comparisons_for @query.query
+            b_comps = b.query_comparisons_for @query.query
+            @directions.zip(a_comps, b_comps).each do |dir, a_comp, b_comp|
+              comp = a_comp <=> b_comp
+              comp = 0 - comp if dir == :DESCENDING
+              return comp unless comp.zero?
+            end
+
+            # Compare paths when everything else is equal
+            ref_comp = Order.compare_field_values a.ref, b.ref
+            ref_comp = 0 - ref_comp if @directions.last == :DESCENDING
+            ref_comp
+          end
+
+          def apply_pending_changes_to_tree pending, tree
+            # Remove the deleted documents
+            pending[:delete].each do |doc_path|
+              remove_doc_from_tree doc_path, tree
+            end
+
+            # Add/update the changed documents
+            pending[:add].each do |doc_grpc|
+              removed_doc = remove_doc_from_tree doc_grpc.name, tree
+              added_doc = DocumentSnapshot.from_document(
+                doc_grpc, @client, read_at: read_time
+              )
+
+              if removed_doc && removed_doc.updated_at >= added_doc.updated_at
+                # Restore the removed doc if the added doc isn't newer
+                added_doc = removed_doc
+              end
+
+              add_doc_to_tree added_doc, tree
+            end
+          end
+
+          def change_paths new_order, old_order
+            added_paths = new_order.keys - old_order.keys
+            deleted_paths = old_order.keys - new_order.keys
+            new_hash = new_order.dup.delete_if do |path, _updated_at|
+              added_paths.include? path
+            end
+            old_hash = old_order.dup.delete_if do |path, _updated_at|
+              deleted_paths.include? path
+            end
+            changed_paths = (new_hash.to_a - old_hash.to_a).map(&:first)
+
+            [added_paths, deleted_paths, changed_paths]
+          end
+
+          def build_changes new_order, old_order
+            new_paths = new_order.keys
+            old_paths = old_order.keys
+            added_paths, deleted_paths, changed_paths = \
+              change_paths new_order, old_order
+
+            changes = deleted_paths.map do |doc_path|
+              build_deleted_doc_change doc_path, old_paths
+            end
+            changes += added_paths.map do |doc_path|
+              build_added_doc_change doc_path, new_paths
+            end
+            changes += changed_paths.map do |doc_path|
+              build_modified_doc_change doc_path, new_paths, old_paths
+            end
+            changes
+          end
+
+          def build_deleted_doc_change doc_path, old_paths
+            doc_ref = DocumentReference.from_path doc_path, @client
+            doc_snp = DocumentSnapshot.missing doc_ref
+            old_index = get_index_from_order_array doc_path, old_paths
+            DocumentChange.from_doc doc_snp, old_index, nil
+          end
+
+          def build_added_doc_change doc_path, new_paths
+            doc_snp = get_doc_from_tree doc_path, @tree
+            new_index = get_index_from_order_array doc_path, new_paths
+            DocumentChange.from_doc doc_snp, nil, new_index
+          end
+
+          def build_modified_doc_change doc_path, new_paths, old_paths
+            doc_snp = get_doc_from_tree doc_path, @tree
+            old_index = get_index_from_order_array doc_path, old_paths
+            new_index = get_index_from_order_array doc_path, new_paths
+            DocumentChange.from_doc doc_snp, old_index, new_index
+          end
+
+          def get_index_from_order_array doc_path, order_array
+            order_array.index doc_path
+          end
+
+          def get_doc_from_tree doc_path, tree
+            tree.key doc_path
+          end
+
+          def add_doc_to_tree doc_snp, tree
+            tree[doc_snp] = doc_snp.path
+          end
+
+          def remove_doc_from_tree doc_path, tree
+            # Remove old snapshot
+            old_snp = tree.key doc_path
+            tree.delete old_snp unless old_snp.nil?
+            old_snp
+          end
+
+          def type_from_indexes old_index, new_index
+            return :removed if new_index.nil?
+            return :added if old_index.nil?
+            :modified
+          end
+        end
+      end
+    end
+  end
+end
```
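The heart of `Inventory` is the order diffing in `change_paths`: the old and new states are reduced to `{ path => updated_at }` hashes, added and deleted paths fall out of key subtraction, and modified paths are the pairs whose timestamps differ. A standalone sketch of that set arithmetic using plain hashes rather than the gem's internal types (the document paths and timestamps here are made up):

```ruby
old_order = { "cities/NYC" => 10, "cities/SF" => 10, "cities/LA" => 10 }
new_order = { "cities/NYC" => 10, "cities/SF" => 20, "cities/DC" => 10 }

added_paths   = new_order.keys - old_order.keys   # ["cities/DC"]
deleted_paths = old_order.keys - new_order.keys   # ["cities/LA"]

# Pairs present in new but not in old cover both added and changed docs;
# dropping the added paths leaves the documents whose updated_at changed.
changed_paths = (new_order.to_a - old_order.to_a).map(&:first) - added_paths
# => ["cities/SF"]

p [added_paths, deleted_paths, changed_paths]
```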
data/lib/google/cloud/firestore/watch/listener.rb (new file)

```diff
@@ -0,0 +1,298 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require "google/cloud/firestore/v1beta1"
+require "google/cloud/firestore/convert"
+require "google/cloud/firestore/watch/enumerator_queue"
+require "google/cloud/firestore/watch/inventory"
+require "monitor"
+require "thread"
+
+module Google
+  module Cloud
+    module Firestore
+      ##
+      # @private
+      module Watch
+        ##
+        # @private
+        class Listener
+          include MonitorMixin
+
+          def self.for_doc_ref doc_ref, &callback
+            raise ArgumentError if doc_ref.nil?
+            raise ArgumentError if callback.nil?
+
+            init_listen_req = Google::Firestore::V1beta1::ListenRequest.new(
+              database: doc_ref.client.path,
+              add_target: Google::Firestore::V1beta1::Target.new(
+                documents: \
+                  Google::Firestore::V1beta1::Target::DocumentsTarget.new(
+                    documents: [doc_ref.path]
+                  )
+              )
+            )
+
+            new nil, doc_ref, doc_ref.client, init_listen_req, &callback
+          end
+
+          def self.for_query query, &callback
+            raise ArgumentError if query.nil?
+            raise ArgumentError if callback.nil?
+
+            init_listen_req = Google::Firestore::V1beta1::ListenRequest.new(
+              database: query.client.path,
+              add_target: Google::Firestore::V1beta1::Target.new(
+                query: Google::Firestore::V1beta1::Target::QueryTarget.new(
+                  parent: query.parent_path,
+                  structured_query: query.query
+                )
+              )
+            )
+
+            new query, nil, query.client, init_listen_req, &callback
+          end
+
+          def initialize query, doc_ref, client, init_listen_req, &callback
+            @query = query
+            @doc_ref = doc_ref
+            @client = client
+            @init_listen_req = init_listen_req
+            @callback = callback
+
+            super() # to init MonitorMixin
+          end
+
+          def start
+            synchronize { start_listening! }
+            self
+          end
+
+          def stop
+            synchronize do
+              @stopped = true
+              @request_queue.push self if @request_queue
+            end
+          end
+
+          ##
+          # Whether the client has stopped listening for changes.
+          #
+          # @example
+          #   require "google/cloud/firestore"
+          #
+          #   firestore = Google::Cloud::Firestore.new
+          #
+          #   # Create a query
+          #   query = firestore.col(:cities).order(:population, :desc)
+          #
+          #   listener = query.listen do |snapshot|
+          #     puts "The query snapshot has #{snapshot.docs.count} documents "
+          #     puts "and has #{snapshot.changes.count} changes."
+          #   end
+          #
+          #   # Checks if the listener is stopped.
+          #   listener.stopped? #=> false
+          #
+          #   # When ready, stop the listen operation and close the stream.
+          #   listener.stop
+          #
+          #   # Checks if the listener is stopped.
+          #   listener.stopped? #=> true
+          #
+          def stopped?
+            synchronize { @stopped }
+          end
+
+          private
+
+          def send_callback query_snp
+            @callback.call query_snp
+          end
+
+          def start_listening!
+            # create new background thread to handle the stream's enumerator
+            @background_thread = Thread.new { background_run }
+          end
+
+          # @private
+          class RestartStream < StandardError; end
+
+          # rubocop:disable all
+
+          def background_run
+            # Don't allow a stream to restart if already stopped
+            return if synchronize { @stopped }
+
+            @backoff ||= { current: 0, delay: 1.0, max: 5, mod: 1.3 }
+
+            # Reuse inventory if one already exists
+            # Even though this uses an @var, no need to synchronize
+            @inventory ||= Inventory.new(@client, @query)
+            @inventory.restart
+
+            # Send stop if already running
+            synchronize do
+              @request_queue.push self if @request_queue
+            end
+
+            # Customize the provided initial listen request
+            init_listen_req = @init_listen_req.dup.tap do |req|
+              req.add_target.resume_token = String(@inventory.resume_token)
+              req.add_target.target_id = 0x42
+            end
+
+            # Always create a new enum queue
+            synchronize do
+              @request_queue = EnumeratorQueue.new self
+              @request_queue.push init_listen_req
+            end
+
+            # Not an @var, we get a new enum each time
+            enum = synchronize do
+              @client.service.listen @request_queue.each
+            end
+
+            loop do
+
+              # Break loop, close thread if stopped
+              break if synchronize { @stopped }
+
+              begin
+                # Cannot syncronize the enumerator, causes deadlock
+                response = enum.next
+
+                case response.response_type
+                when :target_change
+                  case response.target_change.target_change_type
+                  when :NO_CHANGE
+                    # No change has occurred. Used only to send an updated
+                    # +resume_token+.
+
+                    @inventory.persist(
+                      response.target_change.resume_token,
+                      Convert.timestamp_to_time(
+                        response.target_change.read_time
+                      )
+                    )
+
+                    if @inventory.current? && @inventory.changes?
+                      synchronize do
+                        send_callback @inventory.build_query_snapshot
+                      end
+                    end
+                  when :CURRENT
+                    # The targets reflect all changes committed before the targets
+                    # were added to the stream.
+                    #
+                    # This will be sent after or with a +read_time+ that is
+                    # greater than or equal to the time at which the targets were
+                    # added.
+                    #
+                    # Listeners can wait for this change if read-after-write
+                    # semantics are desired.
+
+                    @inventory.persist(
+                      response.target_change.resume_token,
+                      Convert.timestamp_to_time(
+                        response.target_change.read_time
+                      )
+                    )
+
+                    @inventory.current = true
+                  when :RESET
+                    # The targets have been reset, and a new initial state for the
+                    # targets will be returned in subsequent changes.
+                    #
+                    # After the initial state is complete, +CURRENT+ will be
+                    # returned even if the target was previously indicated to be
+                    # +CURRENT+.
+
+                    @inventory.reset
+                    raise RestartStream # Raise to restart the stream
+                  end
+                when :document_change
+                  # A {Google::Firestore::V1beta1::Document Document} has changed.
+
+                  if response.document_change.removed_target_ids.any?
+                    @inventory.delete response.document_change.document.name
+                  else
+                    @inventory.add response.document_change.document
+                  end
+                when :document_delete
+                  # A {Google::Firestore::V1beta1::Document Document} has been
+                  # deleted.
+
+                  @inventory.delete response.document_delete.document
+                when :document_remove
+                  # A {Google::Firestore::V1beta1::Document Document} has been
+                  # removed from a target (because it is no longer relevant to
+                  # that target).
+
+                  @inventory.delete response.document_remove.document
+                when :filter
+                  # A filter to apply to the set of documents previously returned
+                  # for the given target.
+                  #
+                  # Returned when documents may have been removed from the given
+                  # target, but the exact documents are unknown.
+
+                  if response.filter.count != @inventory.count_with_pending
+                    @inventory.reset
+                    raise RestartStream # Raise to restart the stream
+                  end
+                end
+              rescue StopIteration
+                break
+              end
+
+              # Reset backoff values when completed without an error
+              @backoff[:current] = 0
+              @backoff[:delay] = 1.0
+            end
+
+            # Has the loop broken but we aren't stopped?
+            # Could be GRPC has thrown an internal error, so restart.
+            raise RestartStream unless synchronize { @stopped }
+
+            # We must be stopped, tell the stream to quit.
+            @request_queue.push self
+          rescue GRPC::Cancelled, GRPC::DeadlineExceeded, GRPC::Internal,
+                 GRPC::ResourceExhausted, GRPC::Unauthenticated,
+                 GRPC::Unavailable, GRPC::Core::CallError
+            # Restart the stream with an incremental back for a retriable error.
+            # Also when GRPC raises the internal CallError.
+
+            # Re-raise if retried more than the max
+            raise err if @backoff[:current] > @backoff[:max]
+
+            # Sleep with incremental backoff before restarting
+            sleep @backoff[:delay]
+
+            # Update increment backoff delay and retry counter
+            @backoff[:delay] *= @backoff[:mod]
+            @backoff[:current] += 1
+
+            retry
+          rescue RestartStream
+            retry
+          rescue StandardError => e
+            raise Google::Cloud::Error.from_error(e)
+          end
+        end
+      end
+    end
+  end
+end
```
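Taken together, these three new files back the `listen` methods surfaced by `query_listener.rb` and `document_listener.rb` in the file list above. The usage below is adapted directly from the `@example` embedded in `Listener#stopped?`:

```ruby
require "google/cloud/firestore"

firestore = Google::Cloud::Firestore.new

# Create a query
query = firestore.col(:cities).order(:population, :desc)

listener = query.listen do |snapshot|
  puts "The query snapshot has #{snapshot.docs.count} documents "
  puts "and has #{snapshot.changes.count} changes."
end

# When ready, stop the listen operation and close the stream.
listener.stop
```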