google-cloud-pubsub 1.0.2 → 2.19.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/AUTHENTICATION.md +16 -54
- data/CHANGELOG.md +464 -0
- data/CONTRIBUTING.md +328 -116
- data/EMULATOR.md +1 -1
- data/LOGGING.md +94 -2
- data/OVERVIEW.md +121 -68
- data/TROUBLESHOOTING.md +2 -8
- data/lib/google/cloud/pubsub/acknowledge_result.rb +79 -0
- data/lib/google/cloud/pubsub/async_publisher/batch.rb +319 -0
- data/lib/google/cloud/pubsub/async_publisher.rb +231 -156
- data/lib/google/cloud/pubsub/batch_publisher.rb +60 -30
- data/lib/google/cloud/pubsub/convert.rb +33 -7
- data/lib/google/cloud/pubsub/credentials.rb +2 -2
- data/lib/google/cloud/pubsub/errors.rb +93 -0
- data/lib/google/cloud/pubsub/flow_controller.rb +137 -0
- data/lib/google/cloud/pubsub/message.rb +45 -4
- data/lib/google/cloud/pubsub/policy.rb +3 -2
- data/lib/google/cloud/pubsub/project.rb +316 -49
- data/lib/google/cloud/pubsub/publish_result.rb +6 -1
- data/lib/google/cloud/pubsub/received_message.rb +171 -10
- data/lib/google/cloud/pubsub/retry_policy.rb +88 -0
- data/lib/google/cloud/pubsub/schema/list.rb +180 -0
- data/lib/google/cloud/pubsub/schema.rb +310 -0
- data/lib/google/cloud/pubsub/service.rb +285 -269
- data/lib/google/cloud/pubsub/snapshot/list.rb +4 -6
- data/lib/google/cloud/pubsub/snapshot.rb +5 -2
- data/lib/google/cloud/pubsub/subscriber/inventory.rb +69 -32
- data/lib/google/cloud/pubsub/subscriber/sequencer.rb +115 -0
- data/lib/google/cloud/pubsub/subscriber/stream.rb +108 -49
- data/lib/google/cloud/pubsub/subscriber/timed_unary_buffer.rb +191 -30
- data/lib/google/cloud/pubsub/subscriber.rb +155 -45
- data/lib/google/cloud/pubsub/subscription/list.rb +4 -6
- data/lib/google/cloud/pubsub/subscription/push_config.rb +55 -31
- data/lib/google/cloud/pubsub/subscription.rb +561 -77
- data/lib/google/cloud/pubsub/topic/list.rb +4 -6
- data/lib/google/cloud/pubsub/topic.rb +372 -52
- data/lib/google/cloud/pubsub/version.rb +1 -1
- data/lib/google/cloud/pubsub.rb +35 -46
- data/lib/google-cloud-pubsub.rb +21 -27
- metadata +26 -189
- data/lib/google/cloud/pubsub/v1/credentials.rb +0 -41
- data/lib/google/cloud/pubsub/v1/doc/google/iam/v1/iam_policy.rb +0 -21
- data/lib/google/cloud/pubsub/v1/doc/google/iam/v1/options.rb +0 -21
- data/lib/google/cloud/pubsub/v1/doc/google/iam/v1/policy.rb +0 -21
- data/lib/google/cloud/pubsub/v1/doc/google/protobuf/duration.rb +0 -91
- data/lib/google/cloud/pubsub/v1/doc/google/protobuf/empty.rb +0 -29
- data/lib/google/cloud/pubsub/v1/doc/google/protobuf/field_mask.rb +0 -222
- data/lib/google/cloud/pubsub/v1/doc/google/protobuf/timestamp.rb +0 -113
- data/lib/google/cloud/pubsub/v1/doc/google/pubsub/v1/pubsub.rb +0 -744
- data/lib/google/cloud/pubsub/v1/doc/google/type/expr.rb +0 -19
- data/lib/google/cloud/pubsub/v1/publisher_client.rb +0 -786
- data/lib/google/cloud/pubsub/v1/publisher_client_config.json +0 -105
- data/lib/google/cloud/pubsub/v1/subscriber_client.rb +0 -1385
- data/lib/google/cloud/pubsub/v1/subscriber_client_config.json +0 -138
- data/lib/google/cloud/pubsub/v1.rb +0 -17
- data/lib/google/pubsub/v1/pubsub_pb.rb +0 -249
- data/lib/google/pubsub/v1/pubsub_services_pb.rb +0 -211
```diff
--- 1.0.2/data/lib/google/cloud/pubsub/async_publisher.rb
+++ 2.19.0/data/lib/google/cloud/pubsub/async_publisher.rb
@@ -15,8 +15,12 @@
 
 require "monitor"
 require "concurrent"
+require "google/cloud/pubsub/errors"
+require "google/cloud/pubsub/flow_controller"
+require "google/cloud/pubsub/async_publisher/batch"
 require "google/cloud/pubsub/publish_result"
 require "google/cloud/pubsub/service"
+require "google/cloud/pubsub/convert"
 
 module Google
   module Cloud
@@ -39,79 +43,128 @@ module Google
       #     end
       #   end
       #
-      #   topic.async_publisher.stop.wait!
+      #   topic.async_publisher.stop!
       #
-      # @attr_reader [String] topic_name The name of the topic the messages
-      #   are published to. In the form of
-      #   "/projects/project-identifier/topics/topic-name".
-      # @attr_reader [Integer] max_bytes The maximum size of messages to be
-      #   collected before the batch is published. Default is 10,000,000
-      #   (10MB).
-      # @attr_reader [Integer] max_messages The maximum number of messages
-      #   to be collected before the batch is published. Default is 1,000.
-      # @attr_reader [Numeric] interval The number of seconds to collect
-      #   messages before the batch is published. Default is 0.25.
-      # @attr_reader [Numeric] publish_threads The number of threads used to
-      #   publish messages. Default is 4.
-      # @attr_reader [Numeric] callback_threads The number of threads to
-      #   handle the published messages' callbacks. Default is 8.
+      # @attr_reader [String] topic_name The name of the topic the messages are published to. The value is a
+      #   fully-qualified topic name in the form `projects/{project_id}/topics/{topic_id}`.
+      # @attr_reader [Integer] max_bytes The maximum size of messages to be collected before the batch is published.
+      #   Default is 1,000,000 (1MB).
+      # @attr_reader [Integer] max_messages The maximum number of messages to be collected before the batch is
+      #   published. Default is 100.
+      # @attr_reader [Numeric] interval The number of seconds to collect messages before the batch is published. Default
+      #   is 0.01.
+      # @attr_reader [Numeric] publish_threads The number of threads used to publish messages. Default is 2.
+      # @attr_reader [Numeric] callback_threads The number of threads to handle the published messages' callbacks.
+      #   Default is 4.
       #
       class AsyncPublisher
         include MonitorMixin
 
-        attr_reader :topic_name, :max_bytes, :max_messages, :interval,
-                    :publish_threads, :callback_threads
+        attr_reader :topic_name
+        attr_reader :max_bytes
+        attr_reader :max_messages
+        attr_reader :interval
+        attr_reader :publish_threads
+        attr_reader :callback_threads
+        attr_reader :flow_control
         ##
         # @private Implementation accessors
         attr_reader :service, :batch, :publish_thread_pool,
-                    :callback_thread_pool
+                    :callback_thread_pool, :flow_controller,
+                    :compress, :compression_bytes_threshold
 
         ##
         # @private Create a new instance of the object.
-        def initialize topic_name, service, max_bytes: 10_000_000,
-                       max_messages: 1_000, interval: 0.25, threads: {}
+        def initialize topic_name,
+                       service,
+                       max_bytes: 1_000_000,
+                       max_messages: 100,
+                       interval: 0.01,
+                       threads: {},
+                       flow_control: {},
+                       compress: nil,
+                       compression_bytes_threshold: nil
+          # init MonitorMixin
+          super()
           @topic_name = service.topic_path topic_name
           @service = service
 
           @max_bytes = max_bytes
           @max_messages = max_messages
           @interval = interval
-          @publish_threads = (threads[:publish] || 4).to_i
-          @callback_threads = (threads[:callback] || 8).to_i
-
+          @publish_threads = (threads[:publish] || 2).to_i
+          @callback_threads = (threads[:callback] || 4).to_i
+          @flow_control = {
+            message_limit: 10 * @max_messages,
+            byte_limit: 10 * @max_bytes
+          }.merge(flow_control).freeze
+
+          @published_at = nil
+          @publish_thread_pool = Concurrent::ThreadPoolExecutor.new max_threads: @publish_threads
+          @callback_thread_pool = Concurrent::ThreadPoolExecutor.new max_threads: @callback_threads
+
+          @ordered = false
+          @batches = {}
          @cond = new_cond
-
-          # init MonitorMixin
-          super()
+          @flow_controller = FlowController.new(**@flow_control)
+          @thread = Thread.new { run_background }
+          @compress = compress || Google::Cloud::PubSub::DEFAULT_COMPRESS
+          @compression_bytes_threshold = compression_bytes_threshold ||
+                                         Google::Cloud::PubSub::DEFAULT_COMPRESSION_BYTES_THRESHOLD
         end
 
         ##
         # Add a message to the async publisher to be published to the topic.
         # Messages will be collected in batches and published together.
         # See {Google::Cloud::PubSub::Topic#publish_async}
-        def publish data = nil, attributes = {}, &block
-          msg = create_pubsub_message data, attributes
+        #
+        # @param [String, File] data The message payload. This will be converted
+        #   to bytes encoded as ASCII-8BIT.
+        # @param [Hash] attributes Optional attributes for the message.
+        # @param [String] ordering_key Identifies related messages for which
+        #   publish order should be respected.
+        # @yield [result] the callback for when the message has been published
+        # @yieldparam [PublishResult] result the result of the asynchronous
+        #   publish
+        # @raise [Google::Cloud::PubSub::AsyncPublisherStopped] when the
+        #   publisher is stopped. (See {#stop} and {#stopped?}.)
+        # @raise [Google::Cloud::PubSub::OrderedMessagesDisabled] when
+        #   publishing a message with an `ordering_key` but ordered messages are
+        #   not enabled. (See {#message_ordering?} and
+        #   {#enable_message_ordering!}.)
+        # @raise [Google::Cloud::PubSub::OrderingKeyError] when publishing a
+        #   message with an `ordering_key` that has already failed when
+        #   publishing. Use {#resume_publish} to allow this `ordering_key` to be
+        #   published again.
+        #
+        def publish data = nil, attributes = nil, ordering_key: nil, **extra_attrs, &callback
+          msg = Convert.pubsub_message data, attributes, ordering_key, extra_attrs
+          begin
+            @flow_controller.acquire msg.to_proto.bytesize
+          rescue FlowControlLimitError => e
+            stop_publish ordering_key, e if ordering_key
+            raise
+          end
 
           synchronize do
-            raise "Can't publish when stopped." if @stopped
-
-            if @batch.nil?
-              @batch ||= Batch.new self
-              @batch.add msg, block
-            else
-              unless @batch.try_add msg, block
-                publish_batch!
-                @batch = Batch.new self
-                @batch.add msg, block
-              end
-            end
-
-            init_resources!
-
-            publish_batch! if @batch.ready?
+            raise AsyncPublisherStopped if @stopped
+            raise OrderedMessagesDisabled if !@ordered && !msg.ordering_key.empty? # default is empty string
 
+            batch = resolve_batch_for_message msg
+            if batch.canceled?
+              @flow_controller.release msg.to_proto.bytesize
+              raise OrderingKeyError, batch.ordering_key
+            end
+            batch_action = batch.add msg, callback
+            if batch_action == :full
+              publish_batches!
+            elsif @published_at.nil?
+              # Set initial time to now to start the background counter
+              @published_at = Time.now
+            end
             @cond.signal
           end
+
           nil
         end
 
```
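In 2.x every message now passes through the new `FlowController` before it is batched: `publish` acquires the message's encoded size up front, releases it when the message is sent or its batch is canceled, and raises `FlowControlLimitError` when the configured limits are exceeded. A minimal sketch of how this surfaces to callers, assuming the `async:` options hash on `Project#topic` is forwarded to `AsyncPublisher.new` and that `FlowController` accepts a `limit_exceeded_behavior` setting (both assumptions; neither is shown in this diff):

```ruby
require "google/cloud/pubsub"

pubsub = Google::Cloud::PubSub.new

# Assumption: the async: hash is passed straight to AsyncPublisher.new.
topic = pubsub.topic "my-topic", async: {
  max_messages: 100,       # the 2.x default batch size
  max_bytes:    1_000_000, # or 1 MB, whichever fills first
  flow_control: {
    message_limit: 1_000,      # mirrors the 10 * max_messages default above
    byte_limit:    10_000_000, # mirrors the 10 * max_bytes default above
    limit_exceeded_behavior: :error # assumed option: raise instead of blocking
  }
}

begin
  topic.publish_async "task completed" do |result|
    warn "publish failed: #{result.error}" unless result.succeeded?
  end
rescue Google::Cloud::PubSub::FlowControlLimitError
  # Raised from @flow_controller.acquire when too many bytes or
  # messages are outstanding; back off and retry later.
  sleep 1
end
```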
```diff
@@ -127,9 +180,9 @@ module Google
             break if @stopped
 
             @stopped = true
-            publish_batch!
+            publish_batches! stop: true
             @cond.signal
-            @publish_thread_pool.shutdown
+            @publish_thread_pool.shutdown
           end
 
           self
@@ -149,14 +202,10 @@
         # @return [AsyncPublisher] returns self so calls can be chained.
         def wait! timeout = nil
           synchronize do
-            if @publish_thread_pool
-              @publish_thread_pool.wait_for_termination timeout
-            end
+            @publish_thread_pool.wait_for_termination timeout
 
-            if @callback_thread_pool
-              @callback_thread_pool.shutdown
-              @callback_thread_pool.wait_for_termination timeout
-            end
+            @callback_thread_pool.shutdown
+            @callback_thread_pool.wait_for_termination timeout
           end
 
           self
@@ -185,7 +234,7 @@
         # @return [AsyncPublisher] returns self so calls can be chained.
         def flush
           synchronize do
-            publish_batch!
+            publish_batches!
             @cond.signal
           end
 
```
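Together, `stop`, `wait!`, and `flush` give the rewritten publisher a predictable shutdown path: `stop` publishes whatever is still buffered (`publish_batches! stop: true`) and shuts the publish pool down, while `wait!` now also shuts down the callback pool inside the same `synchronize` block instead of guarding each pool separately. A shutdown sketch based on the methods shown above (the updated class docs advertise `stop!` as the stop-then-wait shorthand):

```ruby
publisher = topic.async_publisher

publisher.flush       # publish any partially filled batches immediately
publisher.stop        # reject new messages; remaining batches are published
publisher.wait! 30    # drain the publish and callback pools, up to 30 seconds

# Or, per the updated class documentation, the one-call form:
topic.async_publisher.stop!
```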
```diff
@@ -208,33 +257,63 @@
           synchronize { @stopped }
         end
 
-        protected
+        ##
+        # Enables message ordering for messages with ordering keys. When
+        # enabled, messages published with the same `ordering_key` will be
+        # delivered in the order they were published.
+        #
+        # See {#message_ordering?}. See {Topic#publish_async},
+        # {Subscription#listen}, and {Message#ordering_key}.
+        #
+        def enable_message_ordering!
+          synchronize { @ordered = true }
+        end
 
-        # rubocop:disable Naming/MemoizedInstanceVariableName
+        ##
+        # Whether message ordering for messages with ordering keys has been
+        # enabled. When enabled, messages published with the same `ordering_key`
+        # will be delivered in the order they were published. When disabled,
+        # messages may be delivered in any order.
+        #
+        # See {#enable_message_ordering!}. See {Topic#publish_async},
+        # {Subscription#listen}, and {Message#ordering_key}.
+        #
+        # @return [Boolean]
+        #
+        def message_ordering?
+          synchronize { @ordered }
+        end
 
-        def init_resources!
-          @first_published_at ||= Time.now
-          @publish_thread_pool ||= Concurrent::ThreadPoolExecutor.new \
-            max_threads: @publish_threads
-          @callback_thread_pool ||= Concurrent::ThreadPoolExecutor.new \
-            max_threads: @callback_threads
-          @thread ||= Thread.new { run_background }
+        ##
+        # Resume publishing ordered messages for the provided ordering key.
+        #
+        # @param [String] ordering_key Identifies related messages for which
+        #   publish order should be respected.
+        #
+        # @return [boolean] `true` when resumed, `false` otherwise.
+        #
+        def resume_publish ordering_key
+          synchronize do
+            batch = resolve_batch_for_ordering_key ordering_key
+            return if batch.nil?
+            batch.resume!
+          end
         end
 
-        # rubocop:enable Naming/MemoizedInstanceVariableName
+        protected
 
         def run_background
           synchronize do
             until @stopped
-              if @first_published_at.nil?
+              if @published_at.nil?
                 @cond.wait
                 next
               end
 
-              time_since_first_publish = Time.now - @first_published_at
+              time_since_first_publish = Time.now - @published_at
               if time_since_first_publish > @interval
-                # interval met, publish the batch...
-                publish_batch!
+                # interval met, flush the batches...
+                publish_batches!
                 @cond.wait
               else
                 # still waiting for the interval to publish the batch...
```
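Ordering is opt-in: publishing with an `ordering_key` before `enable_message_ordering!` raises `OrderedMessagesDisabled`, and a key whose batch has been canceled keeps raising `OrderingKeyError` until `resume_publish` clears it. A sketch using the `Topic`-level wrappers, assuming `Topic#enable_message_ordering!` and `Topic#resume_publish` delegate to this publisher as the cross-references in the comments suggest:

```ruby
topic = pubsub.topic "my-topic"
topic.enable_message_ordering!

topic.publish_async "profile updated", ordering_key: "user-123" do |result|
  unless result.succeeded?
    # After a terminal failure the key is poisoned: later publishes with
    # "user-123" raise OrderingKeyError until the key is resumed.
    topic.resume_publish "user-123"
  end
end
```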
```diff
@@ -245,40 +324,109 @@
             end
           end
         end
 
-        def publish_batch!
-          return unless @batch
+        def resolve_batch_for_message msg
+          @batches[msg.ordering_key] ||= Batch.new self, msg.ordering_key
+        end
+
+        def resolve_batch_for_ordering_key ordering_key
+          @batches[ordering_key]
+        end
 
-          publish_batch_async @topic_name, @batch
-          @batch = nil
-          @first_published_at = nil
+        def stop_publish ordering_key, err
+          synchronize do
+            batch = resolve_batch_for_ordering_key ordering_key
+            return if batch.nil?
+            items = batch.cancel!
+            items.each do |item|
+              @flow_controller.release item.bytesize
+              next unless item.callback
+
+              publish_result = PublishResult.from_error item.msg, err
+              execute_callback_async item.callback, publish_result
+            end
+          end
+        end
+
+        def publish_batches! stop: nil
+          @batches.reject! { |_ordering_key, batch| batch.empty? }
+          @batches.each_value do |batch|
+            ready = batch.publish! stop: stop
+            publish_batch_async @topic_name, batch if ready
+          end
+          # Set published_at to nil to wait indefinitely
+          @published_at = nil
         end
 
         def publish_batch_async topic_name, batch
+          # TODO: raise unless @publish_thread_pool.running?
           return unless @publish_thread_pool.running?
 
           Concurrent::Promises.future_on(
             @publish_thread_pool, topic_name, batch
-          ) do |t_name, btch|
-            publish_batch_sync t_name, btch
-          end
+          ) { |t, b| publish_batch_sync t, b }
         end
 
+        # rubocop:disable Metrics/AbcSize
+
         def publish_batch_sync topic_name, batch
-          grpc = @service.publish topic_name, batch.messages
-          batch.items.zip(Array(grpc.message_ids)) do |item, id|
-            next unless item.callback
+          # The only batch methods that are safe to call from the loop are
+          # rebalance! and reset! because they are the only methods that are
+          # synchronized.
+          loop do
+            items = batch.rebalance!
+
+            unless items.empty?
+              grpc = @service.publish topic_name,
+                                      items.map(&:msg),
+                                      compress: compress && batch.total_message_bytes >= compression_bytes_threshold
+              items.zip Array(grpc.message_ids) do |item, id|
+                @flow_controller.release item.bytesize
+                next unless item.callback
+
+                item.msg.message_id = id
+                publish_result = PublishResult.from_grpc item.msg
+                execute_callback_async item.callback, publish_result
+              end
+            end
 
-            item.msg.message_id = id
-            publish_result = PublishResult.from_grpc item.msg
-            execute_callback_async item.callback, publish_result
+            break unless batch.reset!
           end
         rescue StandardError => e
-          batch.items.each do |item|
+          items = batch.items
+
+          unless batch.ordering_key.empty?
+            retry if publish_batch_error_retryable? e
+            # Cancel the batch if the error is not to be retried.
+            begin
+              raise OrderingKeyError, batch.ordering_key
+            rescue OrderingKeyError => e
+              # The existing e variable is not set to OrderingKeyError
+              # Get all unsent messages for the callback
+              items = batch.cancel!
+            end
+          end
+
+          items.each do |item|
+            @flow_controller.release item.bytesize
             next unless item.callback
 
             publish_result = PublishResult.from_error item.msg, e
             execute_callback_async item.callback, publish_result
           end
+
+          # publish will retry indefinitely, as long as there are unsent items.
+          retry if batch.reset!
+        end
+
+        # rubocop:enable Metrics/AbcSize
+
+        PUBLISH_RETRY_ERRORS = [
+          GRPC::Cancelled, GRPC::DeadlineExceeded, GRPC::Internal,
+          GRPC::ResourceExhausted, GRPC::Unauthenticated, GRPC::Unavailable
+        ].freeze
+
+        def publish_batch_error_retryable? error
+          PUBLISH_RETRY_ERRORS.any? { |klass| error.is_a? klass }
         end
 
         def execute_callback_async callback, publish_result
```
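Note that the automatic retry classification only applies to ordered batches: a batch with an `ordering_key` is retried on the transient gRPC errors in `PUBLISH_RETRY_ERRORS` and canceled on anything else, while unordered batches simply retry for as long as `batch.reset!` reports unsent items. Either way, a terminal failure reaches user code as a failed `PublishResult` in the publish callback:

```ruby
topic.publish_async "task completed" do |result|
  if result.succeeded?
    puts "published as #{result.message_id}"
  else
    # result.error carries the exception the publisher gave up on, e.g. a
    # non-retryable GRPC::PermissionDenied, or the OrderingKeyError set
    # when an ordered batch was canceled.
    warn "gave up on #{result.data.inspect}: #{result.error.inspect}"
  end
end
```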
```diff
@@ -290,79 +438,6 @@
             cback.call p_result
           end
         end
-
-        def create_pubsub_message data, attributes
-          attributes ||= {}
-          if data.is_a?(::Hash) && attributes.empty?
-            attributes = data
-            data = nil
-          end
-          # Convert IO-ish objects to strings
-          if data.respond_to?(:read) && data.respond_to?(:rewind)
-            data.rewind
-            data = data.read
-          end
-          # Convert data to encoded byte array to match the protobuf defn
-          data_bytes = \
-            String(data).dup.force_encoding(Encoding::ASCII_8BIT).freeze
-
-          # Convert attributes to strings to match the protobuf definition
-          attributes = Hash[attributes.map { |k, v| [String(k), String(v)] }]
-
-          Google::Cloud::PubSub::V1::PubsubMessage.new data: data_bytes,
-                                                       attributes: attributes
-        end
-
-        ##
-        # @private
-        class Batch
-          attr_reader :messages, :callbacks
-
-          def initialize publisher
-            @publisher = publisher
-            @messages = []
-            @callbacks = []
-            @total_message_bytes = publisher.topic_name.bytesize + 2
-          end
-
-          def add msg, callback
-            @messages << msg
-            @callbacks << callback
-            @total_message_bytes += msg.to_proto.bytesize + 2
-          end
-
-          def try_add msg, callback
-            new_message_count = total_message_count + 1
-            new_message_bytes = total_message_bytes + msg.to_proto.bytesize + 2
-            if new_message_count > @publisher.max_messages ||
-               new_message_bytes >= @publisher.max_bytes
-              return false
-            end
-            add msg, callback
-            true
-          end
-
-          def ready?
-            total_message_count >= @publisher.max_messages ||
-              total_message_bytes >= @publisher.max_bytes
-          end
-
-          def total_message_count
-            @messages.count
-          end
-
-          def total_message_bytes
-            @total_message_bytes
-          end
-
-          def items
-            @messages.zip(@callbacks).map do |msg, callback|
-              Item.new msg, callback
-            end
-          end
-
-          Item = Struct.new :msg, :callback
-        end
       end
     end
   end
```
```diff
--- 1.0.2/data/lib/google/cloud/pubsub/batch_publisher.rb
+++ 2.19.0/data/lib/google/cloud/pubsub/batch_publisher.rb
@@ -13,6 +13,8 @@
 # limitations under the License.
 
 
+require "google/cloud/pubsub/convert"
+
 module Google
   module Cloud
     module PubSub
@@ -26,32 +28,75 @@ module Google
       #   pubsub = Google::Cloud::PubSub.new
       #
       #   topic = pubsub.topic "my-topic"
-      #   msgs = topic.publish do |t|
-      #     t.publish "task 1 completed", foo: :bar
-      #     t.publish "task 2 completed", foo: :baz
-      #     t.publish "task 3 completed", foo: :bif
+      #   msgs = topic.publish do |batch_publisher|
+      #     batch_publisher.publish "task 1 completed", foo: :bar
+      #     batch_publisher.publish "task 2 completed", foo: :baz
+      #     batch_publisher.publish "task 3 completed", foo: :bif
       #   end
+      #
       class BatchPublisher
         ##
         # @private The messages to publish
         attr_reader :messages
 
+        ##
+        # @private Enables publisher compression
+        attr_reader :compress
+
+        ##
+        # @private The threshold bytes size for compression
+        attr_reader :compression_bytes_threshold
+
+        ##
+        # @private The total bytes size of messages data.
+        attr_reader :total_message_bytes
+
         ##
         # @private Create a new instance of the object.
-        def initialize data = nil, attributes = {}
+        def initialize data,
+                       attributes,
+                       ordering_key,
+                       extra_attrs,
+                       compress: nil,
+                       compression_bytes_threshold: nil
           @messages = []
           @mode = :batch
+          @compress = compress || Google::Cloud::PubSub::DEFAULT_COMPRESS
+          @compression_bytes_threshold = compression_bytes_threshold ||
+                                         Google::Cloud::PubSub::DEFAULT_COMPRESSION_BYTES_THRESHOLD
+          @total_message_bytes = 0
           return if data.nil?
           @mode = :single
-          publish data, attributes
+          publish data, attributes, ordering_key: ordering_key, **extra_attrs
         end
 
         ##
         # Add a message to the batch to be published to the topic.
         # All messages added to the batch will be published at once.
         # See {Google::Cloud::PubSub::Topic#publish}
-        def publish data, attributes = {}
-          @messages << create_pubsub_message(data, attributes)
+        #
+        # @param [String, File] data The message payload. This will be converted
+        #   to bytes encoded as ASCII-8BIT.
+        # @param [Hash] attributes Optional attributes for the message.
+        # @param [String] ordering_key Identifies related messages for which
+        #   publish order should be respected.
+        #
+        # @example Multiple messages can be sent at the same time using a block:
+        #   require "google/cloud/pubsub"
+        #
+        #   pubsub = Google::Cloud::PubSub.new
+        #
+        #   topic = pubsub.topic "my-topic"
+        #   msgs = topic.publish do |batch_publisher|
+        #     batch_publisher.publish "task 1 completed", foo: :bar
+        #     batch_publisher.publish "task 2 completed", foo: :baz
+        #     batch_publisher.publish "task 3 completed", foo: :bif
+        #   end
+        #
+        def publish data, attributes = nil, ordering_key: nil, **extra_attrs
+          msg = Convert.pubsub_message data, attributes, ordering_key, extra_attrs
+          @total_message_bytes += msg.data.bytesize + 2
+          @messages << msg
         end
 
         ##
```
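Because the batch path now builds messages through the shared `Convert.pubsub_message` helper, the block form of `Topic#publish` accepts the same arguments as the async path, including `ordering_key` and extra attribute keywords:

```ruby
msgs = topic.publish do |batch_publisher|
  # Attributes can be keyword arguments or an explicit hash.
  batch_publisher.publish "task 1 completed", foo: :bar
  batch_publisher.publish "task 2 completed", { "source" => "worker-7" }
  # The ordering key is threaded through to the underlying PubsubMessage.
  batch_publisher.publish "task 3 completed", ordering_key: "user-123"
end
```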
```diff
@@ -70,28 +115,13 @@
           end
         end
 
-        protected
-
-        def create_pubsub_message data, attributes
-          attributes ||= {}
-          if data.is_a?(::Hash) && attributes.empty?
-            attributes = data
-            data = nil
-          end
-          # Convert IO-ish objects to strings
-          if data.respond_to?(:read) && data.respond_to?(:rewind)
-            data.rewind
-            data = data.read
-          end
-          # Convert data to encoded byte array to match the protobuf defn
-          data_bytes = \
-            String(data).dup.force_encoding(Encoding::ASCII_8BIT).freeze
-
-          # Convert attributes to strings to match the protobuf definition
-          attributes = Hash[attributes.map { |k, v| [String(k), String(v)] }]
-
-          Google::Cloud::PubSub::V1::PubsubMessage.new data: data_bytes,
-                                                       attributes: attributes
+        ##
+        # @private Call the publish API with arrays of data and attrs.
+        def publish_batch_messages topic_name, service
+          grpc = service.publish topic_name,
+                                 messages,
+                                 compress: compress && total_message_bytes >= compression_bytes_threshold
+          to_gcloud_messages Array(grpc.message_ids)
         end
       end
     end
```
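`publish_batch_messages` asks the service to compress only once the accumulated message bytes reach `compression_bytes_threshold`, so small batches skip the compression overhead; the fallback values are the gem-level `DEFAULT_COMPRESS` and `DEFAULT_COMPRESSION_BYTES_THRESHOLD` constants referenced above. A sketch of opting in on the synchronous path, assuming `Topic#publish` forwards these two keywords to `BatchPublisher.new` (the topic.rb changes are not shown in this section):

```ruby
# Assumption: Topic#publish accepts and forwards these keywords in 2.x.
msg = topic.publish large_payload,              # large_payload: any String
                    compress: true,
                    compression_bytes_threshold: 1024 # compress at >= 1 KiB
```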