google-cloud-pubsub 0.33.1 → 2.15.1

Files changed (59)
  1. checksums.yaml +4 -4
  2. data/AUTHENTICATION.md +41 -40
  3. data/CHANGELOG.md +610 -0
  4. data/CONTRIBUTING.md +328 -116
  5. data/EMULATOR.md +2 -2
  6. data/LOGGING.md +95 -3
  7. data/OVERVIEW.md +183 -90
  8. data/TROUBLESHOOTING.md +2 -8
  9. data/lib/google/cloud/pubsub/acknowledge_result.rb +79 -0
  10. data/lib/google/cloud/pubsub/async_publisher/batch.rb +306 -0
  11. data/lib/google/cloud/pubsub/async_publisher.rb +270 -161
  12. data/lib/google/cloud/pubsub/batch_publisher.rb +65 -33
  13. data/lib/google/cloud/pubsub/convert.rb +36 -8
  14. data/lib/google/cloud/pubsub/credentials.rb +7 -5
  15. data/lib/google/cloud/pubsub/errors.rb +93 -0
  16. data/lib/google/cloud/pubsub/flow_controller.rb +139 -0
  17. data/lib/google/cloud/pubsub/message.rb +52 -7
  18. data/lib/google/cloud/pubsub/policy.rb +15 -12
  19. data/lib/google/cloud/pubsub/project.rb +341 -75
  20. data/lib/google/cloud/pubsub/publish_result.rb +9 -2
  21. data/lib/google/cloud/pubsub/received_message.rb +182 -20
  22. data/lib/google/cloud/pubsub/retry_policy.rb +88 -0
  23. data/lib/google/cloud/pubsub/schema/list.rb +180 -0
  24. data/lib/google/cloud/pubsub/schema.rb +310 -0
  25. data/lib/google/cloud/pubsub/service.rb +285 -258
  26. data/lib/google/cloud/pubsub/snapshot/list.rb +14 -14
  27. data/lib/google/cloud/pubsub/snapshot.rb +17 -12
  28. data/lib/google/cloud/pubsub/subscriber/enumerator_queue.rb +4 -4
  29. data/lib/google/cloud/pubsub/subscriber/inventory.rb +74 -33
  30. data/lib/google/cloud/pubsub/subscriber/sequencer.rb +115 -0
  31. data/lib/google/cloud/pubsub/subscriber/stream.rb +138 -91
  32. data/lib/google/cloud/pubsub/subscriber/timed_unary_buffer.rb +397 -0
  33. data/lib/google/cloud/pubsub/subscriber.rb +213 -51
  34. data/lib/google/cloud/pubsub/subscription/list.rb +16 -16
  35. data/lib/google/cloud/pubsub/subscription/push_config.rb +268 -0
  36. data/lib/google/cloud/pubsub/subscription.rb +827 -137
  37. data/lib/google/cloud/pubsub/topic/list.rb +14 -14
  38. data/lib/google/cloud/pubsub/topic.rb +565 -93
  39. data/lib/google/cloud/pubsub/version.rb +4 -2
  40. data/lib/google/cloud/pubsub.rb +50 -41
  41. data/lib/google-cloud-pubsub.rb +26 -29
  42. metadata +59 -53
  43. data/lib/google/cloud/pubsub/subscriber/async_stream_pusher.rb +0 -222
  44. data/lib/google/cloud/pubsub/subscriber/async_unary_pusher.rb +0 -270
  45. data/lib/google/cloud/pubsub/v1/credentials.rb +0 -39
  46. data/lib/google/cloud/pubsub/v1/doc/google/iam/v1/iam_policy.rb +0 -63
  47. data/lib/google/cloud/pubsub/v1/doc/google/iam/v1/policy.rb +0 -128
  48. data/lib/google/cloud/pubsub/v1/doc/google/protobuf/duration.rb +0 -91
  49. data/lib/google/cloud/pubsub/v1/doc/google/protobuf/empty.rb +0 -29
  50. data/lib/google/cloud/pubsub/v1/doc/google/protobuf/field_mask.rb +0 -230
  51. data/lib/google/cloud/pubsub/v1/doc/google/protobuf/timestamp.rb +0 -109
  52. data/lib/google/cloud/pubsub/v1/doc/google/pubsub/v1/pubsub.rb +0 -628
  53. data/lib/google/cloud/pubsub/v1/publisher_client.rb +0 -734
  54. data/lib/google/cloud/pubsub/v1/publisher_client_config.json +0 -105
  55. data/lib/google/cloud/pubsub/v1/subscriber_client.rb +0 -1267
  56. data/lib/google/cloud/pubsub/v1/subscriber_client_config.json +0 -144
  57. data/lib/google/cloud/pubsub/v1.rb +0 -17
  58. data/lib/google/pubsub/v1/pubsub_pb.rb +0 -222
  59. data/lib/google/pubsub/v1/pubsub_services_pb.rb +0 -192
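
The most visible change for callers in this release is the namespace: the module is now Google::Cloud::PubSub rather than Google::Cloud::Pubsub, with a backward-compatibility alias (Pubsub = PubSub) added at the end of the async_publisher.rb diff below, and the old hand-written v1 client files listed above are removed. A minimal sketch of basic usage against the new namespace, using placeholder topic names:

  require "google/cloud/pubsub"

  # New namespace (capital S); Google::Cloud::Pubsub still resolves via the alias.
  pubsub = Google::Cloud::PubSub.new
  topic  = pubsub.topic "my-topic"

  topic.publish_async "task completed" do |result|
    # result is a PublishResult; see the publish docs in the diff below
  end

  # stop! replaces the old stop.wait! chain (see the AsyncPublisher diff below).
  topic.async_publisher.stop!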
@@ -15,20 +15,24 @@

  require "monitor"
  require "concurrent"
+ require "google/cloud/pubsub/errors"
+ require "google/cloud/pubsub/flow_controller"
+ require "google/cloud/pubsub/async_publisher/batch"
  require "google/cloud/pubsub/publish_result"
  require "google/cloud/pubsub/service"
+ require "google/cloud/pubsub/convert"

  module Google
  module Cloud
- module Pubsub
+ module PubSub
  ##
  # Used to publish multiple messages in batches to a topic. See
- # {Google::Cloud::Pubsub::Topic#async_publisher}
+ # {Google::Cloud::PubSub::Topic#async_publisher}
  #
  # @example
  # require "google/cloud/pubsub"
  #
- # pubsub = Google::Cloud::Pubsub.new
+ # pubsub = Google::Cloud::PubSub.new
  #
  # topic = pubsub.topic "my-topic"
  # topic.publish_async "task completed" do |result|
@@ -39,79 +43,128 @@ module Google
  # end
  # end
  #
- # topic.async_publisher.stop.wait!
+ # topic.async_publisher.stop!
  #
- # @attr_reader [String] topic_name The name of the topic the messages
- # are published to. In the form of
- # "/projects/project-identifier/topics/topic-name".
- # @attr_reader [Integer] max_bytes The maximum size of messages to be
- # collected before the batch is published. Default is 10,000,000
- # (10MB).
- # @attr_reader [Integer] max_messages The maximum number of messages to
- # be collected before the batch is published. Default is 1,000.
- # @attr_reader [Numeric] interval The number of seconds to collect
- # messages before the batch is published. Default is 0.25.
- # @attr_reader [Numeric] publish_threads The number of threads used to
- # publish messages. Default is 4.
- # @attr_reader [Numeric] callback_threads The number of threads to
- # handle the published messages' callbacks. Default is 8.
+ # @attr_reader [String] topic_name The name of the topic the messages are published to. The value is a
+ # fully-qualified topic name in the form `projects/{project_id}/topics/{topic_id}`.
+ # @attr_reader [Integer] max_bytes The maximum size of messages to be collected before the batch is published.
+ # Default is 1,000,000 (1MB).
+ # @attr_reader [Integer] max_messages The maximum number of messages to be collected before the batch is
+ # published. Default is 100.
+ # @attr_reader [Numeric] interval The number of seconds to collect messages before the batch is published. Default
+ # is 0.01.
+ # @attr_reader [Numeric] publish_threads The number of threads used to publish messages. Default is 2.
+ # @attr_reader [Numeric] callback_threads The number of threads to handle the published messages' callbacks.
+ # Default is 4.
  #
  class AsyncPublisher
  include MonitorMixin

- attr_reader :topic_name, :max_bytes, :max_messages, :interval,
- :publish_threads, :callback_threads
+ attr_reader :topic_name
+ attr_reader :max_bytes
+ attr_reader :max_messages
+ attr_reader :interval
+ attr_reader :publish_threads
+ attr_reader :callback_threads
+ attr_reader :flow_control
  ##
  # @private Implementation accessors
  attr_reader :service, :batch, :publish_thread_pool,
- :callback_thread_pool
+ :callback_thread_pool, :flow_controller,
+ :compress, :compression_bytes_threshold

  ##
  # @private Create a new instance of the object.
- def initialize topic_name, service, max_bytes: 10000000,
- max_messages: 1000, interval: 0.25, threads: {}
+ def initialize topic_name,
+ service,
+ max_bytes: 1_000_000,
+ max_messages: 100,
+ interval: 0.01,
+ threads: {},
+ flow_control: {},
+ compress: nil,
+ compression_bytes_threshold: nil
+ # init MonitorMixin
+ super()
  @topic_name = service.topic_path topic_name
  @service = service

  @max_bytes = max_bytes
  @max_messages = max_messages
  @interval = interval
- @publish_threads = (threads[:publish] || 4).to_i
- @callback_threads = (threads[:callback] || 8).to_i
-
+ @publish_threads = (threads[:publish] || 2).to_i
+ @callback_threads = (threads[:callback] || 4).to_i
+ @flow_control = {
+ message_limit: 10 * @max_messages,
+ byte_limit: 10 * @max_bytes
+ }.merge(flow_control).freeze
+
+ @published_at = nil
+ @publish_thread_pool = Concurrent::ThreadPoolExecutor.new max_threads: @publish_threads
+ @callback_thread_pool = Concurrent::ThreadPoolExecutor.new max_threads: @callback_threads
+
+ @ordered = false
+ @batches = {}
  @cond = new_cond
-
- # init MonitorMixin
- super()
+ @flow_controller = FlowController.new(**@flow_control)
+ @thread = Thread.new { run_background }
+ @compress = compress || Google::Cloud::PubSub::DEFAULT_COMPRESS
+ @compression_bytes_threshold = compression_bytes_threshold ||
+ Google::Cloud::PubSub::DEFAULT_COMPRESSION_BYTES_THRESHOLD
  end

  ##
  # Add a message to the async publisher to be published to the topic.
  # Messages will be collected in batches and published together.
- # See {Google::Cloud::Pubsub::Topic#publish_async}
- def publish data = nil, attributes = {}, &block
- msg = create_pubsub_message data, attributes
+ # See {Google::Cloud::PubSub::Topic#publish_async}
+ #
+ # @param [String, File] data The message payload. This will be converted
+ # to bytes encoded as ASCII-8BIT.
+ # @param [Hash] attributes Optional attributes for the message.
+ # @param [String] ordering_key Identifies related messages for which
+ # publish order should be respected.
+ # @yield [result] the callback for when the message has been published
+ # @yieldparam [PublishResult] result the result of the asynchronous
+ # publish
+ # @raise [Google::Cloud::PubSub::AsyncPublisherStopped] when the
+ # publisher is stopped. (See {#stop} and {#stopped?}.)
+ # @raise [Google::Cloud::PubSub::OrderedMessagesDisabled] when
+ # publishing a message with an `ordering_key` but ordered messages are
+ # not enabled. (See {#message_ordering?} and
+ # {#enable_message_ordering!}.)
+ # @raise [Google::Cloud::PubSub::OrderingKeyError] when publishing a
+ # message with an `ordering_key` that has already failed when
+ # publishing. Use {#resume_publish} to allow this `ordering_key` to be
+ # published again.
+ #
+ def publish data = nil, attributes = nil, ordering_key: nil, **extra_attrs, &callback
+ msg = Convert.pubsub_message data, attributes, ordering_key, extra_attrs
+ begin
+ @flow_controller.acquire msg.to_proto.bytesize
+ rescue FlowControlLimitError => e
+ stop_publish ordering_key, e if ordering_key
+ raise
+ end

  synchronize do
- raise "Can't publish when stopped." if @stopped
-
- if @batch.nil?
- @batch ||= Batch.new self
- @batch.add msg, block
- else
- unless @batch.try_add msg, block
- publish_batch!
- @batch = Batch.new self
- @batch.add msg, block
- end
- end
-
- init_resources!
-
- publish_batch! if @batch.ready?
+ raise AsyncPublisherStopped if @stopped
+ raise OrderedMessagesDisabled if !@ordered && !msg.ordering_key.empty? # default is empty string

+ batch = resolve_batch_for_message msg
+ if batch.canceled?
+ @flow_controller.release msg.to_proto.bytesize
+ raise OrderingKeyError, batch.ordering_key
+ end
+ batch_action = batch.add msg, callback
+ if batch_action == :full
+ publish_batches!
+ elsif @published_at.nil?
+ # Set initial time to now to start the background counter
+ @published_at = Time.now
+ end
  @cond.signal
  end
+
  nil
  end

@@ -127,36 +180,53 @@ module Google
  break if @stopped

  @stopped = true
- publish_batch!
+ publish_batches! stop: true
  @cond.signal
- @publish_thread_pool.shutdown if @publish_thread_pool
+ @publish_thread_pool.shutdown
  end

  self
  end

  ##
- # Blocks until the publisher is fully stopped, all pending messages
- # have been published, and all callbacks have completed. Does not stop
- # the publisher. To stop the publisher, first call {#stop} and then
- # call {#wait!} to block until the publisher is stopped.
+ # Blocks until the publisher is fully stopped, all pending messages have
+ # been published, and all callbacks have completed, or until `timeout`
+ # seconds have passed.
+ #
+ # Does not stop the publisher. To stop the publisher, first call {#stop}
+ # and then call {#wait!} to block until the publisher is stopped
+ #
+ # @param [Number, nil] timeout The number of seconds to block until the
+ # publisher is fully stopped. Default will block indefinitely.
  #
  # @return [AsyncPublisher] returns self so calls can be chained.
  def wait! timeout = nil
  synchronize do
- if @publish_thread_pool
- @publish_thread_pool.wait_for_termination timeout
- end
+ @publish_thread_pool.wait_for_termination timeout

- if @callback_thread_pool
- @callback_thread_pool.shutdown
- @callback_thread_pool.wait_for_termination timeout
- end
+ @callback_thread_pool.shutdown
+ @callback_thread_pool.wait_for_termination timeout
  end

  self
  end

+ ##
+ # Stop this publisher and block until the publisher is fully stopped,
+ # all pending messages have been published, and all callbacks have
+ # completed, or until `timeout` seconds have passed.
+ #
+ # The same as calling {#stop} and {#wait!}.
+ #
+ # @param [Number, nil] timeout The number of seconds to block until the
+ # publisher is fully stopped. Default will block indefinitely.
+ #
+ # @return [AsyncPublisher] returns self so calls can be chained.
+ def stop! timeout = nil
+ stop
+ wait! timeout
+ end
+
  ##
  # Forces all messages in the current batch to be published
  # immediately.
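
The new stop! helper shown above is simply stop followed by wait!, optionally bounded by a timeout in seconds. A small sketch of a graceful shutdown with the new API, assuming the publisher was already created by an earlier publish_async call:

  publisher = topic.async_publisher
  unless publisher.nil?
    # Old pattern: publisher.stop.wait!
    # New pattern: stop accepting messages, flush pending batches, run the
    # callbacks, and give up waiting after 30 seconds.
    publisher.stop! 30
  end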
@@ -164,7 +234,7 @@ module Google
  # @return [AsyncPublisher] returns self so calls can be chained.
  def flush
  synchronize do
- publish_batch!
+ publish_batches!
  @cond.signal
  end

@@ -187,151 +257,190 @@ module Google
  synchronize { @stopped }
  end

- protected
+ ##
+ # Enables message ordering for messages with ordering keys. When
+ # enabled, messages published with the same `ordering_key` will be
+ # delivered in the order they were published.
+ #
+ # See {#message_ordering?}. See {Topic#publish_async},
+ # {Subscription#listen}, and {Message#ordering_key}.
+ #
+ def enable_message_ordering!
+ synchronize { @ordered = true }
+ end
+
+ ##
+ # Whether message ordering for messages with ordering keys has been
+ # enabled. When enabled, messages published with the same `ordering_key`
+ # will be delivered in the order they were published. When disabled,
+ # messages may be delivered in any order.
+ #
+ # See {#enable_message_ordering!}. See {Topic#publish_async},
+ # {Subscription#listen}, and {Message#ordering_key}.
+ #
+ # @return [Boolean]
+ #
+ def message_ordering?
+ synchronize { @ordered }
+ end

- def init_resources!
- @first_published_at ||= Time.now
- @publish_thread_pool ||= Concurrent::FixedThreadPool.new \
- @publish_threads
- @callback_thread_pool ||= Concurrent::FixedThreadPool.new \
- @callback_threads
- @thread ||= Thread.new { run_background }
+ ##
+ # Resume publishing ordered messages for the provided ordering key.
+ #
+ # @param [String] ordering_key Identifies related messages for which
+ # publish order should be respected.
+ #
+ # @return [boolean] `true` when resumed, `false` otherwise.
+ #
+ def resume_publish ordering_key
+ synchronize do
+ batch = resolve_batch_for_ordering_key ordering_key
+ return if batch.nil?
+ batch.resume!
+ end
  end

+ protected
+
  def run_background
  synchronize do
  until @stopped
- if @batch.nil?
+ if @published_at.nil?
  @cond.wait
  next
  end

- time_since_first_publish = Time.now - @first_published_at
+ time_since_first_publish = Time.now - @published_at
  if time_since_first_publish > @interval
- # interval met, publish the batch...
- publish_batch!
+ # interval met, flush the batches...
+ publish_batches!
  @cond.wait
  else
  # still waiting for the interval to publish the batch...
- @cond.wait(@interval - time_since_first_publish)
+ timeout = @interval - time_since_first_publish
+ @cond.wait timeout
  end
  end
  end
  end

- def publish_batch!
- return unless @batch
+ def resolve_batch_for_message msg
+ @batches[msg.ordering_key] ||= Batch.new self, msg.ordering_key
+ end
+
+ def resolve_batch_for_ordering_key ordering_key
+ @batches[ordering_key]
+ end

- publish_batch_async @topic_name, @batch
- @batch = nil
- @first_published_at = nil
+ def stop_publish ordering_key, err
+ synchronize do
+ batch = resolve_batch_for_ordering_key ordering_key
+ return if batch.nil?
+ items = batch.cancel!
+ items.each do |item|
+ @flow_controller.release item.bytesize
+ next unless item.callback
+
+ publish_result = PublishResult.from_error item.msg, err
+ execute_callback_async item.callback, publish_result
+ end
+ end
+ end
+
+ def publish_batches! stop: nil
+ @batches.reject! { |_ordering_key, batch| batch.empty? }
+ @batches.each_value do |batch|
+ ready = batch.publish! stop: stop
+ publish_batch_async @topic_name, batch if ready
+ end
+ # Set published_at to nil to wait indefinitely
+ @published_at = nil
  end

  def publish_batch_async topic_name, batch
+ # TODO: raise unless @publish_thread_pool.running?
  return unless @publish_thread_pool.running?

- Concurrent::Future.new(executor: @publish_thread_pool) do
- begin
- grpc = @service.publish topic_name, batch.messages
- batch.items.zip(Array(grpc.message_ids)) do |item, id|
- next unless item.callback
+ Concurrent::Promises.future_on(
+ @publish_thread_pool, topic_name, batch
+ ) { |t, b| publish_batch_sync t, b }
+ end

- item.msg.message_id = id
- publish_result = PublishResult.from_grpc(item.msg)
- execute_callback_async item.callback, publish_result
- end
- rescue StandardError => e
- batch.items.each do |item|
+ # rubocop:disable Metrics/AbcSize
+
+ def publish_batch_sync topic_name, batch
+ # The only batch methods that are safe to call from the loop are
+ # rebalance! and reset! because they are the only methods that are
+ # synchronized.
+ loop do
+ items = batch.rebalance!
+
+ unless items.empty?
+ grpc = @service.publish topic_name,
+ items.map(&:msg),
+ compress: compress && batch.total_message_bytes >= compression_bytes_threshold
+ items.zip Array(grpc.message_ids) do |item, id|
+ @flow_controller.release item.bytesize
  next unless item.callback

- publish_result = PublishResult.from_error(item.msg, e)
+ item.msg.message_id = id
+ publish_result = PublishResult.from_grpc item.msg
  execute_callback_async item.callback, publish_result
  end
  end
- end.execute
- end
-
- def execute_callback_async callback, publish_result
- return unless @callback_thread_pool.running?

- Concurrent::Future.new(executor: @callback_thread_pool) do
- callback.call publish_result
- end.execute
- end
-
- def create_pubsub_message data, attributes
- attributes ||= {}
- if data.is_a?(::Hash) && attributes.empty?
- attributes = data
- data = nil
+ break unless batch.reset!
  end
- # Convert IO-ish objects to strings
- if data.respond_to?(:read) && data.respond_to?(:rewind)
- data.rewind
- data = data.read
+ rescue StandardError => e
+ items = batch.items
+
+ unless batch.ordering_key.empty?
+ retry if publish_batch_error_retryable? e
+ # Cancel the batch if the error is not to be retried.
+ begin
+ raise OrderingKeyError, batch.ordering_key
+ rescue OrderingKeyError => e
+ # The existing e variable is not set to OrderingKeyError
+ # Get all unsent messages for the callback
+ items = batch.cancel!
+ end
  end
- # Convert data to encoded byte array to match the protobuf defn
- data_bytes = \
- String(data).dup.force_encoding(Encoding::ASCII_8BIT).freeze

- # Convert attributes to strings to match the protobuf definition
- attributes = Hash[attributes.map { |k, v| [String(k), String(v)] }]
+ items.each do |item|
+ @flow_controller.release item.bytesize
+ next unless item.callback

- Google::Pubsub::V1::PubsubMessage.new data: data_bytes,
- attributes: attributes
- end
-
- ##
- # @private
- class Batch
- attr_reader :messages, :callbacks
-
- def initialize publisher
- @publisher = publisher
- @messages = []
- @callbacks = []
- @total_message_bytes = publisher.topic_name.bytesize + 2
+ publish_result = PublishResult.from_error item.msg, e
+ execute_callback_async item.callback, publish_result
  end

- def add msg, callback
- @messages << msg
- @callbacks << callback
- @total_message_bytes += msg.to_proto.bytesize + 2
- end
+ # publish will retry indefinitely, as long as there are unsent items.
+ retry if batch.reset!
+ end

- def try_add msg, callback
- new_message_count = total_message_count + 1
- new_message_bytes = total_message_bytes + msg.to_proto.bytesize + 2
- if new_message_count > @publisher.max_messages ||
- new_message_bytes >= @publisher.max_bytes
- return false
- end
- add msg, callback
- true
- end
+ # rubocop:enable Metrics/AbcSize

- def ready?
- total_message_count >= @publisher.max_messages ||
- total_message_bytes >= @publisher.max_bytes
- end
+ PUBLISH_RETRY_ERRORS = [
+ GRPC::Cancelled, GRPC::DeadlineExceeded, GRPC::Internal,
+ GRPC::ResourceExhausted, GRPC::Unauthenticated, GRPC::Unavailable
+ ].freeze

- def total_message_count
- @messages.count
- end
+ def publish_batch_error_retryable? error
+ PUBLISH_RETRY_ERRORS.any? { |klass| error.is_a? klass }
+ end

- def total_message_bytes
- @total_message_bytes
- end
+ def execute_callback_async callback, publish_result
+ return unless @callback_thread_pool.running?

- def items
- @messages.zip(@callbacks).map do |msg, callback|
- Item.new msg, callback
- end
+ Concurrent::Promises.future_on(
+ @callback_thread_pool, callback, publish_result
+ ) do |cback, p_result|
+ cback.call p_result
  end
-
- Item = Struct.new :msg, :callback
  end
  end
  end
+
+ Pubsub = PubSub unless const_defined? :Pubsub
  end
  end
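
Taken together, the ordering-key support added above works as follows: ordering must be enabled before publishing with an ordering_key (otherwise OrderedMessagesDisabled is raised), a publish failure pauses that key so later publishes for it raise OrderingKeyError, and resume_publish clears the paused state. Publishing is now also subject to flow control (by default 10x the batch limits), and exceeding the limits raises FlowControlLimitError. Below is a hedged sketch of the ordered-publishing flow using the Topic-level wrappers referenced in the doc comments above (Topic#publish_async and friends); the topic and key names are placeholders and the Topic-level methods are assumed to delegate to the AsyncPublisher shown in this diff:

  require "google/cloud/pubsub"

  pubsub = Google::Cloud::PubSub.new
  topic  = pubsub.topic "my-ordered-topic"

  # Must be enabled before publishing with an ordering_key,
  # otherwise OrderedMessagesDisabled is raised.
  topic.enable_message_ordering!

  10.times do |i|
    topic.publish_async "step #{i}", ordering_key: "job-42" do |result|
      unless result.succeeded?
        # A failure pauses "job-42"; further publishes for that key raise
        # OrderingKeyError until publishing is resumed for it.
        topic.resume_publish "job-42"
      end
    end
  end

  # Flush pending batches and shut down (stop + wait!).
  topic.async_publisher.stop!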