active_publisher 1.2.3-java → 1.3.0-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 15d88cf40d6d054ecdb12cf8d079fb8e09fedd02e19aab629e4034cbbb667979
- data.tar.gz: 6c6e47f2d49b035c433a9fc400f83aa0b96ac5d1b0a8a926730476bbe5250b66
+ metadata.gz: 7447a8948c2a4e24769ef9733ab12e843266564385535c2cb7bfbb8d485ec50a
+ data.tar.gz: c8fef7d08eef182420cbdef0fa847b40cc5f9e14ef96b81ad076fcb885526717
  SHA512:
- metadata.gz: a9d25a3231071ff71ebc32d0e98b362ae1c558b67e568df50806930567d334a57783a428007e95504d7c14e0a7deebf7b864457e008dabfb7a715b51b8fe8358
- data.tar.gz: e90cd0c23105cd8ededa04d3bc0636e76d589b60f7060f8784720c2c7cd594e33d27063581b7787babd1cf87878deacea39a34269344257809ec46676143cfd6
+ metadata.gz: 6e5f3d51840ac76b600444eb36e4b358f4fed9ae5ae876eed27747f8e89f67273d7cf52fc608dc788f0631257b6f4c6d5e05b9f014a05b39175958f1956fefa1
+ data.tar.gz: e965f7bedbc7449c3cb6f6e86afa718be8d28853ba46acec02be6589eebf58389c276b4aa3c90b8a2a919825c17acfd22e93987d4212a4cec535219e9d34158f
data/.circleci/config.yml ADDED
@@ -0,0 +1,56 @@
+ # Inspired by: http://mikebian.co/running-tests-against-multiple-ruby-versions-using-circleci/
+
+ version: 2.1
+
+ orbs:
+   ruby: circleci/ruby@1.1
+
+ jobs:
+   test:
+     parallelism: 1
+     parameters:
+       ruby-image:
+         type: string
+     docker:
+       - image: << parameters.ruby-image >>
+       - image: rabbitmq
+
+     steps:
+       - checkout
+       - run:
+           name: install dockerize
+           command: wget https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && sudo tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz
+           environment:
+             DOCKERIZE_VERSION: v0.3.0
+       - run:
+           name: Wait for rabbitmq
+           command: dockerize -wait tcp://localhost:5672 -timeout 1m
+       - run:
+           name: Install bundler
+           command: gem install bundler
+       - run:
+           name: Which bundler?
+           command: bundle -v
+       - run:
+           name: bundle install
+           command: bundle install
+       - run:
+           name: rspec
+           command: bundle exec rspec
+
+ # Strangely, there seems to be very little documentation about exactly how matrix builds work.
+ # By defining a param inside your job definition, Circle CI will automatically spawn a job for
+ # each unique param value passed via `matrix`. Neat!
+ # https://circleci.com/blog/circleci-matrix-jobs/
+ workflows:
+   build_and_test:
+     jobs:
+       - test:
+           matrix:
+             parameters:
+               ruby-image:
+                 - circleci/ruby:2.5
+                 - circleci/ruby:2.6
+                 - circleci/ruby:2.7
+                 - circleci/jruby:9.1
+                 - circleci/jruby:9.2
data/README.md CHANGED
@@ -55,7 +55,9 @@ Defaults for the configuration are:
  :network_recovery_interval => 1,
  :password => "guest",
  :port => 5672,
+ :publisher_threads => 1,
  :publisher_confirms => false,
+ :publisher_confirms_timeout => 5_000,
  :seconds_to_wait_for_graceful_shutdown => 30,
  :timeout => 1,
  :tls => false,
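The two defaults added above (`:publisher_threads` and `:publisher_confirms_timeout`) correspond to new configuration accessors in this release (see the configuration hunk further down). A minimal sketch of how an application might tune them, assuming the writer methods implied by the gem's attr_accessor-based configuration; the values are purely illustrative:

    require "active_publisher"

    # Illustrative values only; both settings are introduced in 1.3.0.
    ::ActivePublisher.configuration.publisher_threads = 2              # number of consumer threads
    ::ActivePublisher.configuration.publisher_confirms = true
    ::ActivePublisher.configuration.publisher_confirms_timeout = 5_000 # milliseconds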
@@ -14,14 +14,15 @@ module ActivePublisher
  :max_queue_size,
  :supervisor_interval

- attr_reader :consumer, :queue, :supervisor
+ attr_reader :consumers, :queue, :supervisor

  def initialize(back_pressure_strategy, max_queue_size, supervisor_interval)
  self.back_pressure_strategy = back_pressure_strategy
  @max_queue_size = max_queue_size
  @supervisor_interval = supervisor_interval
  @queue = ::MultiOpQueue::Queue.new
- create_and_supervise_consumer!
+ @consumers = {}
+ create_and_supervise_consumers!
  end

  def back_pressure_strategy=(strategy)
@@ -52,36 +53,44 @@ module ActivePublisher
  def size
  # Requests might be in flight (out of the queue, but not yet published), so taking the max should be
  # good enough to make sure we're honest about the actual queue size.
- return queue.size if consumer.nil?
- [queue.size, consumer.sampled_queue_size].max
+ return queue.size if consumers.empty?
+ [queue.size, consumer_sampled_queue_size].max
  end

  private

- def create_and_supervise_consumer!
- @consumer = ::ActivePublisher::Async::InMemoryAdapter::ConsumerThread.new(queue)
+ def create_and_supervise_consumers!
+ ::ActivePublisher.configuration.publisher_threads.times do
+ consumer_id = ::SecureRandom.uuid
+ consumers[consumer_id] = ::ActivePublisher::Async::InMemoryAdapter::ConsumerThread.new(queue)
+ supervisor_task = ::Concurrent::TimerTask.new(:execution_interval => supervisor_interval) do
+ current_time = ::Time.now
+ consumer = consumers[consumer_id]

- supervisor_task = ::Concurrent::TimerTask.new(:execution_interval => supervisor_interval) do
- current_time = ::Time.now
+ # Consumer is lagging if it does not "tick" at least once every 10 seconds.
+ seconds_since_last_tick = current_time - consumer.last_tick_at
+ consumer_is_lagging = seconds_since_last_tick > ::ActivePublisher.configuration.max_async_publisher_lag_time
+ logger.error "ActivePublisher consumer is lagging. Last consumer tick was #{seconds_since_last_tick} seconds ago." if consumer_is_lagging

- # Consumer is lagging if it does not "tick" at least once every 10 seconds.
- seconds_since_last_tick = current_time - consumer.last_tick_at
- consumer_is_lagging = seconds_since_last_tick > ::ActivePublisher.configuration.max_async_publisher_lag_time
- logger.error "ActivePublisher consumer is lagging. Last consumer tick was #{seconds_since_last_tick} seconds ago." if consumer_is_lagging
+ # Check to see if we should restart the consumer.
+ if !consumer.alive? || consumer_is_lagging
+ consumer.kill rescue nil
+ consumers[consumer_id] = ::ActivePublisher::Async::InMemoryAdapter::ConsumerThread.new(queue)
+ ::ActiveSupport::Notifications.instrument "async_queue.thread_restart"
+ end

- # Check to see if we should restart the consumer.
- if !consumer.alive? || consumer_is_lagging
- consumer.kill rescue nil
- @consumer = ::ActivePublisher::Async::InMemoryAdapter::ConsumerThread.new(queue)
+ # Notify the current queue size.
+ ::ActiveSupport::Notifications.instrument "async_queue_size.active_publisher", queue.size
  end
-
- # Notify the current queue size.
- ::ActiveSupport::Notifications.instrument "async_queue_size.active_publisher", queue.size
+ supervisor_task.execute
  end
- supervisor_task.execute
  end
- end

+ def consumer_sampled_queue_size
+ consumers.values.map(&:sampled_queue_size).max
+ end
+
+ end
  end
  end
  end
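The supervisor above now emits two ActiveSupport::Notifications events: "async_queue.thread_restart" when a dead or lagging consumer thread is replaced, and "async_queue_size.active_publisher" with the sampled queue size. A short sketch of how a host application might hook into them; the subscriber code is illustrative and not part of the gem:

    require "active_support/notifications"

    # Count consumer thread restarts (event name added in this release).
    ::ActiveSupport::Notifications.subscribe("async_queue.thread_restart") do |*_args|
      # e.g. increment a "publisher.thread_restart" metric
    end

    # The supervisor passes the raw queue size as the payload argument to `instrument`.
    ::ActiveSupport::Notifications.subscribe("async_queue_size.active_publisher") do |_name, _start, _finish, _id, size|
      # e.g. gauge the in-memory publish backlog with `size`
    end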
@@ -2,7 +2,13 @@ module ActivePublisher
  module Async
  module InMemoryAdapter
  class ConsumerThread
- attr_reader :thread, :queue, :sampled_queue_size, :last_tick_at
+ attr_reader :channel, :flush_max, :thread, :queue, :sampled_queue_size, :last_tick_at
+
+ if ::RUBY_PLATFORM == "java"
+ CHANNEL_CLOSED_ERRORS = [::MarchHare::ChannelAlreadyClosed]
+ else
+ CHANNEL_CLOSED_ERRORS = [::Bunny::ChannelAlreadyClosed]
+ end

  if ::RUBY_PLATFORM == "java"
  NETWORK_ERRORS = [::MarchHare::NetworkException, ::MarchHare::ConnectionRefused,
@@ -21,6 +27,7 @@ module ActivePublisher
  def initialize(listen_queue)
  @queue = listen_queue
  @sampled_queue_size = queue.size
+ @flush_max = ::ActivePublisher.configuration.messages_per_batch

  update_last_tick_at
  start_thread
@@ -45,6 +52,32 @@ module ActivePublisher
  end
  end

+ def cleanup_up_channel
+ return if channel.nil?
+ channel.close
+ rescue => error
+ ::ActivePublisher.configuration.error_handler.call(error, {:status => "Cleaning up the channel"})
+ end
+
+ def handle_current_messages_on_unknown_error(current_messages)
+ current_messages.each do |message|
+ # Degrade to single message publish ... or at least attempt to
+ begin
+ ::ActivePublisher.publish(message.route, message.payload, message.exchange_name, message.options)
+ current_messages.delete(message)
+ rescue *CHANNEL_CLOSED_ERRORS
+ # If the channel is bad, raise!
+ raise
+ rescue *PRECONDITION_ERRORS => error
+ # Delete messages if rabbitmq cannot declare the exchange (or some other precondition failed).
+ ::ActivePublisher.configuration.error_handler.call(error, {:reason => "precondition failed", :message => message})
+ current_messages.delete(message)
+ rescue => other_error
+ ::ActivePublisher.configuration.error_handler.call(other_error, {:route => message.route, :payload => message.payload, :exchange_name => message.exchange_name, :options => message.options})
+ end
+ end
+ end
+
  def make_channel
  channel = ::ActivePublisher::Async::InMemoryAdapter::Channel.new
  channel.confirm_select if ::ActivePublisher.configuration.publisher_confirms
@@ -57,57 +90,54 @@ module ActivePublisher

  def start_thread
  return if alive?
- @thread = ::Thread.new do
- loop do
- # Sample the queue size so we don't shutdown when messages are in flight.
- @sampled_queue_size = queue.size
- current_messages = queue.pop_up_to(50, :timeout => 0.1)
- update_last_tick_at
- # If the queue is empty, we should continue to update to "last_tick_at" time.
- next if current_messages.nil?
-
- # We only look at active publisher messages. Everything else is dropped.
- current_messages.select! { |message| message.is_a?(::ActivePublisher::Message) }
-
- begin
- @channel ||= make_channel
-
- # Only open a single connection for each group of messages to an exchange
- current_messages.group_by(&:exchange_name).each do |exchange_name, messages|
- publish_all(@channel, exchange_name, messages)
- current_messages -= messages
- end
- rescue *NETWORK_ERRORS
- # Sleep because connection is down
- await_network_reconnect
- rescue => unknown_error
- ::ActivePublisher.configuration.error_handler.call(unknown_error, {:number_of_messages => current_messages.size})
- current_messages.each do |message|
- # Degrade to single message publish ... or at least attempt to
- begin
- ::ActivePublisher.publish(message.route, message.payload, message.exchange_name, message.options)
- current_messages.delete(message)
- rescue *PRECONDITION_ERRORS => error
- # Delete messages if rabbitmq cannot declare the exchange (or somet other precondition failed).
- ::ActivePublisher.configuration.error_handler.call(error, {:reason => "precondition failed", :message => message})
- current_messages.delete(message)
- rescue => individual_error
- ::ActivePublisher.configuration.error_handler.call(individual_error, {:route => message.route, :payload => message.payload, :exchange_name => message.exchange_name, :options => message.options})
- end
- end
-
- # TODO: Find a way to bubble this out of the thread for logging purposes.
- # Reraise the error out of the publisher loop. The Supervisor will restart the consumer.
- raise unknown_error
- ensure
- # Always requeue anything that gets stuck.
- queue.concat(current_messages) if current_messages && !current_messages.empty?
+ @thread = ::Thread.new { start_consuming_thread }
+ end
+
+ def start_consuming_thread
+ loop do
+ # Sample the queue size so we don't shutdown when messages are in flight.
+ @sampled_queue_size = queue.size
+ current_messages = queue.pop_up_to(flush_max, :timeout => 0.1)
+ update_last_tick_at
+ # If the queue is empty, we should continue to update the "last_tick_at" time.
+ next if current_messages.nil?
+
+ @channel ||= make_channel
+
+ # We only look at active publisher messages. Everything else is dropped.
+ current_messages.select! { |message| message.is_a?(::ActivePublisher::Message) }
+
+ begin
+ # Only open a single connection for each group of messages to an exchange
+ current_messages.group_by(&:exchange_name).each do |exchange_name, messages|
+ publish_all(exchange_name, messages)
+ current_messages -= messages
  end
+ rescue *CHANNEL_CLOSED_ERRORS
+ # If the channel is bad, raise without sending one-by-one!
+ raise
+ rescue *NETWORK_ERRORS
+ # Sleep because connection is down
+ await_network_reconnect
+ rescue => unknown_error
+ ::ActivePublisher.configuration.error_handler.call(unknown_error, {:number_of_messages => current_messages.size})
+
+ # Attempt to deliver messages one-by-one. Raise if a closed channel error appears.
+ handle_current_messages_on_unknown_error(current_messages)
+
+ # TODO: Find a way to bubble this out of the thread for logging purposes.
+ # Reraise the error out of the publisher loop. The Supervisor will restart the consumer.
+ raise unknown_error
+ ensure
+ # Always requeue anything that gets stuck.
+ queue.concat(current_messages) if current_messages && !current_messages.empty?
  end
  end
+ ensure
+ cleanup_up_channel
  end

- def publish_all(channel, exchange_name, messages)
+ def publish_all(exchange_name, messages)
  exchange = channel.topic(exchange_name)
  messages.each do |message|
  fail ::ActivePublisher::ExchangeMismatchError, "bulk publish messages must match publish_all exchange_name" if message.exchange_name != exchange_name
@@ -116,10 +146,10 @@ module ActivePublisher
  exchange.publish(message.payload, options)
  end
  end
- wait_for_confirms(channel)
+ wait_for_confirms
  end

- def wait_for_confirms(channel)
+ def wait_for_confirms
  return true unless channel.using_publisher_confirms?
  channel.wait_for_confirms(::ActivePublisher.configuration.publisher_confirms_timeout)
  end
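Every rescue path above reports through the configured error handler, passing the exception plus a context hash. A sketch of wiring a custom handler, assuming `error_handler` is exposed as a writable configuration attribute like the other settings in this diff (only the `(error, context)` call sites are shown above; the handler below is illustrative):

    require "active_publisher"
    require "logger"

    LOGGER = ::Logger.new($stdout)

    # Illustrative handler: receives the exception plus a context hash such as
    # {:reason => "precondition failed", :message => message}, matching the call sites above.
    ::ActivePublisher.configuration.error_handler = lambda do |error, context|
      LOGGER.error("[active_publisher] #{error.class}: #{error.message} #{context.inspect}")
    end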
@@ -9,28 +9,33 @@ module ActivePublisher
  :timeout_interval => 5, # seconds
  }

- attr_reader :consumer, :queue, :supervisor
+ attr_reader :consumers, :queue, :supervisor

  def initialize(redis_pool)
  @queue = ::ActivePublisher::Async::RedisAdapter::RedisMultiPopQueue.new(redis_pool, ::ActivePublisher::Async::RedisAdapter::REDIS_LIST_KEY)
- create_and_supervise_consumer!
+ @consumers = {}
+ create_and_supervise_consumers!
  end

- def create_and_supervise_consumer!
- @consumer = ::ActivePublisher::Async::InMemoryAdapter::ConsumerThread.new(queue)
-
- supervisor_task = ::Concurrent::TimerTask.new(SUPERVISOR_INTERVAL) do
- # This may also be the place to start additional publishers when we are getting backed up ... ?
- unless consumer.alive?
- consumer.kill rescue nil
- @consumer = ::ActivePublisher::Async::InMemoryAdapter::ConsumerThread.new(queue)
+ def create_and_supervise_consumers!
+ ::ActivePublisher.configuration.publisher_threads.times do
+ consumer_id = ::SecureRandom.uuid
+ consumers[consumer_id] = ::ActivePublisher::Async::InMemoryAdapter::ConsumerThread.new(queue)
+
+ supervisor_task = ::Concurrent::TimerTask.new(SUPERVISOR_INTERVAL) do
+ consumer = consumers[consumer_id]
+ unless consumer.alive?
+ consumer.kill rescue nil
+ consumers[consumer_id] = ::ActivePublisher::Async::InMemoryAdapter::ConsumerThread.new(queue)
+ ::ActiveSupport::Notifications.instrument "async_queue.thread_restart"
+ end
+
+ # Notify the current queue size.
+ ::ActiveSupport::Notifications.instrument "redis_async_queue_size.active_publisher", queue.size
  end

- # Notify the current queue size.
- ::ActiveSupport::Notifications.instrument "redis_async_queue_size.active_publisher", queue.size
+ supervisor_task.execute
  end
-
- supervisor_task.execute
  end

  def size
@@ -19,7 +19,7 @@ module ActivePublisher
  }
  include ::ActivePublisher::Logging

- attr_reader :async_queue, :redis_pool, :queue
+ attr_reader :async_queue, :flush_max, :flush_min, :redis_pool, :queue

  def initialize(new_redis_pool)
  logger.info "Starting redis publisher adapter"
@@ -27,6 +27,8 @@ module ActivePublisher
  @redis_pool = new_redis_pool
  @async_queue = ::ActivePublisher::Async::RedisAdapter::Consumer.new(redis_pool)
  @queue = ::MultiOpQueue::Queue.new
+ @flush_max = ::ActivePublisher.configuration.messages_per_batch
+ @flush_min = @flush_max / 2

  supervisor_task = ::Concurrent::TimerTask.new(SUPERVISOR_INTERVAL) do
  queue_size = queue.size
@@ -41,7 +43,7 @@ module ActivePublisher
  def publish(route, payload, exchange_name, options = {})
  message = ::ActivePublisher::Message.new(route, payload, exchange_name, options)
  queue << ::Marshal.dump(message)
- flush_queue! if queue.size >= 20 || options[:flush_queue]
+ flush_queue! if queue.size >= flush_min || options[:flush_queue]

  nil
  end
@@ -58,7 +60,7 @@ module ActivePublisher

  def flush_queue!
  return if queue.empty?
- encoded_messages = queue.pop_up_to(25, :timeout => 0.001)
+ encoded_messages = queue.pop_up_to(flush_max, :timeout => 0.001)

  return if encoded_messages.nil?
  return unless encoded_messages.respond_to?(:each)
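With the hunks above, the previously hard-coded flush thresholds (enqueue 20 to trigger a flush, pop up to 25 per flush) are both derived from the new `messages_per_batch` setting. Worked out under its default of 25 (defaults appear in the configuration hunk below):

    # Illustrative arithmetic using the 1.3.0 default.
    messages_per_batch = 25        # ::ActivePublisher.configuration.messages_per_batch
    flush_max = messages_per_batch # => 25, maximum messages popped per flush_queue!
    flush_min = flush_max / 2      # => 12 (integer division), queue size that triggers a flush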
@@ -8,9 +8,11 @@ module ActivePublisher
  :host,
  :hosts,
  :max_async_publisher_lag_time,
+ :messages_per_batch,
  :network_recovery_interval,
  :password,
  :port,
+ :publisher_threads,
  :publisher_confirms,
  :publisher_confirms_timeout,
  :seconds_to_wait_for_graceful_shutdown,
@@ -36,9 +38,11 @@ module ActivePublisher
  :host => "localhost",
  :hosts => [],
  :password => "guest",
+ :messages_per_batch => 25,
  :max_async_publisher_lag_time => 10,
  :network_recovery_interval => NETWORK_RECOVERY_INTERVAL,
  :port => 5672,
+ :publisher_threads => 1,
  :publisher_confirms => false,
  :publisher_confirms_timeout => 5_000, #specified as a number of milliseconds
  :seconds_to_wait_for_graceful_shutdown => 30,
@@ -1,3 +1,3 @@
  module ActivePublisher
- VERSION = "1.2.3"
+ VERSION = "1.3.0"
  end
@@ -4,6 +4,7 @@ else
  require "bunny"
  end
  require "active_support"
+ require "securerandom"
  require "thread"

  require "active_publisher/logging"
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: active_publisher
  version: !ruby/object:Gem::Version
- version: 1.2.3
+ version: 1.3.0
  platform: java
  authors:
  - Brian Stien
@@ -12,7 +12,7 @@ authors:
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2020-07-29 00:00:00.000000000 Z
+ date: 2021-10-19 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
@@ -179,9 +179,9 @@ executables: []
  extensions: []
  extra_rdoc_files: []
  files:
+ - ".circleci/config.yml"
  - ".gitignore"
  - ".rspec"
- - ".travis.yml"
  - CODE_OF_CONDUCT.md
  - Gemfile
  - LICENSE.txt
@@ -223,8 +223,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubyforge_project:
- rubygems_version: 2.7.9
+ rubygems_version: 3.2.28
  signing_key:
  specification_version: 4
  summary: Aims to make publishing work across MRI and jRuby painless and add some nice
data/.travis.yml DELETED
@@ -1,14 +0,0 @@
- language: ruby
- rvm:
- - 2.3.8
- - 2.5.7
- - jruby-9.1.12.0
- - jruby-9.2.7.0
- addons:
- apt:
- packages:
- - rabbitmq-server
- services:
- - rabbitmq
- sudo: false
- cache: bundler