local_bus 0.1.2 → 0.3.0

data/lib/local_bus/bus.rb CHANGED
@@ -3,29 +3,35 @@
  # rbs_inline: enabled

  class LocalBus
- # Local in-process single threaded "message bus" with non-blocking I/O
+ # The Bus acts as a direct transport mechanism for messages, akin to placing a passenger directly onto a bus.
+ # When a message is published to the Bus, it is immediately delivered to all subscribers, ensuring prompt execution of tasks.
+ # This is achieved through non-blocking I/O operations, which allow the Bus to handle multiple tasks efficiently without blocking the main thread.
+ #
+ # @note While the Bus uses asynchronous operations to optimize performance,
+ # the actual processing of a message may still experience slight delays due to I/O wait times from prior messages.
+ # This means that while the Bus aims for immediate processing, the nature of asynchronous operations can introduce some latency.
  class Bus
  include MonitorMixin

  # Constructor
- # @note Creates a new Bus instance with specified concurrency
- # @rbs concurrency: Integer -- maximum number of concurrent tasks (default: Concurrent.processor_count)
- def initialize(concurrency: Concurrent.processor_count)
+ # @note Creates a new Bus instance with specified max concurrency (i.e. number of tasks that can run in parallel)
+ # @rbs concurrency: Integer -- maximum number of concurrent tasks (default: Etc.nprocessors)
+ def initialize(concurrency: Etc.nprocessors)
  super()
  @concurrency = concurrency.to_i
- @subscriptions = Concurrent::Hash.new do |hash, key|
- hash[key] = Concurrent::Set.new
+ @subscriptions = Hash.new do |hash, key|
+ hash[key] = Set.new
  end
  end

  # Maximum number of concurrent tasks that can run in "parallel"
- # @rbs return: Integer -- current concurrency value
+ # @rbs return: Integer
  def concurrency
  synchronize { @concurrency }
  end

- # Sets the concurrency
- # @rbs concurrency: Integer -- max number of concurrent tasks that can run in "parallel"
+ # Sets the max concurrency
+ # @rbs value: Integer -- max number of concurrent tasks that can run in "parallel"
  # @rbs return: Integer -- new concurrency value
  def concurrency=(value)
  synchronize { @concurrency = value.to_i }
@@ -34,14 +40,16 @@ class LocalBus
  # Registered topics that have subscribers
  # @rbs return: Array[String] -- list of topic names
  def topics
- @subscriptions.keys
+ synchronize { @subscriptions.keys }
  end

  # Registered subscriptions
  # @rbs return: Hash[String, Array[callable]] -- mapping of topics to callables
  def subscriptions
- @subscriptions.each_with_object({}) do |(topic, callables), memo|
- memo[topic] = callables.to_a
+ synchronize do
+ @subscriptions.each_with_object({}) do |(topic, callables), memo|
+ memo[topic] = callables.to_a
+ end
  end
  end

@@ -54,7 +62,7 @@ class LocalBus
  def subscribe(topic, callable: nil, &block)
  callable ||= block
  raise ArgumentError, "Subscriber must respond to #call" unless callable.respond_to?(:call, false)
- @subscriptions[topic.to_s].add callable
+ synchronize { @subscriptions[topic.to_s].add callable }
  self
  end

@@ -64,8 +72,10 @@ class LocalBus
  # @rbs return: self
  def unsubscribe(topic, callable:)
  topic = topic.to_s
- @subscriptions[topic].delete callable
- @subscriptions.delete(topic) if @subscriptions[topic].empty?
+ synchronize do
+ @subscriptions[topic].delete callable
+ @subscriptions.delete(topic) if @subscriptions[topic].empty?
+ end
  self
  end

@@ -74,8 +84,10 @@ class LocalBus
  # @rbs return: self
  def unsubscribe_all(topic)
  topic = topic.to_s
- @subscriptions[topic].clear
- @subscriptions.delete topic
+ synchronize do
+ @subscriptions[topic].clear
+ @subscriptions.delete topic
+ end
  self
  end

@@ -88,7 +100,7 @@ class LocalBus
  unsubscribe_all topic
  end

- # Publishes a message to a topic
+ # Publishes a message
  #
  # @note If subscribers are rapidly created/destroyed mid-publish, there's a theoretical
  # possibility of object_id reuse. However, this is extremely unlikely in practice.
@@ -98,20 +110,26 @@ class LocalBus
  #
  # @note If the timeout is exceeded, the task will be cancelled before all subscribers have completed.
  #
- # Check the Subscriber for any errors.
+ # Check individual Subscribers for possible errors.
  #
  # @rbs topic: String -- topic name
- # @rbs timeout: Float -- seconds to wait before cancelling (default: 300)
+ # @rbs timeout: Float -- seconds to wait for subscribers to process the message before cancelling (default: 60)
  # @rbs payload: Hash -- message payload
- # @rbs return: Array[Subscriber] -- list of performed subscribers (empty if no subscribers)
- def publish(topic, timeout: 300, **payload)
+ # @rbs return: Message
+ def publish(topic, timeout: 60, **payload)
+ publish_message Message.new(topic, timeout: timeout.to_f, **payload)
+ end
+
+ # Publishes a pre-built message
+ # @rbs message: Message -- message to publish
+ # @rbs return: Message
+ def publish_message(message)
  barrier = Async::Barrier.new
- message = Message.new(topic, timeout: timeout, **payload)
  subscribers = subscriptions.fetch(message.topic, []).map { Subscriber.new _1, message }

  if subscribers.any?
  Sync do |task|
- task.with_timeout timeout.to_f do
+ task.with_timeout message.timeout do
  semaphore = Async::Semaphore.new(concurrency, parent: barrier)

  subscribers.each do |subscriber|
@@ -129,7 +147,8 @@ class LocalBus
  end
  end

- Pledge.new(barrier, *subscribers)
+ message.publication = Publication.new(barrier, *subscribers)
+ message
  end
  end
  end
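In 0.3.0, Bus#publish wraps its arguments in a Message, runs subscribers through the Async barrier and semaphore shown above, attaches a Publication, and returns the Message itself instead of an array of subscribers. A minimal usage sketch against this API (the topic name, payload, and block body are illustrative, not taken from the gem):

  bus = LocalBus::Bus.new
  bus.subscribe "order.created" do |message|
    # runs inside the non-blocking publish call
    message.payload[:order_id]
  end

  message = bus.publish("order.created", order_id: 123) # returns a LocalBus::Message
  message.subscribers.each do |subscriber|              # blocks until the Async barrier clears
    warn subscriber.error.message if subscriber.errored?
  end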
data/lib/local_bus/message.rb CHANGED
@@ -11,50 +11,82 @@ class LocalBus
  # @rbs timeout: Float? -- optional timeout for message processing (in seconds)
  # @rbs payload: Hash -- the message payload
  def initialize(topic, timeout: nil, **payload)
- @id = SecureRandom.uuid_v7
- @topic = topic.to_s.freeze
- @payload = payload.transform_keys(&:to_sym).freeze
- @created_at = Time.now
- @thread_id = Thread.current.object_id
- @timeout = timeout.to_f
  @metadata ||= {
- id: id,
- topic: topic,
- payload: payload,
- created_at: created_at,
- thread_id: thread_id,
- timeout: timeout
+ id: SecureRandom.uuid_v7,
+ topic: topic.to_s.freeze,
+ payload: payload.transform_keys(&:to_sym).freeze,
+ created_at: Time.now,
+ thread_id: Thread.current.object_id,
+ timeout: timeout.to_f
  }.freeze
- freeze
  end

+ # Metadata for the message
+ # @rbs return: Hash[Symbol, untyped]
+ attr_reader :metadata
+
+ # Publication representing the Async barrier and subscribers handling the message
+ # @note May be nil if processing hasn't happened yet (e.g. it was published via Station)
+ # @rbs return: Publication?
+ attr_accessor :publication
+
  # Unique identifier for the message
  # @rbs return: String
- attr_reader :id
+ def id
+ metadata[:id]
+ end

  # Message topic
  # @rbs return: String
- attr_reader :topic
+ def topic
+ metadata[:topic]
+ end

  # Message payload
  # @rbs return: Hash
- attr_reader :payload
+ def payload
+ metadata[:payload]
+ end

  # Time when the message was created or published
  # @rbs return: Time
- attr_reader :created_at
+ def created_at
+ metadata[:created_at]
+ end

  # ID of the thread that created the message
  # @rbs return: Integer
- attr_reader :thread_id
+ def thread_id
+ metadata[:thread_id]
+ end

  # Timeout for message processing (in seconds)
  # @rbs return: Float
- attr_reader :timeout
+ def timeout
+ metadata[:timeout]
+ end

- # Metadata for the message
- # @rbs return: Hash[Symbol, untyped]
- attr_reader :metadata
+ # Blocks and waits for the message to process
+ # @rbs interval: Float -- time to wait between checks (default: 0.1)
+ # @rbs return: void
+ def wait(interval: 0.1)
+ @timers ||= Timers::Group.new.tap { _1.every(interval) {} }
+ loop do
+ break if publication
+ @timers.wait
+ end
+ publication&.wait
+ ensure
+ @timers&.cancel
+ @timers = nil
+ end
+
+ # Blocks and waits for the message process then returns all subscribers
+ # @rbs return: Array[Subscriber]
+ def subscribers
+ wait
+ publication.subscribers
+ end

  # Converts the message to a hash
  # @rbs return: Hash[Symbol, untyped]
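With the per-attribute instance variables folded into a single frozen metadata hash, readers such as #id, #topic, and #timeout become plain lookups, and the new #wait and #subscribers helpers block until a Publication has been attached. A small sketch (topic and payload values are illustrative):

  message = LocalBus::Message.new("email.welcome", timeout: 30, user_id: 42)
  message.topic              # => "email.welcome"
  message.payload            # => {user_id: 42}
  message.timeout            # => 30.0
  message.metadata.frozen?   # => true
  message.publication        # => nil until a Bus processes the message
  # message.wait polls every 0.1s until a publication appears, then blocks on it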
data/lib/local_bus/publication.rb ADDED
@@ -0,0 +1,31 @@
+ # frozen_string_literal: true
+
+ # rbs_inline: enabled
+
+ class LocalBus
+ # Wraps an Async::Barrier and a list of Subscribers that are processing a Message.
+ class Publication
+ # Constructor
+ # @rbs barrier: Async::Barrier -- barrier used to wait for all subscribers
+ # @rbs subscribers: Array[Subscriber]
+ def initialize(barrier, *subscribers)
+ @barrier = barrier
+ @subscribers = subscribers
+ end
+
+ # Blocks and waits for the barrier (i.e. all subscribers to complete)
+ # @rbs return: void
+ def wait
+ @barrier.wait
+ self
+ end
+
+ # List of Subscribers that are processing a Message
+ # @note Blocks until all subscribers complete
+ # @rbs return: Array[Subscriber]
+ def subscribers
+ wait
+ @subscribers
+ end
+ end
+ end
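Publication replaces the old Pledge return value: it simply holds the Async::Barrier and the Subscribers, and both of its readers block until processing finishes. A sketch, assuming `message` was published through a Bus as above:

  publication = message.publication  # attached by Bus#publish_message
  publication.wait                   # blocks on the Async::Barrier, returns the publication
  publication.subscribers            # waits first, then returns the Subscriber list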
data/lib/local_bus/station.rb CHANGED
@@ -5,119 +5,137 @@
  # rubocop:disable Style/ArgumentsForwarding

  class LocalBus
- # An in-process message queuing system that buffers and publishes messages to Bus.
- # This class acts as an intermediary, queuing messages internally before publishing them to the Bus.
+ # The Station serves as a queuing system for messages, similar to a bus station where passengers wait for their bus.
  #
- # @note Station shares the same interface as Bus and is thus a message bus.
- # The key difference is that Stations are multi-threaded and will not block the main thread.
+ # When a message is published to the Station, it is queued and processed at a later time, allowing for deferred execution.
+ # This is particularly useful for tasks that can be handled later.
  #
- # Three fallback policies are supported:
- # 1. `abort` - Raises an exception and discards the task when the queue is full (default)
- # 2. `discard` - Discards the task when the queue is full
- # 3. `caller_runs` - Executes the task on the calling thread when the queue is full,
- # This effectively jumps the queue (and blocks the main thread) but ensures the task is performed
- #
- # IMPORTANT: Be sure to release resources like database connections in subscribers when publishing via Station.
+ # The Station employs a thread pool to manage message processing, enabling high concurrency and efficient resource utilization.
+ # Messages can also be prioritized, ensuring that higher-priority tasks are processed first.
  #
+ # @note: While the Station provides a robust mechanism for background processing,
+ # it's important to understand that the exact timing of message processing is not controlled by the publisher,
+ # and messages will be processed as resources become available.
  class Station
  include MonitorMixin

- class TimeoutError < StandardError; end
-
- # Default options for Concurrent::FixedThreadPool (can be overridden via the constructor)
- # @see https://ruby-concurrency.github.io/concurrent-ruby/1.3.4/Concurrent/ThreadPoolExecutor.html
- THREAD_POOL_OPTIONS = {
- max_queue: 5_000, # max number of pending tasks allowed in the queue
- fallback_policy: :caller_runs # Options: :abort, :discard, :caller_runs
- }.freeze
+ class CapacityError < StandardError; end

  # Constructor
+ #
+ # @note Delays process exit in an attempt to flush the queue to avoid dropping messages.
+ # Exit flushing makes a "best effort" to process all messages, but it's not guaranteed.
+ # Will not delay process exit when the queue is empty.
+ #
  # @rbs bus: Bus -- local message bus (default: Bus.new)
- # @rbs threads: Integer -- number of threads (default: Concurrent.processor_count)
- # @rbs default_timeout: Float -- seconds to wait for a future to complete
- # @rbs shutdown_timeout: Float -- seconds to wait for all futures to complete on process exit
- # @rbs options: Hash[Symbol, untyped] -- Concurrent::FixedThreadPool options
+ # @rbs interval: Float -- queue polling interval in seconds (default: 0.01)
+ # @rbs limit: Integer -- max queue size (default: 10_000)
+ # @rbs threads: Integer -- number of threads to use (default: Etc.nprocessors)
+ # @rbs timeout: Float -- seconds to wait for subscribers to process the message before cancelling (default: 60)
+ # @rbs wait: Float -- seconds to wait for the queue to flush at process exit (default: 5)
  # @rbs return: void
- def initialize(
- bus: Bus.new,
- threads: Concurrent.processor_count,
- default_timeout: 0,
- shutdown_timeout: 8,
- **options
- )
+ def initialize(bus: Bus.new, interval: 0.01, limit: 10_000, threads: Etc.nprocessors, timeout: 60, wait: 5)
  super()
  @bus = bus
- @threads = [2, threads].max.to_i
- @default_timeout = default_timeout.to_f
- @shutdown_timeout = shutdown_timeout.to_f
- @shutdown = Concurrent::AtomicBoolean.new(false)
- start(**options)
+ @interval = [interval.to_f, 0.01].max
+ @limit = limit.to_i.positive? ? limit.to_i : 10_000
+ @threads = [threads.to_i, 1].max
+ @timeout = timeout.to_f
+ @queue = Containers::PriorityQueue.new
+ at_exit { stop timeout: [wait.to_f, 1].max }
+ start
  end

  # Bus instance
  # @rbs return: Bus
  attr_reader :bus

- # Number of threads used to process messages
+ # Queue polling interval in seconds
+ # @rbs return: Float
+ attr_reader :interval
+
+ # Max queue size
+ # @rbs return: Integer
+ attr_reader :limit
+
+ # Number of threads to use
  # @rbs return: Integer
  attr_reader :threads

  # Default timeout for message processing (in seconds)
  # @rbs return: Float
- attr_reader :default_timeout
-
- # Timeout for graceful shutdown (in seconds)
- # @rbs return: Float
- attr_reader :shutdown_timeout
+ attr_reader :timeout

- # Starts the broker
- # @rbs options: Hash[Symbol, untyped] -- Concurrent::FixedThreadPool options
+ # Starts the station
+ # @rbs interval: Float -- queue polling interval in seconds (default: 0.01)
+ # @rbs threads: Integer -- number of threads to use (default: self.threads)
  # @rbs return: void
- def start(**options)
+ def start(interval: self.interval, threads: self.threads)
+ interval = [interval.to_f, 0.01].max
+ threads = [threads.to_i, 1].max
+
  synchronize do
- return if running?
+ return if running? || stopping?
+
+ timers = Timers::Group.new
+ @pool = []
+ threads.times do
+ @pool << Thread.new do
+ Thread.current.report_on_exception = true
+ timers.every interval do
+ message = synchronize { @queue.pop unless @queue.empty? || stopping? }
+ bus.send :publish_message, message if message
+ end

- start_shutdown_handler
- @pool = Concurrent::FixedThreadPool.new(threads, THREAD_POOL_OPTIONS.merge(options))
- enable_safe_shutdown on: ["HUP", "INT", "QUIT", "TERM"]
+ loop do
+ timers.wait
+ break if stopping?
+ end
+ ensure
+ timers.cancel
+ end
+ end
  end
  end

- # Stops the broker
- # @rbs timeout: Float -- seconds to wait for all futures to complete
+ # Stops the station
+ # @rbs timeout: Float -- seconds to wait for message processing before killing the thread pool (default: nil)
  # @rbs return: void
- def stop(timeout: shutdown_timeout)
- return unless @shutdown.make_true # Ensure we only stop once
-
+ def stop(timeout: nil)
  synchronize do
- if running?
- # First try graceful shutdown
- pool.shutdown
-
- # If graceful shutdown fails, force termination
- pool.kill unless pool.wait_for_termination(timeout)
-
- @pool = nil
- end
- rescue
- nil # ignore errors during shutdown
+ return unless running?
+ return if stopping?
+ @stopping = true
  end

- # Clean up shutdown handler
- if @shutdown_thread&.alive?
- @shutdown_queue&.close
- @shutdown_thread&.join timeout
+ @pool&.each do |thread|
+ timeout.is_a?(Numeric) ? thread.join(timeout) : thread.join
  end
+ ensure
+ @stopping = false
+ @pool = nil
+ end

- @shutdown_thread = nil
- @shutdown_queue = nil
- @shutdown_completed&.set
+ def stopping?
+ synchronize { !!@stopping }
  end

- # Indicates if the broker is running
+ # Indicates if the station is running
  # @rbs return: bool
  def running?
- synchronize { pool&.running? }
+ synchronize { !!@pool }
+ end
+
+ # Indicates if the queue is empty
+ # @rbs return: bool
+ def empty?
+ synchronize { @queue.empty? }
+ end
+
+ # Number of unprocessed messages in the queue
+ # @rbs return: Integer
+ def count
+ synchronize { @queue.size }
  end

  # Subscribe to a topic
@@ -125,103 +143,46 @@ class LocalBus
  # @rbs callable: (Message) -> untyped -- callable that will process messages published to the topic
  # @rbs &block: (Message) -> untyped -- alternative way to provide a callable
  # @rbs return: self
- def subscribe(topic, callable: nil, &block)
- bus.subscribe(topic, callable: callable || block)
+ def subscribe(...)
+ bus.subscribe(...)
  self
  end

- # Unsubscribe from a topic
+ # Unsubscribes a callable from a topic
  # @rbs topic: String -- topic name
+ # @rbs callable: (Message) -> untyped -- subscriber that should no longer receive messages
  # @rbs return: self
- def unsubscribe(topic)
- bus.unsubscribe(topic)
+ def unsubscribe(...)
+ bus.unsubscribe(...)
  self
  end

  # Unsubscribes all subscribers from a topic and removes the topic
  # @rbs topic: String -- topic name
  # @rbs return: self
- def unsubscribe_all(topic)
- bus.unsubscribe_all topic
+ def unsubscribe_all(...)
+ bus.unsubscribe_all(...)
  self
  end

- # Publishes a message to Bus on a separate thread keeping the main thread free for additional work.
- #
- # @note This allows you to publish messages when performing operations like handling web requests
- # without blocking the main thread and slowing down the response.
- #
- # @see https://ruby-concurrency.github.io/concurrent-ruby/1.3.4/Concurrent/Promises/Future.html
+ # Publishes a message
  #
  # @rbs topic: String | Symbol -- topic name
+ # @rbs priority: Integer -- priority of the message, higher number == higher priority (default: 1)
  # @rbs timeout: Float -- seconds to wait before cancelling
  # @rbs payload: Hash[Symbol, untyped] -- message payload
- # @rbs return: Concurrent::Promises::Future
- def publish(topic, timeout: default_timeout, **payload)
- timeout = timeout.to_f
-
- future = Concurrent::Promises.future_on(pool) do
- case timeout
- in 0 then bus.publish(topic, **payload).value
- else bus.publish(topic, timeout: timeout, **payload).value
- end
- end
-
- # ensure calls to future.then use the thread pool
- executor = pool
- future.singleton_class.define_method :then do |&block|
- future.then_on(executor, &block)
- end
-
- future
- end
-
- private
-
- # Thread pool used for asynchronous operations
- # @rbs return: Concurrent::FixedThreadPool
- attr_reader :pool
-
- # Starts the shutdown handler thread
- # @rbs return: void
- def start_shutdown_handler
- return if @shutdown.true?
-
- @shutdown_queue = Queue.new
- @shutdown_completed = Concurrent::Event.new
- @shutdown_thread = Thread.new do
- catch :shutdown do
- loop do
- signal = @shutdown_queue.pop # blocks until something is available
- throw :shutdown if @shutdown_queue.closed?
-
- stop # initiate shutdown sequence
-
- # Re-raise the signal to let the process terminate
- if signal
- # Remove our trap handler before re-raising
- trap signal, "DEFAULT"
- Process.kill signal, Process.pid
- end
- rescue ThreadError, ClosedQueueError
- break # queue was closed, exit gracefully
- end
- end
- @shutdown_completed.set
- end
+ # @rbs return: Message
+ def publish(topic, priority: 1, timeout: self.timeout, **payload)
+ publish_message Message.new(topic, timeout: timeout, **payload), priority: priority
  end

- # Enables safe shutdown on process exit by trapping specified signals
- # @rbs on: Array[String] -- signals to trap
- # @rbs return: void
- def enable_safe_shutdown(on:)
- at_exit { stop }
- on.each do |signal|
- trap signal do
- @shutdown_queue.push signal unless @shutdown.true?
- rescue
- nil
- end
+ # Publishes a pre-built message
+ # @rbs message: Message -- message to publish
+ # @rbs return: Message
+ def publish_message(message, priority: 1)
+ synchronize do
+ raise CapacityError, "Station is at capacity! (limit: #{limit})" if @queue.size >= limit
+ @queue.push message, priority
  end
  end
  end
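The reworked Station drops Concurrent::FixedThreadPool and futures in favor of its own polling threads over a priority queue, with a capacity limit and an at_exit flush. A usage sketch against the API in this diff (topic, payload, and priority values are illustrative):

  station = LocalBus::Station.new(threads: 2, limit: 1_000)
  station.subscribe "report.requested" do |message|
    # deferred work runs later on one of the Station's polling threads
    message.payload[:report_id]
  end

  station.publish("report.requested", priority: 5, report_id: 7) # raises CapacityError when the queue is full
  station.count  # => number of queued, unprocessed messages
  station.stop   # joins the polling threads; also registered via at_exit with a bounded wait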
data/lib/local_bus/subscriber.rb CHANGED
@@ -5,6 +5,8 @@
  class LocalBus
  # Wraps a Callable (Proc) and Message intended for asynchronous execution.
  class Subscriber
+ include MonitorMixin
+
  # Custom error class for Subscriber errors
  class Error < StandardError
  # Constructor
@@ -24,6 +26,7 @@ class LocalBus
  # @rbs callable: #call -- the subscriber's callable object
  # @rbs message: Message -- the message to be processed
  def initialize(callable, message)
+ super()
  @callable = callable
  @message = message
  @id = callable.object_id
@@ -68,21 +71,29 @@ class LocalBus
  metadata.any?
  end

- # Checks if the subscriber is pending
+ # Indicates if the subscriber is pending or unperformed
  # @rbs return: bool
  def pending?
  metadata.empty?
  end

+ # Indicates if the subscriber has errored
+ # @rbs return: bool
+ def errored?
+ !!error
+ end
+
  # Performs the subscriber's callable
  # @rbs return: void
  def perform
- return if performed?
-
- with_metadata do
- @value = callable.call(message)
- rescue => cause
- @error = Error.new("Invocation failed! #{cause.message}", cause: cause)
+ synchronize do
+ return if performed?
+
+ with_metadata do
+ @value = callable.call(message)
+ rescue => cause
+ @error = Error.new("Invocation failed! #{cause.message}", cause: cause)
+ end
  end
  end

@@ -129,7 +140,7 @@ class LocalBus
  finished_at: Time.now,
  duration: Time.now - started_at,
  latency: Time.now - message.created_at,
- message: message
+ message: message.to_h
  }.freeze
  end
  end
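With #perform now synchronized and the metadata storing message.to_h rather than the Message object, results can be inspected safely after the fact. A sketch, assuming `message` was published on a Bus as in the earlier examples:

  subscriber = message.subscribers.first   # waits for processing to finish
  subscriber.performed?                    # => true once metadata is populated
  subscriber.errored?                      # => true only if the callable raised
  subscriber.metadata[:latency]            # seconds between Message#created_at and completion
  subscriber.metadata[:message]            # now a plain Hash (message.to_h), not the Message itself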