subserver 0.1.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/CHANGELOG.md +2 -0
- data/CONTRIBUTING.md +54 -0
- data/LICENSE +21 -0
- data/README.md +5 -0
- data/bin/subserver +16 -0
- data/lib/subserver.rb +185 -0
- data/lib/subserver/cli.rb +353 -0
- data/lib/subserver/exception_handler.rb +29 -0
- data/lib/subserver/health.rb +33 -0
- data/lib/subserver/launcher.rb +71 -0
- data/lib/subserver/listener.rb +157 -0
- data/lib/subserver/logging.rb +122 -0
- data/lib/subserver/manager.rb +153 -0
- data/lib/subserver/message_logger.rb +24 -0
- data/lib/subserver/middleware/active_record.rb +21 -0
- data/lib/subserver/middleware/chain.rb +128 -0
- data/lib/subserver/pubsub.rb +20 -0
- data/lib/subserver/rails.rb +57 -0
- data/lib/subserver/subscriber.rb +25 -0
- data/lib/subserver/util.rb +62 -0
- data/lib/subserver/version.rb +4 -0
- metadata +86 -0
@@ -0,0 +1,29 @@
|
|
1
|
+
# frozen_string_literal: true
require 'subserver'

module Subserver
  module ExceptionHandler

    # Default error handler: dumps the context hash (when present), the
    # exception class/message, and the backtrace to the Subserver logger.
    class Logger
      # ex       - the Exception being reported.
      # ctx_hash - arbitrary context serialized to JSON alongside the error.
      def call(ex, ctx_hash)
        Subserver.logger.warn(Subserver.dump_json(ctx_hash)) unless ctx_hash.empty?
        Subserver.logger.warn("#{ex.class.name}: #{ex.message}")
        Subserver.logger.warn(ex.backtrace.join("\n")) unless ex.backtrace.nil?
      end

      # Register this logger as the default handler at load time.
      Subserver.error_handlers << Subserver::ExceptionHandler::Logger.new
    end

    # Runs every registered error handler against +ex+. A crashing handler
    # must never take down the caller, so handler failures are logged and
    # swallowed, and the remaining handlers still run.
    def handle_exception(ex, ctx_hash = {})
      Subserver.error_handlers.each do |handler|
        begin
          handler.call(ex, ctx_hash)
        rescue => handler_ex
          # BUG FIX: previously `rescue => ex` shadowed the original
          # exception, so handlers after a crashing one received the
          # handler's own error instead of the error being reported.
          Subserver.logger.error "!!! ERROR HANDLER THREW AN ERROR !!!"
          Subserver.logger.error handler_ex
          Subserver.logger.error handler_ex.backtrace.join("\n") unless handler_ex.backtrace.nil?
        end
      end
    end
  end
end
|
@@ -0,0 +1,33 @@
|
|
1
|
+
require 'socket' # Sockets are in standard library

module Subserver
  # Minimal HTTP health-check endpoint. Answers every incoming request on
  # the configured TCP port with a static "200 / Subserver Online" response.
  class Health

    attr_accessor :server

    # Previously hard-coded inside #initialize.
    DEFAULT_PORT = 4481

    # port - TCP port to bind the health server to (default: 4481, the
    #        historical hard-coded value, so existing callers are unaffected).
    def initialize(port = DEFAULT_PORT)
      @server = TCPServer.new port
    end

    # Accept connections in a loop and reply with a static 200 response.
    # Blocks the calling thread until #stop closes the server socket.
    def start
      begin
        while session = @server.accept
          _request = session.gets # drain the request line; contents are ignored

          session.print "HTTP/1.1 200\r\n" # 1
          session.print "Content-Type: text/html\r\n" # 2
          session.print "\r\n" # 3
          session.print "Subserver Online" #4
          session.close
        end
      rescue Errno::ECONNRESET, Errno::EPIPE => e
        # Client disconnected mid-exchange; log it and keep serving.
        puts e.message
        retry
      end
    end

    # Close the listening socket; causes #start's accept loop to terminate.
    def stop
      @server.close
    end
  end
end
|
@@ -0,0 +1,71 @@
|
|
1
|
+
# frozen_string_literal: true
require 'subserver/manager'

module Subserver
  # The Launcher is a very simple Actor whose job is to
  # start, monitor and stop the core Actors in Subserver.
  # If any of these actors die, the Subserver process exits
  # immediately.
  class Launcher
    include Util

    attr_accessor :manager

    def initialize(options)
      @manager = Subserver::Manager.new(options)
      @done = false
      @options = options
    end

    # Boot the manager (and, through it, all listeners).
    def run
      @manager.start
    end

    # Stops this instance from processing any more jobs,
    def quiet
      @done = true
      @manager.quiet
    end

    # Shuts down the process. This method does not
    # return until all work is complete and cleaned up.
    # It can take up to the timeout to complete.
    def stop
      shutdown_deadline = Time.now + @options[:timeout]
      quiet
      @manager.stop(shutdown_deadline)
    end

    def stopping?
      @done
    end

    private unless $TESTING

    # Static process metadata, built once and memoized.
    def to_data
      @data ||= {
        'hostname' => hostname,
        'started_at' => Time.now.to_f,
        'pid' => $$,
        'tag' => @options[:tag] || '',
        'queues' => @options[:queues].uniq,
        'labels' => @options[:labels],
        'identity' => identity,
      }
    end

    def to_json
      # this data changes infrequently so dump it to a string
      # now so we don't need to dump it every heartbeat.
      @json ||= Subserver.dump_json(to_data)
    end

  end
end
|
@@ -0,0 +1,157 @@
|
|
1
|
+
# frozen_string_literal: true
require 'subserver/util'
require 'subserver/message_logger'
require 'subserver/pubsub'
require 'thread'

module Subserver
  ##
  # The Listener is a standalone thread which:
  #
  # 1. Starts Google Pubsub subscription threads which:
  #   a. Instantiate the Subscription class
  #   b. Run the middleware chain
  #   c. call subscriber #perform
  #
  # A Listener can exit due to shutdown (listener_stopped)
  # or due to an error during message processing (listener_died)
  #
  # If an error occurs during message processing, the
  # Listener calls the Manager to create a new one
  # to replace itself and exits.
  #
  class Listener

    include Util

    attr_reader :thread
    attr_reader :subscriber

    # mgr        - the Manager coordinating this listener's lifecycle.
    # subscriber - a class mixing in Subserver::Subscriber.
    def initialize(mgr, subscriber)
      @mgr = mgr
      @down = false
      @done = false
      @thread = nil
      @reloader = Subserver.options[:reloader]
      @subscriber = subscriber
      @subscription = retrieve_subscription
      @logging = (mgr.options[:message_logger] || Subserver::MessageLogger).new
    end

    def name
      @subscriber.name
    end

    # Graceful stop: waits for in-flight messages, then notifies the manager.
    def stop
      @done = true
      return if !@thread

      # Stop the listener and wait for current messages to finish processing.
      @pubsub_listener.stop.wait!
      @mgr.listener_stopped(self)
    end

    # Hard stop: does not wait for in-flight message processing.
    def kill
      @done = true
      return if !@thread
      # Hard stop the listener and shutdown thread after timeout passes.
      @pubsub_listener.stop
      @thread.raise ::Subserver::Shutdown
    end

    def start
      @thread ||= safe_thread("listener", &method(:run))
    end

    private unless $TESTING

    # Fetches the Pubsub subscription named in the subscriber's options.
    # Raises ArgumentError when it cannot be fetched.
    # (Renamed from the misspelled `retrive_subscrption`; alias kept below.)
    def retrieve_subscription
      subscription_name = @subscriber.get_subserver_options[:subscription]
      begin
        subscription = Pubsub.client.subscription subscription_name
      rescue Google::Cloud::Error => e
        raise ArgumentError, "Invalid Subscription name: #{subscription_name} Please ensure your Pubsub subscription exists."
      end
      subscription
    end
    # Backwards-compatible alias for any existing callers/tests.
    alias_method :retrive_subscrption, :retrieve_subscription

    # Opens the streaming pull; each received message runs through
    # #process_message on the client library's worker threads.
    def connect_subscriber
      options = @subscriber.get_subserver_options
      logger.debug("Connecting to subscription with options: #{options}")
      @pubsub_listener = @subscription.listen streams: options[:streams], threads: options[:threads] do |received_message|
        logger.debug("Message Received: #{received_message}")
        process_message(received_message)
      end
    end

    def run
      begin
        connect_subscriber
        @pubsub_listener.start
      rescue Subserver::Shutdown
        @mgr.listener_stopped(self)
      rescue Exception => ex
        # Intentionally broad: any unexpected error must be reported so the
        # manager can replace this listener.
        @mgr.listener_died(self, @subscriber, ex)
      end
    end

    # Runs the middleware chain and then the subscriber for one message.
    def process_message(received_message)
      begin
        logger.debug("Executing Middleware")
        Subserver.middleware.invoke(@subscriber, received_message) do
          execute_processor(@subscriber, received_message)
        end
      rescue Subserver::Shutdown
        # Reject message if shutdown
        received_message.reject!
      rescue Exception => ex
        # BUG FIX: the rescue variable is +ex+; the old code referenced an
        # undefined local +e+, which raised NameError here and masked the
        # real processing error. Bare +raise+ re-raises the original.
        handle_exception(ex, { context: "Exception raised during message processing.", message: received_message })
        raise
      end
    end

    def execute_processor(subscriber, received_message)
      subscriber.new.perform(received_message)
    end

    # Ruby doesn't provide atomic counters out of the box so we'll
    # implement something simple ourselves.
    # https://bugs.ruby-lang.org/issues/14706
    class Counter
      def initialize
        @value = 0
        @lock = Mutex.new
      end

      def incr(amount=1)
        @lock.synchronize { @value = @value + amount }
      end

      # Returns the current value and zeroes the counter atomically.
      def reset
        @lock.synchronize { val = @value; @value = 0; val }
      end
    end

    PROCESSED = Counter.new
    FAILURE = Counter.new
    # This is mutable global state but because each thread is storing
    # its own unique key/value, there's no thread-safety issue AFAIK.
    WORKER_STATE = {}

    # Wraps a unit of work: records it in WORKER_STATE while running and
    # bumps FAILURE on error / PROCESSED always.
    def stats(job_hash, queue)
      tid = Subserver::Logging.tid
      WORKER_STATE[tid] = {:queue => queue, :payload => job_hash, :run_at => Time.now.to_i }

      begin
        yield
      rescue Exception
        FAILURE.incr
        raise
      ensure
        WORKER_STATE.delete(tid)
        PROCESSED.incr
      end
    end

  end
end
|
@@ -0,0 +1,122 @@
|
|
1
|
+
# frozen_string_literal: true
require 'time'
require 'logger'
require 'fcntl'

module Subserver
  module Logging

    # Default log formatter: ISO8601 UTC timestamp, PID, thread id, optional
    # per-thread context, severity, then the message.
    class Pretty < Logger::Formatter
      SPACE = " "

      # Provide a call() method that returns the formatted message.
      def call(severity, time, _program_name, message)
        "#{time.utc.iso8601(3)} #{::Process.pid} TID-#{Subserver::Logging.tid}#{context} #{severity}: #{message}\n"
      end

      # Returns " ctx1 ctx2" from the current thread's context stack, or nil
      # when the stack is unset or empty.
      def context
        stack = Thread.current[:subserver_context]
        " #{stack.join(SPACE)}" if stack && stack.any?
      end
    end

    # Pretty minus the timestamp — for platforms (e.g. Heroku dynos) that
    # timestamp log lines themselves.
    class WithoutTimestamp < Pretty
      def call(severity, _time, _program_name, message)
        "#{::Process.pid} TID-#{Subserver::Logging.tid}#{context} #{severity}: #{message}\n"
      end
    end

    # Short, stable per-thread id, memoized in thread-local storage.
    def self.tid
      Thread.current['subserver_tid'] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
    end

    def self.job_hash_context(job_hash)
      # If we're using a wrapper class, like ActiveJob, use the "wrapped"
      # attribute to expose the underlying thing.
      klass = job_hash['wrapped'] || job_hash["class"]
      bid = job_hash['bid']
      "#{klass} JID-#{job_hash['jid']}#{" BID-#{bid}" if bid}"
    end

    def self.with_job_hash_context(job_hash, &block)
      with_context(job_hash_context(job_hash), &block)
    end

    # Pushes +msg+ onto the thread's context stack for the duration of the
    # block; always pops it again, even when the block raises.
    def self.with_context(msg)
      (Thread.current[:subserver_context] ||= []) << msg
      yield
    ensure
      Thread.current[:subserver_context].pop
    end

    def self.initialize_logger(log_target = STDOUT)
      previous_logger = defined?(@logger) ? @logger : nil
      @logger = Logger.new(log_target)
      @logger.level = Logger::INFO
      @logger.formatter = ENV['DYNO'] ? WithoutTimestamp.new : Pretty.new
      previous_logger.close if previous_logger && !$TESTING # don't want to close testing's STDOUT logging
      @logger
    end

    def self.logger
      defined?(@logger) ? @logger : initialize_logger
    end

    def self.logger=(log)
      @logger = log || Logger.new(File::NULL)
    end

    # This reopens ALL logfiles in the process that have been rotated
    # using logrotate(8) (without copytruncate) or similar tools.
    # A +File+ object is considered for reopening if it is:
    #   1) opened with the O_APPEND and O_WRONLY flags
    #   2) the current open file handle does not match its original open path
    #   3) unbuffered (as far as userspace buffering goes, not O_SYNC)
    # Returns the number of files reopened
    def self.reopen_logs
      candidates = []
      append_flags = File::WRONLY | File::APPEND

      ObjectSpace.each_object(File) do |file|
        begin
          if !file.closed? && file.stat.file? && file.sync && (file.fcntl(Fcntl::F_GETFL) & append_flags) == append_flags
            candidates << file
          end
        rescue IOError, Errno::EBADF
        end
      end

      reopened = 0
      candidates.each do |file|
        original_stat = begin
          file.stat
        rescue IOError, Errno::EBADF
          next
        end

        begin
          on_disk = File.stat(file.path)
          # Still the same inode/device: the file has not been rotated.
          next if original_stat.ino == on_disk.ino && original_stat.dev == on_disk.dev
        rescue Errno::ENOENT
        end

        begin
          File.open(file.path, 'a') { |tmpfp| file.reopen(tmpfp) }
          file.sync = true
          reopened += 1
        rescue IOError, Errno::EBADF
          # not much we can do...
        end
      end
      reopened
    rescue RuntimeError => ex
      # RuntimeError: ObjectSpace is disabled; each_object will only work with Class, pass -X+O to enable
      puts "Unable to reopen logs: #{ex.message}"
    end

    # Instance-level convenience for classes that include this module.
    def logger
      Subserver::Logging.logger
    end
  end
end
|
@@ -0,0 +1,153 @@
|
|
1
|
+
# frozen_string_literal: true
require 'subserver/util'
require 'subserver/listener'
require 'thread'
require 'set'

module Subserver

  ##
  # The Manager is the central coordination point in Subserver, controlling
  # the lifecycle of the Google Cloud Listeners.
  #
  # Tasks:
  #
  # 1. start: Load subscribers and start listeners.
  # 2. listener_died: restart listener
  # 3. quiet: tell listeners to stop listening and finish processing messages then shutdown.
  # 4. stop: hard stop the listeners by deadline.
  #
  # Note that only the last task requires its own Thread since it has to monitor
  # the shutdown process. The other tasks are performed by other threads.
  #
  class Manager
    include Util

    attr_reader :listeners
    attr_reader :options

    def initialize(options={})
      logger.debug { options.inspect }
      @options = options

      @done = false
      @listeners = Set.new(subscribers.map { |subscriber| Listener.new(self, subscriber) })

      @plock = Mutex.new
    end

    # Start every listener, or warn when no subscribers were found.
    def start
      if @listeners.any?
        logger.info("Starting Listeners For: #{@listeners.map(&:name).join(', ')}")
        @listeners.each(&:start)
      else
        logger.warn("No Listeners starting: Couldn't find any subscribers.")
      end
    end

    # Ask every listener to finish its current work and shut down.
    def quiet
      return if @done
      @done = true

      logger.info { "Stopping listeners" }
      @listeners.each(&:stop)
      fire_event(:quiet, reverse: true)
    end

    # hack for quicker development / testing environment
    PAUSE_TIME = STDOUT.tty? ? 0.1 : 0.5

    # Quiet the listeners, then poll until they have all stopped or the
    # deadline passes; any stragglers get hard-killed.
    def stop(deadline)
      quiet
      fire_event(:shutdown, reverse: true)

      # some of the shutdown events can be async,
      # we don't have any way to know when they're done but
      # give them a little time to take effect
      sleep PAUSE_TIME
      return if @listeners.empty?

      logger.info { "Pausing to allow listeners to finish..." }
      while (deadline - Time.now) > PAUSE_TIME
        return if @listeners.empty?
        sleep PAUSE_TIME
      end
      return if @listeners.empty?

      hard_shutdown
    end

    def listener_stopped(listener)
      @plock.synchronize { @listeners.delete(listener) }
    end

    # Replace a crashed listener with a fresh one (unless shutting down).
    def listener_died(listener, subscriber, reason)
      @plock.synchronize do
        @listeners.delete(listener)
        unless @done
          replacement = Listener.new(self, subscriber)
          @listeners << replacement
          replacement.start
        end
      end
    end

    def stopped?
      @done
    end

    def subscribers
      @subscribers ||= load_subscribers
    end

    private

    def hard_shutdown
      # We've reached the timeout and we still have busy listeners.
      # They must die but their jobs shall live on.
      busy = @plock.synchronize { @listeners.dup }

      if busy.size > 0
        logger.warn { "Killing #{busy.size} busy worker threads" }
        # Any message not acknowledged will be available for reprocessing
      end

      busy.each(&:kill)
    end

    def load_subscribers
      # Expand Subscriber Directory from relative require
      subscriber_glob = File.expand_path("#{options[:subscriber_dir]}/*.rb")

      # Load existing set of classes
      known_classes = ObjectSpace.each_object(Class).to_a

      # Require all subscriber files
      Dir[subscriber_glob].each { |file| require file }

      # Create set with only newly created classes from require loop
      loaded_classes = ObjectSpace.each_object(Class).to_a - known_classes

      # Only include named classes that have included the Subscriber module
      loaded_classes.select do |klass|
        klass.name && klass < ::Subserver::Subscriber && options[:queues].include?(klass.subserver_options[:queue])
      end
    end

  end
end
|