kicks 3.0.0.pre

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. checksums.yaml +7 -0
  2. data/.github/workflows/ci.yml +24 -0
  3. data/.gitignore +12 -0
  4. data/ChangeLog.md +142 -0
  5. data/Dockerfile +24 -0
  6. data/Dockerfile.slim +20 -0
  7. data/Gemfile +8 -0
  8. data/Guardfile +8 -0
  9. data/LICENSE.txt +22 -0
  10. data/README.md +209 -0
  11. data/Rakefile +12 -0
  12. data/bin/sneakers +6 -0
  13. data/docker-compose.yml +24 -0
  14. data/examples/benchmark_worker.rb +22 -0
  15. data/examples/max_retry_handler.rb +68 -0
  16. data/examples/metrics_worker.rb +34 -0
  17. data/examples/middleware_worker.rb +36 -0
  18. data/examples/newrelic_metrics_worker.rb +40 -0
  19. data/examples/profiling_worker.rb +69 -0
  20. data/examples/sneakers.conf.rb.example +11 -0
  21. data/examples/title_scraper.rb +36 -0
  22. data/examples/workflow_worker.rb +23 -0
  23. data/kicks.gemspec +44 -0
  24. data/lib/sneakers/cli.rb +122 -0
  25. data/lib/sneakers/concerns/logging.rb +34 -0
  26. data/lib/sneakers/concerns/metrics.rb +34 -0
  27. data/lib/sneakers/configuration.rb +125 -0
  28. data/lib/sneakers/content_encoding.rb +47 -0
  29. data/lib/sneakers/content_type.rb +47 -0
  30. data/lib/sneakers/error_reporter.rb +33 -0
  31. data/lib/sneakers/errors.rb +2 -0
  32. data/lib/sneakers/handlers/maxretry.rb +219 -0
  33. data/lib/sneakers/handlers/oneshot.rb +26 -0
  34. data/lib/sneakers/metrics/logging_metrics.rb +16 -0
  35. data/lib/sneakers/metrics/newrelic_metrics.rb +32 -0
  36. data/lib/sneakers/metrics/null_metrics.rb +13 -0
  37. data/lib/sneakers/metrics/statsd_metrics.rb +21 -0
  38. data/lib/sneakers/middleware/config.rb +23 -0
  39. data/lib/sneakers/publisher.rb +49 -0
  40. data/lib/sneakers/queue.rb +87 -0
  41. data/lib/sneakers/runner.rb +91 -0
  42. data/lib/sneakers/spawner.rb +30 -0
  43. data/lib/sneakers/support/production_formatter.rb +11 -0
  44. data/lib/sneakers/support/utils.rb +18 -0
  45. data/lib/sneakers/tasks.rb +66 -0
  46. data/lib/sneakers/version.rb +3 -0
  47. data/lib/sneakers/worker.rb +162 -0
  48. data/lib/sneakers/workergroup.rb +60 -0
  49. data/lib/sneakers.rb +125 -0
  50. data/log/.gitkeep +0 -0
  51. data/scripts/local_integration +2 -0
  52. data/scripts/local_worker +3 -0
  53. data/spec/fixtures/integration_worker.rb +18 -0
  54. data/spec/fixtures/require_worker.rb +23 -0
  55. data/spec/gzip_helper.rb +15 -0
  56. data/spec/sneakers/cli_spec.rb +75 -0
  57. data/spec/sneakers/concerns/logging_spec.rb +39 -0
  58. data/spec/sneakers/concerns/metrics_spec.rb +38 -0
  59. data/spec/sneakers/configuration_spec.rb +97 -0
  60. data/spec/sneakers/content_encoding_spec.rb +81 -0
  61. data/spec/sneakers/content_type_spec.rb +81 -0
  62. data/spec/sneakers/integration_spec.rb +158 -0
  63. data/spec/sneakers/publisher_spec.rb +179 -0
  64. data/spec/sneakers/queue_spec.rb +169 -0
  65. data/spec/sneakers/runner_spec.rb +70 -0
  66. data/spec/sneakers/sneakers_spec.rb +77 -0
  67. data/spec/sneakers/support/utils_spec.rb +44 -0
  68. data/spec/sneakers/tasks/sneakers_run_spec.rb +115 -0
  69. data/spec/sneakers/worker_handlers_spec.rb +469 -0
  70. data/spec/sneakers/worker_spec.rb +712 -0
  71. data/spec/sneakers/workergroup_spec.rb +83 -0
  72. data/spec/spec_helper.rb +21 -0
  73. metadata +352 -0

data/lib/sneakers/handlers/maxretry.rb
@@ -0,0 +1,219 @@
+ require 'base64'
+ require 'json'
+
+ module Sneakers
+   module Handlers
+     #
+     # Maxretry uses dead letter policies on RabbitMQ to requeue and retry
+     # messages after failure (rejections and errors). When the maximum
+     # number of retries is reached it will put the message on an error queue.
+     # This handler will only retry at the queue level. To accomplish that, the
+     # setup is a bit complex.
+     #
+     # Input:
+     #   worker_exchange (eXchange)
+     #   worker_queue (Queue)
+     # We create:
+     #   worker_queue-retry - (X) where we set up the worker queue to dead-letter.
+     #   worker_queue-retry - (Q) queue bound to ^ exchange, dead-letters to
+     #     worker_queue-retry-requeue.
+     #   worker_queue-error - (X) where to send max-retry failures
+     #   worker_queue-error - (Q) bound to worker_queue-error.
+     #   worker_queue-retry-requeue - (X) exchange to bind worker_queue to for
+     #     requeuing directly to the worker_queue.
+     #
+     # This requires that you set up arguments on the worker queue to line up the
+     # dead letter queue. See the example for more information.
+     #
+     # Many of these can be overridden with options:
+     # - retry_exchange - sets retry exchange & queue
+     # - retry_error_exchange - sets error exchange and queue
+     # - retry_requeue_exchange - sets the exchange created to re-queue things
+     #   back to the worker queue.
+     #
+     class Maxretry
+
+       def initialize(channel, queue, opts)
+         @worker_queue_name = queue.name
+         Sneakers.logger.debug do
+           "#{log_prefix} creating handler, opts=#{opts}"
+         end
+
+         @channel = channel
+         @opts = opts
+
+         # Construct names, defaulting where suitable
+         retry_name = @opts[:retry_exchange] || "#{@worker_queue_name}-retry"
+         error_name = @opts[:retry_error_exchange] || "#{@worker_queue_name}-error"
+         requeue_name = @opts[:retry_requeue_exchange] || "#{@worker_queue_name}-retry-requeue"
+         retry_routing_key = @opts[:retry_routing_key] || "#"
+
+         # Create the exchanges
+         @retry_exchange, @error_exchange, @requeue_exchange = [retry_name, error_name, requeue_name].map do |name|
+           Sneakers.logger.debug { "#{log_prefix} creating exchange=#{name}" }
+           @channel.exchange(name,
+                             :type => 'topic',
+                             :durable => exchange_durable?)
+         end
+
+         # Create the queues and bindings
+         Sneakers.logger.debug do
+           "#{log_prefix} creating queue=#{retry_name} x-dead-letter-exchange=#{requeue_name}"
+         end
+         @retry_queue = @channel.queue(retry_name,
+                                       :durable => queue_durable?,
+                                       :arguments => {
+                                         :'x-dead-letter-exchange' => requeue_name,
+                                         :'x-message-ttl' => @opts[:retry_timeout] || 60000
+                                       })
+         @retry_queue.bind(@retry_exchange, :routing_key => '#')
+
+         Sneakers.logger.debug do
+           "#{log_prefix} creating queue=#{error_name}"
+         end
+         @error_queue = @channel.queue(error_name,
+                                       :durable => queue_durable?)
+         @error_queue.bind(@error_exchange, :routing_key => '#')
+
+         # Finally, bind the worker queue to our requeue exchange
+         queue.bind(@requeue_exchange, :routing_key => retry_routing_key)
+
+         @max_retries = @opts[:retry_max_times] || 5
+
+       end
+
+       def self.configure_queue(name, opts)
+         retry_name = opts.fetch(:retry_exchange, "#{name}-retry")
+         opt_args = opts[:queue_options][:arguments] ? opts[:queue_options][:arguments].inject({}){|memo,(k,v)| memo[k.to_sym] = v; memo} : {}
+         opts[:queue_options][:arguments] = { :'x-dead-letter-exchange' => retry_name }.merge(opt_args)
+         opts[:queue_options]
+       end
+
+       def acknowledge(hdr, props, msg)
+         @channel.acknowledge(hdr.delivery_tag, false)
+       end
+
+       def reject(hdr, props, msg, requeue = false)
+         if requeue
+           # This was explicitly rejected specifying it be requeued, so we do not
+           # want it to pass through our retry logic.
+           @channel.reject(hdr.delivery_tag, requeue)
+         else
+           handle_retry(hdr, props, msg, :reject)
+         end
+       end
+
+
+       def error(hdr, props, msg, err)
+         handle_retry(hdr, props, msg, err)
+       end
+
+       def noop(hdr, props, msg)
+
+       end
+
+       # Helper logic for retry handling. This will reject the message if there
+       # are remaining retries left on it, otherwise it will publish it to the
+       # error exchange along with the reason.
+       # @param hdr [Bunny::DeliveryInfo]
+       # @param props [Bunny::MessageProperties]
+       # @param msg [String] The message
+       # @param reason [String, Symbol, Exception] Reason for the retry, included
+       #   in the JSON we put on the error exchange.
+       def handle_retry(hdr, props, msg, reason)
+         # +1 for the current attempt
+         num_attempts = failure_count(props[:headers]) + 1
+         if num_attempts <= @max_retries
+           # We call reject which will route the message to the
+           # x-dead-letter-exchange (i.e. retry exchange) on the queue
+           Sneakers.logger.info do
+             "#{log_prefix} msg=retrying, count=#{num_attempts}, headers=#{props[:headers]}"
+           end
+           @channel.reject(hdr.delivery_tag, false)
+           # TODO: metrics
+         else
+           # Retried more than the max times.
+           # Publish the original message with the routing_key to the error exchange
+           Sneakers.logger.info do
+             "#{log_prefix} msg=failing, retry_count=#{num_attempts}, reason=#{reason}"
+           end
+           data = {
+             error: reason.to_s,
+             num_attempts: num_attempts,
+             failed_at: Time.now.iso8601,
+             properties: props.to_hash
+           }.tap do |hash|
+             if reason.is_a?(Exception)
+               hash[:error_class] = reason.class.to_s
+               hash[:error_message] = "#{reason}"
+               if reason.backtrace
+                 hash[:backtrace] = reason.backtrace.take(10)
+               end
+             end
+           end
+
+           # Preserve retry log in a list
+           if retry_info = props[:headers]['retry_info']
+             old_retry0 = JSON.parse(retry_info) rescue {error: "Failed to parse retry info"}
+             old_retry = Array(old_retry0)
+             # Prevent old retry from nesting
+             data[:properties][:headers].delete('retry_info')
+             data = old_retry.unshift(data)
+           end
+
+           @error_exchange.publish(msg, {
+             routing_key: hdr.routing_key,
+             headers: {
+               retry_info: data.to_json
+             }
+           })
+           @channel.acknowledge(hdr.delivery_tag, false)
+           # TODO: metrics
+         end
+       end
+       private :handle_retry
+
+       # Uses the x-death header to determine the number of failures this job has
+       # seen in the past. This does not count the current failure. So, for
+       # instance, the first time the job fails, this will return 0; the second
+       # time, 1; etc.
+       # @param headers [Hash] Hash of headers that Rabbit delivers as part of
+       #   the message
+       # @return [Integer] Count of number of failures.
+       def failure_count(headers)
+         if headers.nil? || headers['x-death'].nil?
+           0
+         else
+           x_death_array = headers['x-death'].select do |x_death|
+             x_death['queue'] == @worker_queue_name
+           end
+           if x_death_array.count > 0 && x_death_array.first['count']
+             # Newer versions of RabbitMQ return headers with a count key
+             x_death_array.inject(0) {|sum, x_death| sum + x_death['count']}
+           else
+             # Older versions return a separate x-death header for each failure
+             x_death_array.count
+           end
+         end
+       end
+       private :failure_count
+
+       # Prefix all of our log messages so they are easier to find. We don't have
+       # the worker, so the next best thing is the queue name.
+       def log_prefix
+         "Maxretry handler [queue=#{@worker_queue_name}]"
+       end
+       private :log_prefix
+
+       private
+
+       def queue_durable?
+         @opts.fetch(:queue_options, {}).fetch(:durable, false)
+       end
+
+       def exchange_durable?
+         queue_durable?
+       end
+     end
+   end
+ end
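
The comment block above describes the retry topology; for orientation (not part of the released files), here is a minimal sketch of a worker wired up for Maxretry, assuming the usual Sneakers worker DSL. The queue name, timeout values, and the `process` helper are hypothetical; note that `configure_queue` above adds the `x-dead-letter-exchange` argument to the worker queue automatically when this handler is selected.

require 'sneakers'
require 'sneakers/handlers/maxretry'

class OrderWorker
  include Sneakers::Worker
  from_queue 'orders',
             handler: Sneakers::Handlers::Maxretry,
             retry_timeout: 30_000,   # ms spent on orders-retry before requeue
             retry_max_times: 3,      # after this, the message lands on orders-error
             ack: true

  def work(msg)
    process(msg)   # hypothetical application logic
    ack!
  rescue StandardError
    reject!        # routed through handle_retry above
  end
end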

data/lib/sneakers/handlers/oneshot.rb
@@ -0,0 +1,26 @@
+ module Sneakers
+   module Handlers
+     class Oneshot
+       def initialize(channel, queue, opts)
+         @channel = channel
+         @opts = opts
+       end
+
+       def acknowledge(hdr, props, msg)
+         @channel.acknowledge(hdr.delivery_tag, false)
+       end
+
+       def reject(hdr, props, msg, requeue=false)
+         @channel.reject(hdr.delivery_tag, requeue)
+       end
+
+       def error(hdr, props, msg, err)
+         reject(hdr, props, msg)
+       end
+
+       def noop(hdr, props, msg)
+
+       end
+     end
+   end
+ end
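
Oneshot simply acks or rejects once, with no retry bookkeeping. A hedged configuration sketch (not part of the diff), assuming the standard `Sneakers.configure` entry point, selecting it explicitly as the global handler; per-worker overrides via `worker.opts[:handler]` appear in queue.rb below:

require 'sneakers'
require 'sneakers/handlers/oneshot'

Sneakers.configure(handler: Sneakers::Handlers::Oneshot)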

data/lib/sneakers/metrics/logging_metrics.rb
@@ -0,0 +1,16 @@
+ module Sneakers
+   module Metrics
+     class LoggingMetrics
+       def increment(metric)
+         Sneakers.logger.info("INC: #{metric}")
+       end
+
+       def timing(metric, &block)
+         start = Time.now
+         block.call
+         Sneakers.logger.info("TIME: #{metric} #{Time.now - start}")
+       end
+     end
+   end
+ end
+

data/lib/sneakers/metrics/newrelic_metrics.rb
@@ -0,0 +1,32 @@
+ module Sneakers
+   module Metrics
+     class NewrelicMetrics
+
+       def self.eagent(eagent = nil)
+         @eagent = eagent || @eagent
+       end
+
+       def initialize()
+         #@connection = conn
+       end
+
+       def increment(metric)
+         record_stat metric, 1
+       end
+
+       def record_stat(metric, num)
+         metric_name = "Custom/#{metric.gsub("\.", "\/")}"
+         NewrelicMetrics.eagent::Agent.record_metric(metric_name, num)
+       rescue Exception => e
+         puts "NewrelicMetrics#record_stat: #{e}"
+       end
+
+       def timing(metric, &block)
+         start = Time.now
+         block.call
+         record_stat(metric, ((Time.now - start)*1000).floor)
+       end
+     end
+   end
+ end
+

data/lib/sneakers/metrics/null_metrics.rb
@@ -0,0 +1,13 @@
+ module Sneakers
+   module Metrics
+     class NullMetrics
+       def increment(metric)
+       end
+
+       def timing(metric, &block)
+         block.call
+       end
+     end
+   end
+ end
+

data/lib/sneakers/metrics/statsd_metrics.rb
@@ -0,0 +1,21 @@
+ module Sneakers
+   module Metrics
+     class StatsdMetrics
+       def initialize(conn)
+         @connection = conn
+       end
+
+       def increment(metric)
+         @connection.increment(metric)
+       end
+
+       def timing(metric, &block)
+         start = Time.now
+         block.call
+         @connection.timing(metric, ((Time.now - start)*1000).floor)
+       end
+
+     end
+   end
+ end
+
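
All four metrics backends above share the same two-method interface (`increment` and `timing`), so they are interchangeable. A minimal wiring sketch (not part of the diff), assuming `Sneakers.configure` accepts a `:metrics` option and that a statsd-style client object is available; `Statsd.new` is a stand-in for whatever client you use:

require 'sneakers'
require 'sneakers/metrics/statsd_metrics'

# Hypothetical statsd client; any object responding to #increment and #timing
# the way StatsdMetrics calls them would do.
statsd = Statsd.new('127.0.0.1', 8125)
Sneakers.configure(metrics: Sneakers::Metrics::StatsdMetrics.new(statsd))

# For New Relic, the agent constant is registered once up front via the
# eagent class method shown above, e.g.:
# Sneakers::Metrics::NewrelicMetrics.eagent ::NewRelic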

data/lib/sneakers/middleware/config.rb
@@ -0,0 +1,23 @@
+ module Sneakers
+   module Middleware
+     class Config
+       def self.use(klass, args)
+         middlewares << { class: klass, args: args }
+       end
+
+       def self.delete(klass)
+         middlewares.reject! { |el| el[:class] == klass }
+       end
+
+       def self.to_a
+         middlewares
+       end
+
+       def self.middlewares
+         @middlewares ||= []
+       end
+
+       private_class_method :middlewares
+     end
+   end
+ end
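
Config keeps a process-wide middleware list. A short usage sketch (not part of the diff) with a hypothetical `TimingMiddleware` class; the Rack-style `initialize(app, opts)` / `call(...)` shape is an assumption based on common middleware conventions, not something this file enforces:

class TimingMiddleware
  def initialize(app, opts)
    @app = app
    @opts = opts
  end

  def call(msg, delivery_info, metadata, handler)
    started = Time.now
    result = @app.call(msg, delivery_info, metadata, handler)
    Sneakers.logger.info("work took #{Time.now - started}s")
    result
  end
end

Sneakers::Middleware::Config.use TimingMiddleware, nil
Sneakers::Middleware::Config.to_a   # => [{ class: TimingMiddleware, args: nil }]
Sneakers::Middleware::Config.delete TimingMiddleware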

data/lib/sneakers/publisher.rb
@@ -0,0 +1,49 @@
+ module Sneakers
+   class Publisher
+
+     attr_reader :exchange, :channel
+
+     def initialize(opts = {})
+       @mutex = Mutex.new
+       @opts = Sneakers::CONFIG.merge(opts)
+       # If we've already got a bunny object, use it. This allows people to
+       # specify all kinds of options we don't need to know about (e.g. for ssl).
+       @bunny = @opts[:connection]
+     end
+
+     def publish(msg, options = {})
+       ensure_connection!
+       to_queue = options.delete(:to_queue)
+       options[:routing_key] ||= to_queue
+       Sneakers.logger.info {"publishing <#{msg}> to [#{options[:routing_key]}]"}
+       serialized_msg = Sneakers::ContentType.serialize(msg, options[:content_type])
+       encoded_msg = Sneakers::ContentEncoding.encode(serialized_msg, options[:content_encoding])
+       @exchange.publish(encoded_msg, options)
+     end
+
+     def ensure_connection!
+       @mutex.synchronize do
+         connect! unless connected?
+       end
+     end
+
+     private
+     def connect!
+       @bunny ||= create_bunny_connection
+       @bunny.start
+       @channel = @bunny.create_channel
+       @exchange = @channel.exchange(@opts[:exchange], **@opts[:exchange_options])
+     end
+
+     def connected?
+       @bunny && @bunny.connected? && channel
+     end
+
+     def create_bunny_connection
+       Bunny.new(@opts[:amqp], :vhost => @opts[:vhost],
+                               :heartbeat => @opts[:heartbeat],
+                               :properties => @opts.fetch(:properties, {}),
+                               :logger => Sneakers::logger)
+     end
+   end
+ end
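
A short publishing sketch based on the `publish` signature above (not part of the diff): `:to_queue` becomes the routing key, and the payload passes through the ContentType/ContentEncoding serializers before hitting the exchange. The queue name and payload are hypothetical; with no arguments, the publisher falls back to `Sneakers::CONFIG` for the exchange and connection settings.

publisher = Sneakers::Publisher.new            # exchange/amqp options come from Sneakers::CONFIG
publisher.publish('{"title":"hello"}', to_queue: 'downloads')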

data/lib/sneakers/queue.rb
@@ -0,0 +1,87 @@
+
+ class Sneakers::Queue
+   attr_reader :name, :opts, :exchange, :channel
+
+   def initialize(name, opts)
+     @name = name
+     @opts = opts
+     @handler_klass = Sneakers::CONFIG[:handler]
+   end
+
+   #
+   # :exchange
+   # :heartbeat_interval
+   # :prefetch
+   # :durable
+   # :ack
+   #
+   def subscribe(worker)
+     # If we've already got a bunny object, use it. This allows people to
+     # specify all kinds of options we don't need to know about (e.g. for ssl).
+     @bunny = @opts[:connection]
+     @bunny ||= create_bunny_connection
+     @bunny.start
+
+     @channel = @bunny.create_channel
+     @channel.prefetch(@opts[:prefetch])
+
+     exchange_name = @opts[:exchange]
+     @exchange = @channel.exchange(exchange_name, **@opts[:exchange_options])
+
+     routing_key = @opts[:routing_key] || @name
+     routing_keys = [*routing_key]
+
+     handler_klass = worker.opts[:handler] || Sneakers::CONFIG.fetch(:handler)
+     # Configure options if needed
+     if handler_klass.respond_to?(:configure_queue)
+       @opts[:queue_options] = handler_klass.configure_queue(@name, @opts)
+     end
+
+     queue = @channel.queue(@name, **@opts[:queue_options])
+
+     if exchange_name.length > 0
+       routing_keys.each do |key|
+         if @opts[:bind_arguments]
+           queue.bind(@exchange, routing_key: key, arguments: @opts[:bind_arguments])
+         else
+           queue.bind(@exchange, routing_key: key)
+         end
+       end
+     end
+
+     # NOTE: we are using the worker's options. This is necessary so the handler
+     # has the same configuration as the worker. Also pass along the exchange and
+     # queue in case the handler requires access to them (for things like binding
+     # retry queues, etc).
+     handler = handler_klass.new(@channel, queue, worker.opts)
+
+     @consumer = queue.subscribe(block: false, manual_ack: @opts[:ack]) do | delivery_info, metadata, msg |
+       worker.do_work(delivery_info, metadata, msg, handler)
+     end
+     nil
+   end
+
+   def unsubscribe
+     return unless @consumer
+
+     # TODO: should we simply close the channel here?
+     Sneakers.logger.info("Queue: will try to cancel consumer #{@consumer.inspect}")
+     cancel_ok = @consumer.cancel
+     if cancel_ok
+       Sneakers.logger.info "Queue: consumer #{cancel_ok.consumer_tag} cancelled"
+       @consumer = nil
+     else
+       Sneakers.logger.warn "Queue: could not cancel consumer #{@consumer.inspect}"
+       sleep(1)
+       unsubscribe
+     end
+   end
+
+   def create_bunny_connection
+     Bunny.new(@opts[:amqp], { vhost: @opts[:vhost],
+                               heartbeat: @opts[:heartbeat],
+                               properties: @opts.fetch(:properties, {}),
+                               logger: Sneakers::logger })
+   end
+   private :create_bunny_connection
+ end
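
`subscribe` reads most of its behaviour from the worker's options. A hypothetical worker (not part of the diff) illustrating the keys used above: `:exchange`, `:routing_key` as a single key or an array (the `[*routing_key]` splat accepts either), `:prefetch`, `:ack`, and the optional per-worker `:handler` override. The worker name, exchange, and keys are illustrative only.

class AuditWorker
  include Sneakers::Worker
  from_queue 'audit',
             exchange: 'events',
             routing_key: ['orders.#', 'payments.#'],  # bound once per key above
             prefetch: 20,
             ack: true

  def work(msg)
    ack!
  end
end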

data/lib/sneakers/runner.rb
@@ -0,0 +1,91 @@
+ require 'serverengine'
+ require 'sneakers/workergroup'
+
+ module Sneakers
+   class Runner
+     def initialize(worker_classes, opts={})
+       @runnerconfig = RunnerConfig.new(worker_classes, opts)
+     end
+
+     def run
+       @se = ServerEngine.create(nil, WorkerGroup) { @runnerconfig.reload_config! }
+       @se.run
+     end
+
+     def stop(stop_graceful=true)
+       @se.stop(stop_graceful)
+     end
+   end
+
+
+   class RunnerConfig
+     def method_missing(meth, *args, &block)
+       if %w{ before_fork after_fork }.include? meth.to_s
+         @conf[meth] = block
+       elsif %w{ workers start_worker_delay amqp }.include? meth.to_s
+         @conf[meth] = args.first
+       else
+         super
+       end
+     end
+
+     def initialize(worker_classes, opts)
+       @worker_classes = worker_classes
+       @conf = opts
+     end
+
+     def to_h
+       @conf
+     end
+
+
+     def reload_config!
+       Sneakers.logger.info("Loading runner configuration...")
+       config_file = Sneakers::CONFIG[:runner_config_file]
+
+       if config_file
+         begin
+           instance_eval(File.read(config_file), config_file)
+           Sneakers.logger.info("Loading config with file: #{config_file}")
+         rescue
+           Sneakers.logger.error("Cannot load from file '#{config_file}', #{$!}")
+         end
+       end
+
+       config = make_serverengine_config
+
+       [:before_fork, :after_fork].each do | hook |
+         Sneakers::CONFIG[:hooks][hook] = config.delete(hook) if config[hook]
+       end
+
+       Sneakers.logger.debug("New configuration: #{config.inspect}")
+       config
+     end
+
+     private
+
+     def make_serverengine_config
+       # From Sneakers#setup_general_logger, there's support for a Logger object
+       # in CONFIG[:log]. However, serverengine takes an object in :logger.
+       # Pass our logger object so there's no issue about sometimes passing a
+       # file and sometimes an object.
+       serverengine_config = Sneakers::CONFIG.merge(@conf)
+       serverengine_config.merge!(
+         :logger => Sneakers.logger,
+         :log_level => Sneakers.logger.level,
+         :worker_type => 'process',
+         :worker_classes => @worker_classes,
+
+         # Turning off serverengine internal logging infra, causes
+         # livelock and hang.
+         # see https://github.com/jondot/sneakers/issues/153
+         :log_stdout => false,
+         :log_stderr => false
+       )
+       serverengine_config.delete(:log)
+
+       serverengine_config
+     end
+   end
+
+ end
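
`RunnerConfig#reload_config!` instance-evals an optional Ruby config file (`Sneakers::CONFIG[:runner_config_file]`), so the DSL it accepts is exactly the set of messages `method_missing` whitelists: `workers`, `start_worker_delay`, and `amqp` take a value, while `before_fork` and `after_fork` take a block. A hypothetical config file sketch (not part of the diff; compare `data/examples/sneakers.conf.rb.example` in the file list above):

# sneakers.conf.rb (hypothetical path, pointed to by :runner_config_file)
workers 4
start_worker_delay 0.2
amqp 'amqp://guest:guest@localhost:5672'

before_fork do
  Sneakers.logger.info('about to fork workers')
end

after_fork do
  # e.g. re-establish per-process connections in each forked worker
end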

data/lib/sneakers/spawner.rb
@@ -0,0 +1,30 @@
+ require 'yaml'
+ require 'erb'
+
+ module Sneakers
+   class Spawner
+     def self.spawn
+       worker_group_config_file = ENV['WORKER_GROUP_CONFIG'] || './config/sneaker_worker_groups.yml'
+       unless File.exist?(worker_group_config_file)
+         puts 'No worker group file found.'
+         puts "Specify via ENV 'WORKER_GROUP_CONFIG' or by convention ./config/sneaker_worker_groups.yml"
+         Kernel.exit(1)
+       end
+       @pids = []
+       @exec_string = 'bundle exec rake sneakers:run'
+       worker_config = YAML.load(ERB.new(File.read(worker_group_config_file)).result)
+       worker_config.keys.each do |group_name|
+         workers = worker_config[group_name]['classes']
+         workers = workers.join ',' if workers.is_a?(Array)
+         @pids << fork do
+           @exec_hash = { 'WORKERS' => workers, 'WORKER_COUNT' => worker_config[group_name]['workers'].to_s }
+           Kernel.exec(@exec_hash, @exec_string)
+         end
+       end
+       %w[TERM USR1 HUP USR2].each do |signal|
+         Signal.trap(signal) { @pids.each { |pid| Process.kill(signal, pid) } }
+       end
+       Process.waitall
+     end
+   end
+ end
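
Spawner forks one `rake sneakers:run` process per group and passes each group's classes and worker count through the environment. The YAML file it loads (after ERB) must therefore reduce to a structure like the following, shown here as the equivalent Ruby hash; the group names and worker classes are hypothetical:

{
  'downloads' => { 'classes' => ['TitleScraper', 'ThumbnailWorker'], 'workers' => 2 },
  'mailers'   => { 'classes' => 'MailerWorker',                      'workers' => 1 }
}
# => Spawner runs `bundle exec rake sneakers:run` with WORKERS=TitleScraper,ThumbnailWorker
#    WORKER_COUNT=2 for the first group, and WORKERS=MailerWorker WORKER_COUNT=1 for the second.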

data/lib/sneakers/support/production_formatter.rb
@@ -0,0 +1,11 @@
+ require 'time'
+ module Sneakers
+   module Support
+     class ProductionFormatter < Logger::Formatter
+       def self.call(severity, time, program_name, message)
+         "#{time.utc.iso8601} p-#{Process.pid} t-#{Thread.current.object_id.to_s(36)} #{severity}: #{message}\n"
+       end
+     end
+   end
+ end
+
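
Since ProductionFormatter subclasses Logger::Formatter and exposes a class-level `call`, it can be dropped straight into a standard Ruby Logger as its formatter. A minimal sketch (not part of the diff; the timestamp, pid, and thread id in the comment are illustrative):

require 'logger'
require 'sneakers/support/production_formatter'

logger = Logger.new($stdout)
logger.formatter = Sneakers::Support::ProductionFormatter
logger.info('booted')
# => 2024-05-01T12:00:00Z p-4242 t-3kf2 INFO: booted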

data/lib/sneakers/support/utils.rb
@@ -0,0 +1,18 @@
+ class Sneakers::Utils
+   def self.make_worker_id(namespace)
+     "worker-#{namespace}:#{'1'}:#{rand(36**6).floor.to_s(36)}" # jid, worker id. include date.
+   end
+   def self.parse_workers(workerstring)
+     missing_workers = []
+     workers = (workerstring || '').split(',').map do |k|
+       begin
+         w = Kernel.const_get(k)
+       rescue
+         missing_workers << k
+       end
+       w
+     end.compact
+
+     [workers, missing_workers]
+   end
+ end
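
`parse_workers` turns a comma-separated class list (such as a WORKERS environment value) into resolved constants, collecting any names that fail to resolve. A quick usage sketch (not part of the diff) with hypothetical worker names:

workers, missing = Sneakers::Utils.parse_workers('TitleScraper,NoSuchWorker')
workers   # => [TitleScraper]      (assuming TitleScraper is defined)
missing   # => ["NoSuchWorker"]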