sidejob 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,206 @@
+ module SideJob
+   # Represents an input or output port from a Job
+   class Port
+     attr_reader :job, :type, :name
+
+     # @param job [SideJob::Job, SideJob::Worker]
+     # @param type [:in, :out] Specifies whether it is input or output port
+     # @param name [Symbol,String] Port names should match [a-zA-Z0-9_]+
+     def initialize(job, type, name)
+       @job = job
+       @type = type.to_sym
+       @name = name.to_sym
+       raise "Invalid port name: #{@name}" if @name !~ /^[a-zA-Z0-9_]+$/ && name != '*'
+     end
+
+     # @return [Boolean] True if two ports are equal
+     def ==(other)
+       other.is_a?(Port) && @job == other.job && @type == other.type && @name == other.name
+     end
+
+     # @see #==
+     def eql?(other)
+       self == other
+     end
+
+     # Returns the port options. Currently supported options are mode and default.
+     # @return [Hash] Port options
+     def options
+       opts = {mode: mode}
+
+       default = SideJob.redis.hget("#{@job.redis_key}:#{type}ports:default", @name)
+       opts[:default] = parse_json(default) if default
+
+       opts
+     end
+
+     # Reset the port options. Currently supported options are mode and default.
+     # @param options [Hash] New port options
+     def options=(options)
+       options = options.symbolize_keys
+       SideJob.redis.multi do |multi|
+         multi.hset "#{@job.redis_key}:#{type}ports:mode", @name, options[:mode] || :queue
+         if options.has_key?(:default)
+           multi.hset "#{@job.redis_key}:#{type}ports:default", @name, options[:default].to_json
+         else
+           multi.hdel "#{@job.redis_key}:#{type}ports:default", @name
+         end
+       end
+     end
+
+     # @return [Symbol, nil] The port mode or nil if the port is invalid
+     def mode
+       mode = SideJob.redis.hget("#{@job.redis_key}:#{type}ports:mode", @name)
+       mode = mode.to_sym if mode
+       mode
+     end
+
+     # Returns the number of items waiting on this port.
+     # @return [Fixnum]
+     def size
+       SideJob.redis.llen(redis_key)
+     end
+
+     # Returns whether {#read} will return data.
+     # @return [Boolean] True if there is data to read.
+     def data?
+       size > 0 || default?
+     end
+
+     # Returns the port default value. To distinguish a null default value vs no default, use {#default?}.
+     # @return [Object, nil] The default value on the port or nil if none
+     def default
+       parse_json SideJob.redis.hget("#{@job.redis_key}:#{type}ports:default", @name)
+     end
+
+     # Returns if the port has a default value.
+     # @return [Boolean] True if the port has a default value
+     def default?
+       SideJob.redis.hexists("#{@job.redis_key}:#{type}ports:default", @name)
+     end
+
+     # Write data to the port. If the port is an input port, runs the job.
+     # The default operating mode for a port is :queue which means packets are read/written as a FIFO queue.
+     # In :memory mode, writes do not enter the queue and instead overwrite the default port value.
+     # @param data [Object] JSON encodable data to write to the port
+     def write(data)
+       case mode
+       when :queue
+         SideJob.redis.rpush redis_key, data.to_json
+         @job.run if type == :in
+       when :memory
+         SideJob.redis.hset "#{@job.redis_key}:#{type}ports:default", @name, data.to_json
+       else
+         raise "Missing port #{@name} or invalid mode #{mode}"
+       end
+
+       @job.log({read: [], write: [log_port_data(self, [data])]})
+     end
+
+     # Reads the oldest data from the port. Returns the default value if no data and there is a default.
+     # @return [Object] First data from port
+     # @raise [EOFError] Error raised if no data to be read
+     def read
+       data = SideJob.redis.lpop(redis_key)
+       if data
+         data = parse_json(data)
+       elsif default?
+         data = default
+       else
+         raise EOFError unless data
+       end
+
+       @job.log({read: [log_port_data(self, [data])], write: []})
+
+       data
+     end
+
+     # Connects this port to a number of other ports.
+     # All data is read from the current port and written to the destination ports.
+     # If the current port has a default value, the default is copied to all destination ports.
+     # @param ports [Array<SideJob::Port>, SideJob::Port] Destination port(s)
+     # @param metadata [Hash] If provided, the metadata is merged into the log entry
+     # @return [Array<Object>] Returns all data on current port
+     def connect_to(ports, metadata={})
+       ports = [ports] unless ports.is_a?(Array)
+       ports_by_mode = ports.group_by {|port| port.mode}
+
+       default = SideJob.redis.hget("#{@job.redis_key}:#{type}ports:default", @name)
+
+       # empty the port of all data
+       data = SideJob.redis.multi do |multi|
+         multi.lrange redis_key, 0, -1
+         multi.del redis_key
+       end[0]
+
+       to_run = Set.new
+
+       SideJob.redis.multi do |multi|
+         if data.length > 0
+           (ports_by_mode[:queue] || []).each do |port|
+             multi.rpush port.redis_key, data
+             to_run.add port.job if port.type == :in
+           end
+           if ! default
+             (ports_by_mode[:memory] || []).each do |port|
+               multi.hset "#{port.job.redis_key}:#{port.type}ports:default", port.name, data.last
+             end
+           end
+         end
+
+         if default
+           ports.each do |port|
+             multi.hset "#{port.job.redis_key}:#{port.type}ports:default", port.name, default
+           end
+         end
+       end
+
+       data.map! {|x| parse_json x}
+       if data.length > 0
+         SideJob.log metadata.merge({read: [log_port_data(self, data)], write: ports.map { |port| log_port_data(port, data)}})
+       end
+
+       to_run.each { |job| job.run }
+       data
+     end
+
+     include Enumerable
+     # Iterate over port data. Default values are not returned.
+     # @yield [Object] Each data from port
+     def each(&block)
+       while size > 0 do
+         yield read
+       end
+     rescue EOFError
+     end
+
+     # Returns the redis key used for storing inputs or outputs from a port name
+     # @return [String] Redis key
+     def redis_key
+       "#{@job.redis_key}:#{@type}:#{@name}"
+     end
+     alias :to_s :redis_key
+
+     # @return [Fixnum] Hash value for port
+     def hash
+       redis_key.hash
+     end
+
+     private
+
+     def log_port_data(port, data)
+       x = {job: port.job.id, data: data}
+       x[:"#{port.type}port"] = port.name
+       x
+     end
+
+     # Wrapper around JSON.parse to also handle primitive types.
+     # @param data [String, nil] Data to parse
+     # @return [Object, nil]
+     def parse_json(data)
+       raise "Invalid json #{data}" if data && ! data.is_a?(String)
+       data = JSON.parse("[#{data}]")[0] if data
+       data
+     end
+   end
+ end
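
The Port class above is the basic data-flow primitive: a Redis list per port for :queue mode plus a per-port default value for :memory mode. A minimal usage sketch, assuming a `job` object that provides `redis_key`, `run`, and `log` the way SideJob::Job does (the Job class itself is not part of this file), with an illustrative port name:

```ruby
# Hedged sketch only; `job` and the port name :data are illustrative assumptions.
port = SideJob::Port.new(job, :in, :data)

# :queue is the default mode (FIFO); :memory makes writes overwrite the default value instead.
port.options = { mode: :queue, default: 0 }

port.write({'n' => 42})  # pushes JSON onto the Redis list and, for an input port, runs the job
port.size                # => 1
port.read                # => {"n" => 42}
port.read                # => 0 (falls back to the default; raises EOFError if there were none)
```
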
@@ -0,0 +1,117 @@
+ module SideJob
+   # This middleware is primarily responsible for changing job status depending on events
+   # {SideJob::Job} sets status to terminating or queued when a job is queued
+   # All other job status changes happen here
+   # For simplicity, a job is allowed to be queued multiple times in the Sidekiq queue
+   # Only when it gets pulled out to be run, i.e. here, we decide if we want to actually run it
+   class ServerMiddleware
+     # Configuration parameters for running workers
+     CONFIGURATION = {
+       lock_expiration: 86400, # the worker should not run longer than this number of seconds
+       max_depth: 20, # the job should not be nested more than this number of levels
+       max_runs_per_minute: 60, # generate error if the job is run more often than this
+     }
+
+     # Called by sidekiq as a server middleware to handle running a worker
+     # @param worker [SideJob::Worker]
+     # @param msg [Hash] Sidekiq message format
+     # @param queue [String] Queue the job was pulled from
+     def call(worker, msg, queue)
+       @worker = worker
+       return unless worker.exists? # make sure the job has not been deleted
+       last_run = @worker.get(:ran_at)
+
+       # we skip the run if we already ran once after the enqueued time
+       return if last_run && msg['enqueued_at'] && Time.parse(last_run) > Time.at(msg['enqueued_at'])
+
+       case @worker.status
+       when 'queued'
+         terminate = false
+       when 'terminating'
+         terminate = true
+       else
+         # for any other status, we assume this worker does not need to be run
+         return
+       end
+
+       # if another thread is already running this job, we don't run the job now
+       # this spares workers from having to deal with thread safety
+       # we will requeue the job in the other thread
+
+       lock = "#{@worker.redis_key}:lock"
+       now = Time.now.to_f
+       val = SideJob.redis.multi do |multi|
+         multi.get(lock)
+         multi.set(lock, now, {ex: CONFIGURATION[:lock_expiration]}) # add an expiration just in case the lock becomes stale
+       end[0]
+
+       return if val # only run if lock key was not set
+
+       @worker.set ran_at: SideJob.timestamp
+
+       # limit how many times per minute each job can be called
+       # and how deeply jobs can be nested in the job tree
+       # this is to help prevent bad coding that leads to recursive busy loops
+       # Uses Rate limiter 1 pattern from http://redis.io/commands/INCR
+       rate_key = "#{@worker.redis_key}:rate:#{Time.now.to_i / 60}"
+       rate = SideJob.redis.multi do |multi|
+         multi.incr rate_key
+         multi.expire rate_key, 300 # 5 minutes
+       end[0]
+       if rate.to_i > CONFIGURATION[:max_runs_per_minute]
+         terminate = true
+         SideJob.log({ job: @worker.id, error: 'Job was terminated due to being called too rapidly' })
+       elsif SideJob.redis.llen("#{@worker.redis_key}:ancestors") > CONFIGURATION[:max_depth]
+         terminate = true
+         SideJob.log({ job: @worker.id, error: 'Job was terminated due to being too deep' })
+       end
+
+       if terminate
+         terminate_worker
+       else
+         begin
+           run_worker { yield }
+         ensure
+           val = SideJob.redis.multi do |multi|
+             multi.get lock
+             multi.del lock
+           end[0]
+
+           @worker.run if val && val.to_f != now # run it again if the lock key changed
+         end
+       end
+     end
+
+     private
+
+     def terminate_worker
+       # We let workers perform cleanup before terminating jobs
+       # So that a worker cannot block termination, errors raised here are ignored
+       @worker.shutdown if @worker.respond_to?(:shutdown)
+     rescue => e
+       add_exception e
+     ensure
+       @worker.status = 'terminated'
+       @worker.parent.run if @worker.parent
+     end
+
+     def run_worker(&block)
+       # normal run
+       @worker.status = 'running'
+       yield
+       @worker.status = 'completed' if @worker.status == 'running'
+     rescue SideJob::Worker::Suspended
+       @worker.status = 'suspended' if @worker.status == 'running'
+     rescue => e
+       @worker.status = 'failed' if @worker.status == 'running'
+       add_exception e
+     ensure
+       @worker.parent.run if @worker.parent
+     end
+
+     def add_exception(exception)
+       # only store the backtrace until the first sidekiq line
+       SideJob.log({ job: @worker.id, error: exception.message, backtrace: exception.backtrace.take_while {|l| l !~ /sidekiq/}.join("\n") })
+     end
+   end
+ end
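
The run-rate guard in `call` uses the "Rate limiter 1" pattern referenced in the comment above: INCR a per-minute key, give it a TTL, and compare the count. A standalone sketch of that pattern with a plain redis-rb client and hypothetical key names:

```ruby
require 'redis'  # assumption: plain redis-rb client, outside of Sidekiq.redis

redis = Redis.new
key = "myjob:rate:#{Time.now.to_i / 60}"  # one counter per minute window

count = redis.multi do |multi|
  multi.incr key          # count this run in the current window
  multi.expire key, 300   # stale windows expire on their own
end[0]

puts 'rate limit exceeded' if count.to_i > 60
```
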
@@ -0,0 +1,54 @@
+ # helpers for testing
+
+ module SideJob
+   module Worker
+     # Run jobs until the queue is cleared.
+     # @param timeout [Float] timeout in seconds for Timeout#timeout (default 5)
+     # @param errors [Boolean] Whether to propagate errors that occur in jobs (default true)
+     def self.drain_queue(timeout: 5, errors: true)
+       Timeout::timeout(timeout) do
+         have_job = true
+         while have_job
+           have_job = false
+           Sidekiq::Queue.all.each do |queue|
+             queue.each do |sidekiq_job|
+               have_job = true
+               sidekiq_job.delete
+
+               SideJob.find(sidekiq_job.jid).run_inline(errors: errors, queue: false, args: sidekiq_job.args)
+             end
+           end
+         end
+       end
+     end
+   end
+
+   class Job
+     # Runs a single job once. This method only works for jobs with no child jobs.
+     # @param errors [Boolean] Whether to propagate errors that occur in jobs (default true)
+     # @param queue [Boolean] Whether to force the job to be queued (default true)
+     # @param args [Array] Args to pass to the worker's perform method (default none)
+     def run_inline(errors: true, queue: true, args: [])
+       self.status = 'queued' if queue
+
+       worker = get(:class).constantize.new
+       worker.jid = id
+       SideJob::ServerMiddleware.new.call(worker, {'enqueued_at' => Time.now.to_f}, get(:queue)) do
+         worker.perform(*args)
+       end
+
+       reload
+
+       if errors && status == 'failed'
+         SideJob.logs.each do |event|
+           if event['error']
+             exception = RuntimeError.exception(event['error'])
+             exception.set_backtrace(event['backtrace'])
+             raise exception
+           end
+         end
+         raise "Job #{id} failed but cannot find error log"
+       end
+     end
+   end
+ end
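
A hedged sketch of how these helpers might be used from a test. `MyWorker`, the 'core' queue, and the port names are made-up examples, and the `input` accessor is assumed to come from SideJob::JobMethods, which is not part of this diff:

```ruby
# Hypothetical spec snippet; names are illustrative only.
job = SideJob.queue('core', 'MyWorker', inports: { in: {} }, outports: { out: {} })
job.input(:in).write(1)                   # writing to an input port queues the job

SideJob::Worker.drain_queue(timeout: 10)  # run everything queued in Sidekiq synchronously

job.run_inline                            # or run a single, childless job once;
                                          # a failure re-raises the logged error by default
```
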
@@ -0,0 +1,4 @@
+ module SideJob
+   # The current SideJob version
+   VERSION = '3.0.0'
+ end
@@ -0,0 +1,133 @@
+ module SideJob
+   # All workers should include SideJob::Worker and implement the perform method.
+   # @see SideJob::JobMethods
+   module Worker
+     @registry ||= {}
+     class << self
+       # This holds the registry for all available workers on one queue
+       attr_reader :registry
+
+       # Workers need to add themselves to the registry even if it's an empty configuration.
+       # This method publishes the registry to redis so that other workers can call workers on this queue.
+       # All workers for the queue should be defined before calling this, since the existing registry is overwritten.
+       # @param queue [String] Queue to register all defined workers
+       def register_all(queue)
+         SideJob.redis.multi do |multi|
+           multi.del "workers:#{queue}"
+           multi.hmset "workers:#{queue}", @registry.map {|key, val| [key, val.to_json]}.flatten(1) if @registry.size > 0
+         end
+       end
+
+       # Returns the configuration registered for a worker.
+       # @param queue [String] Name of queue
+       # @param klass [String] Name of worker class
+       # @return [Hash, nil] Returns nil if the worker is not defined
+       # @see ClassMethods#register
+       def config(queue, klass)
+         config = SideJob.redis.hget "workers:#{queue}", klass
+         config = JSON.parse(config) if config
+         config
+       end
+     end
+
+     # Class methods added to Workers
+     module ClassMethods
+       # All workers need to register themselves
+       # @param config [Hash] The base configuration used by any jobs of this class
+       def register(config={})
+         SideJob::Worker.registry[self.name] = config
+       end
+     end
+
+     # Methods loaded last to override other included methods
+     module OverrideMethods
+       # Returns the jid set by sidekiq as the job id
+       # @return [String] Job id
+       def id
+         jid
+       end
+     end
+
+     # @see SideJob::Worker
+     def self.included(base)
+       base.class_eval do
+         include Sidekiq::Worker
+         include SideJob::JobMethods
+         include SideJob::Worker::OverrideMethods
+       end
+       base.extend(ClassMethods)
+     end
+
+     # Queues a child job, setting parent and by to self.
+     # @see SideJob.queue
+     def queue(queue, klass, **options)
+       SideJob.queue(queue, klass, options.merge({parent: self, by: "job:#{id}"}))
+     end
+
+     # Exception raised by {#suspend}
+     class Suspended < StandardError; end
+
+     # Immediately suspend the current worker
+     # @raise [SideJob::Worker::Suspended]
+     def suspend
+       raise Suspended
+     end
+
+     # Reads a set of input ports together.
+     # Workers should use this method where possible instead of reading directly from ports due to complexities
+     # of dealing with ports with defaults.
+     # A worker should be idempotent (it can be called multiple times on the same state).
+     # Consider reading from a single port with a default value. Each time it is run, it could read the same data
+     # from the port. The output of the job then could depend on the number of times it is run.
+     # To prevent this, this method requires that there be at least one input port which does not have a default.
+     # Yields data from the ports until no port has data left, or suspends if some but not all ports have data.
+     # @param inputs [Array<String>] List of input ports to read
+     # @yield [Array] Splat of input data in same order as inputs
+     # @raise [SideJob::Worker::Suspended] Raised if an input port without a default has data but not all ports have data
+     # @raise [RuntimeError] An error is raised if all input ports have default values
+     def for_inputs(*inputs, &block)
+       return unless inputs.length > 0
+       ports = inputs.map {|name| input(name)}
+       loop do
+         group_port_logs(job: id) do
+           # error if ports all have defaults, complete if no non-default port inputs, suspend if partial inputs
+           data = ports.map {|port| [ port.data?, port.default? ] }
+           raise "One of these input ports should not have a default value: #{inputs.join(',')}" if data.all? {|x| x[1]}
+           return unless data.any? {|x| x[0] && ! x[1] }
+           suspend unless data.all? {|x| x[0] }
+
+           yield *ports.map(&:read)
+         end
+       end
+     end
+
+     # Sets values in the job's internal state.
+     # @param data [Hash{String,Symbol => Object}] Data to update: objects should be JSON encodable
+     # @raise [RuntimeError] Error raised if job no longer exists
+     def set(data)
+       return unless data.size > 0
+       load_state
+       data.each_pair { |key, val| @state[key.to_s] = val }
+       save_state
+     end
+
+     # Unsets some fields in the job's internal state
+     # @param fields [Array<String,Symbol>] Fields to unset
+     # @raise [RuntimeError] Error raised if job no longer exists
+     def unset(*fields)
+       return unless fields.length > 0
+       load_state
+       fields.each { |field| @state.delete(field.to_s) }
+       save_state
+     end
+
+     private
+
+     def save_state
+       check_exists
+       if @state
+         SideJob.redis.hset 'jobs', id, @state.to_json
+       end
+     end
+   end
+ end
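
A sketch of what a worker built on this module might look like. The class, its registration config hash, and the `output(:sum)` accessor (expected to come from SideJob::JobMethods, which is not in this diff) are illustrative assumptions rather than API taken from this file:

```ruby
# Hypothetical worker; include SideJob::Worker, register, for_inputs and suspend come from this file.
class SumWorker
  include SideJob::Worker
  register(inports: { a: {}, b: {} }, outports: { sum: {} })  # config contents are assumed

  def perform
    # Reads :a and :b together: suspends if only one has (non-default) data,
    # returns when neither does, and raises if both ports only ever have defaults.
    for_inputs(:a, :b) do |a, b|
      output(:sum).write(a + b)
    end
  end
end
```
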
data/lib/sidejob.rb ADDED
@@ -0,0 +1,116 @@
+ require 'sidekiq'
+ require 'sidekiq/api'
+ require 'sidejob/port'
+ require 'sidejob/job'
+ require 'sidejob/worker'
+ require 'sidejob/server_middleware'
+ require 'time' # for iso8601 method
+
+ module SideJob
+   # Returns redis connection
+   # If block is given, yields the redis connection
+   # Otherwise, just returns the redis connection
+   def self.redis
+     Sidekiq.redis do |redis|
+       if block_given?
+         yield redis
+       else
+         redis
+       end
+     end
+   end
+
+   # @param redis [Hash] Options for passing to Redis.new
+   def self.redis=(redis)
+     Sidekiq.redis = redis
+   end
+
+   # Main function to queue a job
+   # @param queue [String] Name of the queue to put the job in
+   # @param klass [String] Name of the class that will handle the job
+   # @param args [Array] additional args to pass to the worker's perform method (default none)
+   # @param parent [SideJob::Job] parent job
+   # @param name [String] Name of child job (required if parent specified)
+   # @param at [Time, Float] Time to schedule the job, otherwise queue immediately
+   # @param by [String] Who created this job. Recommend <type>:<id> format for non-jobs as SideJob uses job:<id>.
+   # @param inports [Hash{Symbol,String => Hash}] Input port configuration. Port name to options.
+   # @param outports [Hash{Symbol,String => Hash}] Output port configuration. Port name to options.
+   # @return [SideJob::Job] Job
+   def self.queue(queue, klass, args: nil, parent: nil, name: nil, at: nil, by: nil, inports: nil, outports: nil)
+     raise "No worker registered for #{klass} in queue #{queue}" unless SideJob::Worker.config(queue, klass)
+
+     log_options = {}
+     if parent
+       raise 'Missing name option for job with a parent' unless name
+       raise "Parent already has child job with name #{name}" if parent.child(name)
+       ancestry = [parent.id] + SideJob.redis.lrange("#{parent.redis_key}:ancestors", 0, -1)
+       log_options = {job: parent.id}
+     end
+
+     # To prevent race conditions, we generate the id and set all data in redis before queuing the job to sidekiq
+     # Otherwise, sidekiq may start the job too quickly
+     id = SideJob.redis.incr('jobs:last_id').to_s
+     job = SideJob::Job.new(id)
+
+     SideJob.redis.multi do |multi|
+       multi.hset 'jobs', id, {queue: queue, class: klass, args: args, created_by: by, created_at: SideJob.timestamp}.to_json
+
+       if parent
+         multi.rpush "#{job.redis_key}:ancestors", ancestry # we need to rpush to get the right order
+         multi.hset "#{parent.redis_key}:children", name, id
+       end
+     end
+
+     # initialize ports
+     job.group_port_logs(log_options) do
+       job.inports = inports
+       job.outports = outports
+     end
+
+     job.run(at: at)
+   end
+
+   # Finds a job by id
+   # @param job_id [String, nil] Job Id
+   # @return [SideJob::Job, nil] Job object or nil if it doesn't exist
+   def self.find(job_id)
+     return nil unless job_id
+     job = SideJob::Job.new(job_id)
+     return job.exists? ? job : nil
+   end
+
+   # Returns the current timestamp as an iso8601 string
+   # @return [String] Current timestamp
+   def self.timestamp
+     Time.now.utc.iso8601(9)
+   end
+
+   # Adds a log entry to redis with current timestamp.
+   # @param entry [Hash] Log entry
+   def self.log(entry)
+     SideJob.redis.rpush 'jobs:logs', entry.merge(timestamp: SideJob.timestamp).to_json
+   end
+
+   # Returns all job logs and optionally clears them.
+   # @param clear [Boolean] If true, delete logs after returning them (default true)
+   # @return [Array<Hash>] All logs with the oldest first
+   def self.logs(clear: true)
+     SideJob.redis.multi do |multi|
+       multi.lrange 'jobs:logs', 0, -1
+       multi.del 'jobs:logs' if clear
+     end[0].map {|log| JSON.parse(log)}
+   end
+ end
+
+ # :nocov:
+ Sidekiq.configure_server do |config|
+   config.server_middleware do |chain|
+     chain.remove Sidekiq::Middleware::Server::RetryJobs # we never want sidekiq to retry jobs
+     chain.add SideJob::ServerMiddleware
+   end
+ end
+
+ if ENV['SIDEJOB_URL']
+   SideJob.redis = {url: ENV['SIDEJOB_URL']}
+ end
+ # :nocov:
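
Putting the top-level API together, a hedged example of queuing a job with port configuration at creation time (the queue name, worker class, and port names are hypothetical, and the worker must already have been registered for its queue):

```ruby
# Illustrative call; assumes 'MyWorker' has been registered on the 'core' queue.
job = SideJob.queue('core', 'MyWorker',
                    args: [1, 2],
                    by: 'user:42',
                    inports: { in: { mode: :queue } },
                    outports: { out: { mode: :memory } })

SideJob.find(job.id)        # => the same job, looked up again by id
SideJob.logs(clear: false)  # inspect accumulated logs without clearing them
```
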
data/sidejob.gemspec ADDED
@@ -0,0 +1,21 @@
+ require File.expand_path('../lib/sidejob/version', __FILE__)
+
+ Gem::Specification.new do |s|
+   s.name = 'sidejob'
+   s.version = SideJob::VERSION
+   s.authors = ['Austin Che']
+   s.email = ['austin@ginkgobioworks.com']
+   s.summary = 'Use SideJob to run sidekiq jobs with a flow-based model'
+
+   s.files = `git ls-files`.split($/)
+
+   s.require_paths = ['lib']
+
+   s.add_dependency 'sidekiq', '~>3.2.5'
+
+   # development
+   s.add_development_dependency 'pry'
+   s.add_development_dependency 'rspec'
+   s.add_development_dependency 'simplecov'
+   s.add_development_dependency 'yard'
+ end
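
To pull this release into an application, a Gemfile entry along these lines would be enough; sidekiq ~> 3.2.5 comes along as the single runtime dependency:

```ruby
# Gemfile
source 'https://rubygems.org'

gem 'sidejob', '3.0.0'
```
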