inst-jobs 2.0.0 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
  21. data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
  22. data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
  23. data/db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb +56 -0
  24. data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb +27 -0
  25. data/exe/inst_jobs +3 -2
  26. data/lib/delayed/backend/active_record.rb +211 -168
  27. data/lib/delayed/backend/base.rb +110 -72
  28. data/lib/delayed/batch.rb +11 -9
  29. data/lib/delayed/cli.rb +98 -84
  30. data/lib/delayed/core_ext/kernel.rb +4 -2
  31. data/lib/delayed/daemon.rb +70 -74
  32. data/lib/delayed/job_tracking.rb +26 -25
  33. data/lib/delayed/lifecycle.rb +27 -23
  34. data/lib/delayed/log_tailer.rb +17 -17
  35. data/lib/delayed/logging.rb +13 -16
  36. data/lib/delayed/message_sending.rb +43 -52
  37. data/lib/delayed/performable_method.rb +6 -8
  38. data/lib/delayed/periodic.rb +72 -68
  39. data/lib/delayed/plugin.rb +2 -4
  40. data/lib/delayed/pool.rb +205 -168
  41. data/lib/delayed/server/helpers.rb +6 -6
  42. data/lib/delayed/server.rb +51 -54
  43. data/lib/delayed/settings.rb +94 -81
  44. data/lib/delayed/testing.rb +21 -22
  45. data/lib/delayed/version.rb +1 -1
  46. data/lib/delayed/work_queue/in_process.rb +21 -17
  47. data/lib/delayed/work_queue/parent_process/client.rb +55 -53
  48. data/lib/delayed/work_queue/parent_process/server.rb +245 -207
  49. data/lib/delayed/work_queue/parent_process.rb +52 -53
  50. data/lib/delayed/worker/consul_health_check.rb +32 -33
  51. data/lib/delayed/worker/health_check.rb +34 -26
  52. data/lib/delayed/worker/null_health_check.rb +3 -1
  53. data/lib/delayed/worker/process_helper.rb +8 -9
  54. data/lib/delayed/worker.rb +272 -241
  55. data/lib/delayed/yaml_extensions.rb +12 -10
  56. data/lib/delayed_job.rb +37 -37
  57. data/lib/inst-jobs.rb +1 -1
  58. data/spec/active_record_job_spec.rb +143 -139
  59. data/spec/delayed/cli_spec.rb +7 -7
  60. data/spec/delayed/daemon_spec.rb +10 -9
  61. data/spec/delayed/message_sending_spec.rb +16 -9
  62. data/spec/delayed/periodic_spec.rb +14 -21
  63. data/spec/delayed/server_spec.rb +38 -38
  64. data/spec/delayed/settings_spec.rb +26 -25
  65. data/spec/delayed/work_queue/in_process_spec.rb +7 -8
  66. data/spec/delayed/work_queue/parent_process/client_spec.rb +17 -12
  67. data/spec/delayed/work_queue/parent_process/server_spec.rb +117 -41
  68. data/spec/delayed/work_queue/parent_process_spec.rb +21 -23
  69. data/spec/delayed/worker/consul_health_check_spec.rb +37 -50
  70. data/spec/delayed/worker/health_check_spec.rb +60 -52
  71. data/spec/delayed/worker_spec.rb +44 -21
  72. data/spec/sample_jobs.rb +45 -15
  73. data/spec/shared/delayed_batch.rb +74 -67
  74. data/spec/shared/delayed_method.rb +143 -102
  75. data/spec/shared/performable_method.rb +39 -38
  76. data/spec/shared/shared_backend.rb +550 -437
  77. data/spec/shared/testing.rb +14 -14
  78. data/spec/shared/worker.rb +156 -148
  79. data/spec/shared_jobs_specs.rb +13 -13
  80. data/spec/spec_helper.rb +53 -55
  81. metadata +148 -82
  82. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  83. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  84. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  85. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  86. data/lib/delayed/backend/redis/find_available.lua +0 -3
  87. data/lib/delayed/backend/redis/functions.rb +0 -59
  88. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  89. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  90. data/lib/delayed/backend/redis/job.rb +0 -535
  91. data/lib/delayed/backend/redis/set_running.lua +0 -5
  92. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  93. data/spec/gemfiles/42.gemfile +0 -7
  94. data/spec/gemfiles/50.gemfile +0 -7
  95. data/spec/gemfiles/51.gemfile +0 -7
  96. data/spec/gemfiles/52.gemfile +0 -7
  97. data/spec/gemfiles/60.gemfile +0 -7
  98. data/spec/redis_job_spec.rb +0 -148
data/lib/delayed/backend/redis/job.rb
@@ -1,535 +0,0 @@
- # frozen_string_literal: true
-
- # This can't currently be made compatible with redis cluster, because the Lua functions
- # access keys that aren't in their keys argument list (since they pop jobs off
- # a queue and then update the job with that id).
-
- # still TODO:
- # * a consequence of our ignore-redis-failures code is that if redis is unavailable, creating delayed jobs silently fails, which is probably not what we want
- # * need a way to migrate between jobs backends
- # * we need some auditors:
- #   * fail jobs in running_jobs if they've timed out
- #   * have pools audit their workers and immediately fail jobs locked by dead workers (make sure this handles the restart case where two pools are running)
- #   * have a master auditor that fails jobs if a whole pool dies
- # * audit strands ocasionally, look for any stuck strands where the strand queue isn't empty but there's no strand job running or queued
- module Delayed::Backend::Redis
-   require 'delayed/backend/redis/functions'
-
-   class Job
-     extend ActiveModel::Callbacks
-     define_model_callbacks :create, :save
-     include ActiveModel::Dirty
-     include Delayed::Backend::Base
-     # This redis instance needs to be set by the application during jobs configuration
-     cattr_accessor :redis
-
-     # An overview of where and when things are stored in redis:
-     #
-     # Jobs are given a UUID for an id, rather than an incrementing integer id.
-     # The job attributes are then stored in a redis hash at job/<id>. Attribute
-     # values are generally stored as their json representation, except for
-     # timestamps, which as stored as floating point utc-time-since-unix-epoch
-     # values, so that we can compare timestamps in Lua without a date parser.
-     #
-     # Jobs that are schedule to run immediately (in the present/past) are
-     # inserted into the queue named queue/<queue_name>. The queue is a sorted
-     # set, with the value being the job id and the weight being a floating point
-     # value, <priority>.<run_at>. This formatting is key to efficient
-     # querying of the next job to run.
-     #
-     # Jobs that are scheduled to run in the future are not inserted into the
-     # queue, but rather a future queue named queue/<queue_name>/future. This
-     # queue is also a sorted set, with the value being the job id, but the weight
-     # is just the <run_at> value.
-     #
-     # If the job is on a strand, the flow is different. First, it's inserted into
-     # a list named strand/<strand>. When strand jobs are inserted into the
-     # current jobs queue, we check if they're next to run in the strand. If not,
-     # we give them a special priority that is greater than MAX_PRIORITY, so that
-     # they won't run. When a strand job is finished, failed or deleted,
-     # "tickle_strand" is called, which removes that job from the list and if that
-     # job was at the front of the list, changes the priority on the next job so
-     # that it's eligible to run.
-     #
-     # For singletons, the flow is the same as for other strand jobs, except that
-     # the job is thrown out if there are already any non-running jobs in the
-     # strand list.
-     #
-     # If a job fails, it's removed from the normal queues and inserted into the
-     # failed_jobs sorted set, with job id as the value and failure time as the
-     # key. The hash of job attributes is also renamed from job/<id> to
-     # failed_job/<id> -- use Delayed::Job::Failed to query those jobs, same as
-     # with AR jobs.
-     #
-     # We also insert into some other data structures for admin functionality.
-     # tag_counts/current and tag_counts/all are sorted sets storing the count of
-     # jobs for each tag. tag/<tag> is a set of existing job ids that have that tag.
-     #
-     # Most all of this happens in Lua functions, for atomicity. See the other
-     # files in this directory -- functions.rb is a wrapper to call the lua
-     # functions, and the individual functions are defined in .lua files in this
-     # directory.
-
-     # these key mappings are duplicated in the redis lua code, in include.lua
-     module Keys
-       RUNNING_JOBS = "running_jobs"
-       FAILED_JOBS = "failed_jobs"
-       JOB = proc { |id| "job/#{id}" }
-       FAILED_JOB = proc { |id| "failed_job/#{id}" }
-       QUEUE = proc { |name| "queue/#{name}" }
-       FUTURE_QUEUE = proc { |name| "#{QUEUE[name]}/future" }
-       STRAND = proc { |strand| strand ? "strand/#{strand}" : nil }
-       TAG_COUNTS = proc { |flavor| "tag_counts/#{flavor}" }
-       TAG = proc { |tag| "tag/#{tag}" }
-     end
-
-     WAITING_STRAND_JOB_PRIORITY = 2000000
-     if WAITING_STRAND_JOB_PRIORITY <= Delayed::MAX_PRIORITY
-       # if you change this, note that the value is duplicated in include.lua
-       raise("Delayed::MAX_PRIORITY must be less than #{WAITING_STRAND_JOB_PRIORITY}")
-     end
-
-     COLUMNS = []
-
-     # We store time attributes in redis as floats so we don't have to do
-     # timestamp parsing in lua.
-     TIMESTAMP_COLUMNS = []
-     INTEGER_COLUMNS = []
-
-     def self.column(name, type)
-       COLUMNS << name
-
-       if type == :timestamp
-         TIMESTAMP_COLUMNS << name
-       elsif type == :integer
-         INTEGER_COLUMNS << name
-       end
-
-       attr_reader(name)
-       define_attribute_methods([name])
-       # Custom attr_writer that updates the dirty status.
-       class_eval(<<-EOS, __FILE__, __LINE__ + 1)
-         def #{name}=(new_value)
-           #{name}_will_change! unless new_value == self.#{name}
-           @#{name} = new_value
-         end
-       EOS
-     end
-
-     column(:id, :string)
-     column(:priority, :integer)
-     column(:attempts, :integer)
-     column(:handler, :string)
-     column(:last_error, :string)
-     column(:queue, :string)
-     column(:run_at, :timestamp)
-     column(:locked_at, :timestamp)
-     column(:failed_at, :timestamp)
-     column(:locked_by, :string)
-     column(:created_at, :timestamp)
-     column(:updated_at, :timestamp)
-     column(:tag, :string)
-     column(:max_attempts, :integer)
-     column(:strand, :string)
-     column(:source, :string)
-     column(:expires_at, :timestamp)
-
-     def initialize(attrs = {})
-       attrs.each { |k, v| self.send("#{k}=", v) }
-       self.priority ||= 0
-       self.attempts ||= 0
-       @new_record = true
-     end
-
-     def self.instantiate(attrs)
-       result = new(attrs)
-       result.instance_variable_set(:@new_record, false)
-       result
-     end
-
-     def self.create(attrs = {})
-       result = new(attrs)
-       result.save
-       result
-     end
-
-     def self.create!(attrs = {})
-       result = new(attrs)
-       result.save!
-       result
-     end
-
-     def [](key)
-       send(key)
-     end
-
-     def []=(key, value)
-       send("#{key}=", value)
-     end
-
-     def self.find(ids)
-       if Array === ids
-         find_some(ids, {})
-       else
-         find_one(ids, {})
-       end
-     end
-
-     def new_record?
-       !!@new_record
-     end
-
-     def destroyed?
-       !!@destroyed
-     end
-
-     def ==(other)
-       other.is_a?(self.class) && id == other.id
-     end
-
-     def hash
-       id.hash
-     end
-
-     def self.reconnect!
-       # redis cluster responds to reconnect directly,
-       # but individual redis needs it to be called on client
-       redis.respond_to?(:reconnect) ?
-         redis.reconnect :
-         redis.client.reconnect
-     end
-
-     def self.functions
-       @@functions ||= Delayed::Backend::Redis::Functions.new(redis)
-     end
-
-     def self.find_one(id, options)
-       job = self.get_with_ids([id]).first
-       job || raise(ActiveRecord::RecordNotFound, "Couldn't find Job with ID=#{id}")
-     end
-
-     def self.find_some(ids, options)
-       self.get_with_ids(ids).compact
-     end
-
-     def self.get_with_ids(ids)
-       ids.map { |id| self.instantiate_from_attrs(redis.hgetall(key_for_job_id(id))) }
-     end
-
-     def self.key_for_job_id(job_id)
-       Keys::JOB[job_id]
-     end
-
-     def self.get_and_lock_next_available(worker_name,
-                                          queue = Delayed::Settings.queue,
-                                          min_priority = Delayed::MIN_PRIORITY,
-                                          max_priority = Delayed::MAX_PRIORITY,
-                                          prefetch: nil,
-                                          prefetch_owner: nil,
-                                          forced_latency: nil)
-
-       check_queue(queue)
-       check_priorities(min_priority, max_priority)
-       if worker_name.is_a?(Array)
-         multiple_workers = true
-         worker_name = worker_name.first
-       end
-
-       # as an optimization this lua function returns the hash of job attributes,
-       # rather than just a job id, saving a round trip
-       now = db_time_now
-       now -= forced_latency if forced_latency
-       job_attrs = functions.get_and_lock_next_available(worker_name, queue, min_priority, max_priority, now)
-       job = instantiate_from_attrs(job_attrs) # will return nil if the attrs are blank
-       if multiple_workers
-         if job.nil?
-           job = {}
-         else
-           job = { worker_name => job }
-         end
-       end
-       job
-     end
-
-     def self.find_available(limit,
-                             queue = Delayed::Settings.queue,
-                             min_priority = Delayed::MIN_PRIORITY,
-                             max_priority = Delayed::MAX_PRIORITY)
-
-       check_queue(queue)
-       check_priorities(min_priority, max_priority)
-
-       self.find(functions.find_available(queue, limit, 0, min_priority, max_priority, db_time_now))
-     end
-
-     # get a list of jobs of the given flavor in the given queue
-     # flavor is :current, :future, :failed, :strand or :tag
-     # depending on the flavor, query has a different meaning:
-     # for :current and :future, it's the queue name (defaults to Delayed::Settings.queue)
-     # for :strand it's the strand name
-     # for :tag it's the tag name
-     # for :failed it's ignored
-     def self.list_jobs(flavor,
-                        limit,
-                        offset = 0,
-                        query = nil)
-       case flavor.to_s
-       when 'current'
-         query ||= Delayed::Settings.queue
-         check_queue(query)
-         self.find(functions.find_available(query, limit, offset, 0, "+inf", db_time_now))
-       when 'future'
-         query ||= Delayed::Settings.queue
-         check_queue(query)
-         self.find(redis.zrangebyscore(Keys::FUTURE_QUEUE[query], 0, "+inf", :limit => [offset, limit]))
-       when 'failed'
-         Failed.find(redis.zrevrangebyscore(Keys::FAILED_JOBS, "+inf", 0, :limit => [offset, limit]))
-       when 'strand'
-         self.find(redis.lrange(Keys::STRAND[query], offset, offset + limit - 1))
-       when 'tag'
-         # This is optimized for writing, since list_jobs(:tag) will only ever happen in the admin UI
-         ids = redis.smembers(Keys::TAG[query])
-         self.find(ids[offset, limit])
-       else
-         raise ArgumentError, "invalid flavor: #{flavor.inspect}"
-       end
-     end
-
-     # get the total job count for the given flavor
-     # flavor is :current, :future or :failed
-     # for the :failed flavor, queue is currently ignored
-     def self.jobs_count(flavor,
-                         queue = Delayed::Settings.queue)
-       case flavor.to_s
-       when 'current'
-         check_queue(queue)
-         redis.zcard(Keys::QUEUE[queue])
-       when 'future'
-         check_queue(queue)
-         redis.zcard(Keys::FUTURE_QUEUE[queue])
-       when 'failed'
-         redis.zcard(Keys::FAILED_JOBS)
-       else
-         raise ArgumentError, "invalid flavor: #{flavor.inspect}"
-       end
-     end
-
-     def self.strand_size(strand)
-       redis.llen(Keys::STRAND[strand])
-     end
-
-     def self.running_jobs()
-       self.find(redis.zrangebyscore(Keys::RUNNING_JOBS, 0, "+inf"))
-     end
-
-     def self.clear_locks!(worker_name)
-       self.running_jobs.each do |job|
-         # TODO: mark the job as failed one attempt
-         job.unlock! if job.locked_by == worker_name
-       end
-       nil
-     end
-
-     # returns a list of hashes { :tag => tag_name, :count => current_count }
-     # in descending count order
-     # flavor is :current or :all
-     def self.tag_counts(flavor,
-                         limit,
-                         offset = 0)
-       raise(ArgumentError, "invalid flavor: #{flavor.inspect}") unless %w(current all).include?(flavor.to_s)
-       key = Keys::TAG_COUNTS[flavor]
-       redis.zrevrangebyscore(key, '+inf', 1, :limit => [offset, limit], :withscores => true).map { |tag, count| { :tag => tag, :count => count } }
-     end
-
-     # perform a bulk update of a set of jobs
-     # action is :hold, :unhold, or :destroy
-     # to specify the jobs to act on, either pass opts[:ids] = [list of job ids]
-     # or opts[:flavor] = <some flavor> to perform on all jobs of that flavor
-     #
-     # see the list_jobs action for the list of available flavors and the meaning
-     # of opts[:query] for each
-     def self.bulk_update(action, opts)
-       if %w(current future).include?(opts[:flavor].to_s)
-         opts[:query] ||= Delayed::Settings.queue
-       end
-       functions.bulk_update(action, opts[:ids], opts[:flavor], opts[:query], db_time_now)
-     end
-
-     def self.create_singleton(options)
-       self.create!(options.merge(:singleton => true))
-     end
-
-     def self.unlock(jobs)
-       jobs.each(&:unlock!)
-       jobs.length
-     end
-
-     # not saved, just used as a marker when creating
-     attr_accessor :singleton, :on_conflict
-
-     def transfer_lock!(from:, to:)
-       lock_in_redis!(to)
-     end
-
-     def lock_in_redis!(worker_name)
-       self.locked_at = self.class.db_time_now
-       self.locked_by = worker_name
-       save
-     end
-
-     def unlock!
-       unlock
-       save!
-     end
-
-     def save(*a)
-       return false if destroyed?
-       result = run_callbacks(:save) do
-         if new_record?
-           run_callbacks(:create) { create }
-         else
-           update
-         end
-       end
-       changes_applied
-       result
-     end
-
-     if Rails.version < "4.1"
-       def changes_applied
-         @previously_changed = changes
-         @changed_attributes.clear
-       end
-     end
-
-     def save!(*a)
-       save(*a) || raise(RecordNotSaved)
-     end
-
-     def destroy
-       self.class.functions.destroy_job(id, self.class.db_time_now)
-       @destroyed = true
-       freeze
-     end
-
-     # take this job off the strand, and queue up the next strand job if this job
-     # was at the front
-     def tickle_strand
-       if strand.present?
-         self.class.functions.tickle_strand(id, strand, self.class.db_time_now)
-       end
-     end
-
-     def create_and_lock!(worker_name)
-       raise "job already exists" unless new_record?
-       lock_in_redis!(worker_name)
-     end
-
-     def fail!
-       self.failed_at = self.class.db_time_now
-       save!
-       redis.rename Keys::JOB[id], Keys::FAILED_JOB[id]
-       tickle_strand
-       self
-     end
-
-     protected
-
-     def update_queues
-       if failed_at
-         self.class.functions.fail_job(id)
-       elsif locked_at
-         self.class.functions.set_running(id)
-       elsif singleton
-         job_id = self.class.functions.create_singleton(id, queue, strand, self.class.db_time_now)
-         # if create_singleton returns a different job id, that means this job got
-         # deleted because there was already that other job on the strand. so
-         # replace this job with the other for returning.
-         if job_id != self.id
-           singleton = self.class.find(job_id)
-
-           self.on_conflict ||= :use_earliest
-           singleton.run_at =
-             case self.on_conflict
-             when :use_earliest
-               [singleton.run_at, run_at].min
-             when :overwrite
-               run_at
-             when :loose
-               singleton.run_at
-             end
-           singleton.handler = self.handler if self.on_conflict == :overwrite
-           singleton.save! if singleton.changed?
-           COLUMNS.each { |c| send("#{c}=", singleton.send(c)) }
-         end
-       else
-         self.class.functions.enqueue(id, queue, strand, self.class.db_time_now)
-       end
-     end
-
-     def create
-       self.id ||= SecureRandom.hex(16)
-       self.created_at = self.updated_at = Time.now.utc
-       save_job_to_redis
-       update_queues
-
-       @new_record = false
-       self.id
-     end
-
-     def update
-       self.updated_at = Time.now.utc
-       save_job_to_redis
-       update_queues
-       true
-     end
-
-     def queue_score
-       "#{priority}.#{run_at.to_i}".to_f
-     end
-
-     def save_job_to_redis
-       to_delete = []
-       attrs = {}
-       COLUMNS.each do |k|
-         v = send(k)
-         if v.nil?
-           to_delete << k if !new_record? && changed.include?(k.to_s)
-         elsif v.is_a?(ActiveSupport::TimeWithZone) || v.is_a?(Time)
-           attrs[k] = v.utc.to_f
-         else
-           attrs[k] = v.as_json
-         end
-       end
-       key = Keys::JOB[id]
-       redis.mapped_hmset(key, attrs)
-       redis.hdel(key, to_delete) unless to_delete.empty?
-     end
-
-     def self.instantiate_from_attrs(redis_attrs)
-       if redis_attrs['id'].present?
-         attrs = redis_attrs.with_indifferent_access
-         TIMESTAMP_COLUMNS.each { |k| attrs[k] = Time.zone.at(attrs[k].to_f) if attrs[k] }
-         INTEGER_COLUMNS.each { |k| attrs[k] = attrs[k].to_i if attrs[k] }
-         instantiate(attrs)
-       else
-         nil
-       end
-     end
-
-     def global_id
-       id
-     end
-
-     class Failed < Job
-       include Delayed::Backend::Base
-       def self.key_for_job_id(job_id)
-         Keys::FAILED_JOB[job_id]
-       end
-
-       def original_job_id
-         id
-       end
-     end
-   end
- end
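The comments in the removed job.rb above describe the Redis layout: job attributes live in a hash at job/<id>, runnable jobs sit in the sorted set queue/<queue_name> scored by "<priority>.<run_at>", and future jobs go to queue/<queue_name>/future scored by run_at alone. The standalone Ruby sketch below illustrates that scoring scheme with the plain redis-rb client; it is only an illustration of the layout, not code from the gem (the gem performed these writes atomically through its Lua functions), and the queue name and attribute values here are made up.

    require "redis"
    require "securerandom"
    require "json"

    redis    = Redis.new
    id       = SecureRandom.hex(16)   # jobs use a random hex id, not an integer
    run_at   = Time.now.utc
    priority = 10

    # Attributes are stored in a hash at job/<id>; timestamps as epoch floats.
    redis.mapped_hmset("job/#{id}",
                       "id"       => id,
                       "priority" => priority,
                       "queue"    => "example_queue",
                       "handler"  => { "method" => "ping" }.to_json,
                       "run_at"   => run_at.to_f)

    # Runnable jobs are scored "<priority>.<run_at>", so ZRANGEBYSCORE returns
    # them ordered by priority first and run_at second.
    score = "#{priority}.#{run_at.to_i}".to_f
    redis.zadd("queue/example_queue", score, id)

    # The next candidate job is simply the lowest-scored member of the queue.
    next_id = redis.zrangebyscore("queue/example_queue", 0, "+inf", limit: [0, 1]).first
    puts next_id == id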
data/lib/delayed/backend/redis/set_running.lua
@@ -1,5 +0,0 @@
- local job_id = unpack(ARGV)
- local locked_at, queue, strand = unpack(redis.call('HMGET', Keys.job(job_id), 'locked_at', 'queue', 'strand'))
-
- remove_from_queues(job_id, queue, strand)
- redis.call('ZADD', Keys.running_jobs(), locked_at, job_id)
data/lib/delayed/backend/redis/tickle_strand.lua
@@ -1,2 +0,0 @@
- local job_id, strand, now = unpack(ARGV)
- tickle_strand(job_id, strand, now)
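The two Lua snippets above execute inside Redis and call helpers (Keys.job, remove_from_queues, tickle_strand) defined in the likewise-removed includes/jobs_common.lua; on the Ruby side, job.rb invoked them through the Delayed::Backend::Redis::Functions wrapper, which is also removed in this release. The sketch below shows one common way such a wrapper can be built with redis-rb script caching; it is an assumed reconstruction of the general pattern, not the actual contents of functions.rb, and the class name and directory path are illustrative.

    require "redis"
    require "digest"

    # Hypothetical wrapper: concatenates the shared helper file with each named
    # script and runs it via EVALSHA, falling back to EVAL when the script is
    # not yet cached on the server.
    class JobScripts
      def initialize(redis, dir = "lib/delayed/backend/redis")
        @redis  = redis
        @dir    = dir
        @common = File.read(File.join(dir, "includes/jobs_common.lua"))
      end

      def set_running(job_id)
        run("set_running", argv: [job_id])
      end

      def tickle_strand(job_id, strand, now)
        run("tickle_strand", argv: [job_id, strand, now])
      end

      private

      def run(name, keys: [], argv: [])
        script = @common + File.read(File.join(@dir, "#{name}.lua"))
        @redis.evalsha(Digest::SHA1.hexdigest(script), keys: keys, argv: argv)
      rescue Redis::CommandError => e
        raise unless e.message.include?("NOSCRIPT")
        @redis.eval(script, keys: keys, argv: argv)
      end
    end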
data/spec/gemfiles/42.gemfile
@@ -1,7 +0,0 @@
- source "https://rubygems.org"
-
- gemspec :path=>"../../"
-
- gem "rails", "~> 4.2.5"
- gem "after_transaction_commit", "<2"
- gem 'test_after_commit', '0.4.1'
data/spec/gemfiles/50.gemfile
@@ -1,7 +0,0 @@
- source "https://rubygems.org"
-
- gemspec :path=>"../../"
-
- gem "rails", "~> 5.0.0"
- gem 'sinatra', "2.0.0.beta2"
- gem 'sinatra-contrib', "2.0.0.beta2"
data/spec/gemfiles/51.gemfile
@@ -1,7 +0,0 @@
- source "https://rubygems.org"
-
- gemspec :path=>"../../"
-
- gem "rails", "~> 5.1.0"
- gem 'sinatra', "2.0.0.beta2"
- gem 'sinatra-contrib', "2.0.0.beta2"
data/spec/gemfiles/52.gemfile
@@ -1,7 +0,0 @@
- source "https://rubygems.org"
-
- gemspec :path=>"../../"
-
- gem "rails", "~> 5.2.0"
- gem 'sinatra', "2.0.0.beta2"
- gem 'sinatra-contrib', "2.0.0.beta2"
data/spec/gemfiles/60.gemfile
@@ -1,7 +0,0 @@
- source "https://rubygems.org"
-
- gemspec :path=>"../../"
-
- gem "rails", "~> 6.0.0"
- gem 'sinatra', "2.0.0.beta2"
- gem 'sinatra-contrib', "2.0.0.beta2"