inst-jobs 2.3.3 → 2.4.11

Sign up to get free protection for your applications and to get access to all the features.
Files changed (96) hide show
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +3 -3
  21. data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
  22. data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
  23. data/db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb +56 -0
  24. data/exe/inst_jobs +3 -2
  25. data/lib/delayed/backend/active_record.rb +204 -150
  26. data/lib/delayed/backend/base.rb +106 -82
  27. data/lib/delayed/batch.rb +11 -9
  28. data/lib/delayed/cli.rb +98 -84
  29. data/lib/delayed/core_ext/kernel.rb +4 -2
  30. data/lib/delayed/daemon.rb +70 -74
  31. data/lib/delayed/job_tracking.rb +26 -25
  32. data/lib/delayed/lifecycle.rb +27 -24
  33. data/lib/delayed/log_tailer.rb +17 -17
  34. data/lib/delayed/logging.rb +13 -16
  35. data/lib/delayed/message_sending.rb +43 -52
  36. data/lib/delayed/performable_method.rb +6 -8
  37. data/lib/delayed/periodic.rb +72 -65
  38. data/lib/delayed/plugin.rb +2 -4
  39. data/lib/delayed/pool.rb +198 -193
  40. data/lib/delayed/server/helpers.rb +6 -6
  41. data/lib/delayed/server.rb +51 -54
  42. data/lib/delayed/settings.rb +93 -81
  43. data/lib/delayed/testing.rb +21 -22
  44. data/lib/delayed/version.rb +1 -1
  45. data/lib/delayed/work_queue/in_process.rb +21 -18
  46. data/lib/delayed/work_queue/parent_process/client.rb +54 -55
  47. data/lib/delayed/work_queue/parent_process/server.rb +219 -208
  48. data/lib/delayed/work_queue/parent_process.rb +52 -53
  49. data/lib/delayed/worker/consul_health_check.rb +21 -19
  50. data/lib/delayed/worker/health_check.rb +29 -22
  51. data/lib/delayed/worker/null_health_check.rb +3 -1
  52. data/lib/delayed/worker/process_helper.rb +8 -9
  53. data/lib/delayed/worker.rb +271 -265
  54. data/lib/delayed/yaml_extensions.rb +12 -10
  55. data/lib/delayed_job.rb +37 -38
  56. data/lib/inst-jobs.rb +1 -1
  57. data/spec/active_record_job_spec.rb +129 -136
  58. data/spec/delayed/cli_spec.rb +7 -7
  59. data/spec/delayed/daemon_spec.rb +10 -9
  60. data/spec/delayed/message_sending_spec.rb +16 -9
  61. data/spec/delayed/periodic_spec.rb +13 -12
  62. data/spec/delayed/server_spec.rb +38 -38
  63. data/spec/delayed/settings_spec.rb +26 -25
  64. data/spec/delayed/work_queue/in_process_spec.rb +7 -8
  65. data/spec/delayed/work_queue/parent_process/client_spec.rb +17 -12
  66. data/spec/delayed/work_queue/parent_process/server_spec.rb +70 -41
  67. data/spec/delayed/work_queue/parent_process_spec.rb +21 -23
  68. data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
  69. data/spec/delayed/worker/health_check_spec.rb +60 -52
  70. data/spec/delayed/worker_spec.rb +28 -25
  71. data/spec/sample_jobs.rb +45 -15
  72. data/spec/shared/delayed_batch.rb +74 -67
  73. data/spec/shared/delayed_method.rb +143 -102
  74. data/spec/shared/performable_method.rb +39 -38
  75. data/spec/shared/shared_backend.rb +547 -441
  76. data/spec/shared/testing.rb +14 -14
  77. data/spec/shared/worker.rb +155 -147
  78. data/spec/shared_jobs_specs.rb +13 -13
  79. data/spec/spec_helper.rb +46 -41
  80. metadata +79 -55
  81. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  82. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  83. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  84. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  85. data/lib/delayed/backend/redis/find_available.lua +0 -3
  86. data/lib/delayed/backend/redis/functions.rb +0 -59
  87. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  88. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  89. data/lib/delayed/backend/redis/job.rb +0 -528
  90. data/lib/delayed/backend/redis/set_running.lua +0 -5
  91. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  92. data/spec/gemfiles/52.gemfile +0 -7
  93. data/spec/gemfiles/60.gemfile +0 -7
  94. data/spec/gemfiles/60.gemfile.lock +0 -246
  95. data/spec/gemfiles/61.gemfile +0 -7
  96. data/spec/redis_job_spec.rb +0 -148
@@ -1,528 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- # This can't currently be made compatible with redis cluster, because the Lua functions
4
- # access keys that aren't in their keys argument list (since they pop jobs off
5
- # a queue and then update the job with that id).
6
-
7
- # still TODO:
8
- # * a consequence of our ignore-redis-failures code is that if redis is unavailable, creating delayed jobs silently fails, which is probably not what we want
9
- # * need a way to migrate between jobs backends
10
- # * we need some auditors:
11
- # * fail jobs in running_jobs if they've timed out
12
- # * have pools audit their workers and immediately fail jobs locked by dead workers (make sure this handles the restart case where two pools are running)
13
- # * have a master auditor that fails jobs if a whole pool dies
14
- # * audit strands occasionally, look for any stuck strands where the strand queue isn't empty but there's no strand job running or queued
15
- module Delayed::Backend::Redis
16
- require 'delayed/backend/redis/functions'
17
-
18
- class Job
19
- extend ActiveModel::Callbacks
20
- define_model_callbacks :create, :save
21
- include ActiveModel::Dirty
22
- include Delayed::Backend::Base
23
- # This redis instance needs to be set by the application during jobs configuration
24
- cattr_accessor :redis
25
-
26
- # An overview of where and when things are stored in redis:
27
- #
28
- # Jobs are given a UUID for an id, rather than an incrementing integer id.
29
- # The job attributes are then stored in a redis hash at job/<id>. Attribute
30
- # values are generally stored as their json representation, except for
31
- # timestamps, which are stored as floating point utc-time-since-unix-epoch
32
- # values, so that we can compare timestamps in Lua without a date parser.
33
- #
34
- # Jobs that are scheduled to run immediately (in the present/past) are
35
- # inserted into the queue named queue/<queue_name>. The queue is a sorted
36
- # set, with the value being the job id and the weight being a floating point
37
- # value, <priority>.<run_at>. This formatting is key to efficient
38
- # querying of the next job to run.
39
- #
40
- # Jobs that are scheduled to run in the future are not inserted into the
41
- # queue, but rather a future queue named queue/<queue_name>/future. This
42
- # queue is also a sorted set, with the value being the job id, but the weight
43
- # is just the <run_at> value.
44
- #
45
- # If the job is on a strand, the flow is different. First, it's inserted into
46
- # a list named strand/<strand>. When strand jobs are inserted into the
47
- # current jobs queue, we check if they're next to run in the strand. If not,
48
- # we give them a special priority that is greater than MAX_PRIORITY, so that
49
- # they won't run. When a strand job is finished, failed or deleted,
50
- # "tickle_strand" is called, which removes that job from the list and if that
51
- # job was at the front of the list, changes the priority on the next job so
52
- # that it's eligible to run.
53
- #
54
- # For singletons, the flow is the same as for other strand jobs, except that
55
- # the job is thrown out if there are already any non-running jobs in the
56
- # strand list.
57
- #
58
- # If a job fails, it's removed from the normal queues and inserted into the
59
- # failed_jobs sorted set, with job id as the value and failure time as the
60
- # key. The hash of job attributes is also renamed from job/<id> to
61
- # failed_job/<id> -- use Delayed::Job::Failed to query those jobs, same as
62
- # with AR jobs.
63
- #
64
- # We also insert into some other data structures for admin functionality.
65
- # tag_counts/current and tag_counts/all are sorted sets storing the count of
66
- # jobs for each tag. tag/<tag> is a set of existing job ids that have that tag.
67
- #
68
- # Most all of this happens in Lua functions, for atomicity. See the other
69
- # files in this directory -- functions.rb is a wrapper to call the lua
70
- # functions, and the individual functions are defined in .lua files in this
71
- # directory.
72
-
73
- # these key mappings are duplicated in the redis lua code, in include.lua
74
- module Keys
75
- RUNNING_JOBS = "running_jobs"
76
- FAILED_JOBS = "failed_jobs"
77
- JOB = proc { |id| "job/#{id}" }
78
- FAILED_JOB = proc { |id| "failed_job/#{id}" }
79
- QUEUE = proc { |name| "queue/#{name}" }
80
- FUTURE_QUEUE = proc { |name| "#{QUEUE[name]}/future" }
81
- STRAND = proc { |strand| strand ? "strand/#{strand}" : nil }
82
- TAG_COUNTS = proc { |flavor| "tag_counts/#{flavor}" }
83
- TAG = proc { |tag| "tag/#{tag}" }
84
- end
85
-
86
- WAITING_STRAND_JOB_PRIORITY = 2000000
87
- if WAITING_STRAND_JOB_PRIORITY <= Delayed::MAX_PRIORITY
88
- # if you change this, note that the value is duplicated in include.lua
89
- raise("Delayed::MAX_PRIORITY must be less than #{WAITING_STRAND_JOB_PRIORITY}")
90
- end
91
-
92
- COLUMNS = []
93
-
94
- # We store time attributes in redis as floats so we don't have to do
95
- # timestamp parsing in lua.
96
- TIMESTAMP_COLUMNS = []
97
- INTEGER_COLUMNS = []
98
-
99
- def self.column(name, type)
100
- COLUMNS << name
101
-
102
- if type == :timestamp
103
- TIMESTAMP_COLUMNS << name
104
- elsif type == :integer
105
- INTEGER_COLUMNS << name
106
- end
107
-
108
- attr_reader(name)
109
- define_attribute_methods([name])
110
- # Custom attr_writer that updates the dirty status.
111
- class_eval(<<-EOS, __FILE__, __LINE__ + 1)
112
- def #{name}=(new_value)
113
- #{name}_will_change! unless new_value == self.#{name}
114
- @#{name} = new_value
115
- end
116
- EOS
117
- end
118
-
119
- column(:id, :string)
120
- column(:priority, :integer)
121
- column(:attempts, :integer)
122
- column(:handler, :string)
123
- column(:last_error, :string)
124
- column(:queue, :string)
125
- column(:run_at, :timestamp)
126
- column(:locked_at, :timestamp)
127
- column(:failed_at, :timestamp)
128
- column(:locked_by, :string)
129
- column(:created_at, :timestamp)
130
- column(:updated_at, :timestamp)
131
- column(:tag, :string)
132
- column(:max_attempts, :integer)
133
- column(:strand, :string)
134
- column(:source, :string)
135
- column(:expires_at, :timestamp)
136
-
137
- def initialize(attrs = {})
138
- attrs.each { |k, v| self.send("#{k}=", v) }
139
- self.priority ||= 0
140
- self.attempts ||= 0
141
- @new_record = true
142
- end
143
-
144
- def self.instantiate(attrs)
145
- result = new(attrs)
146
- result.instance_variable_set(:@new_record, false)
147
- result
148
- end
149
-
150
- def self.create(attrs = {})
151
- result = new(attrs)
152
- result.save
153
- result
154
- end
155
-
156
- def self.create!(attrs = {})
157
- result = new(attrs)
158
- result.save!
159
- result
160
- end
161
-
162
- def [](key)
163
- send(key)
164
- end
165
-
166
- def []=(key, value)
167
- send("#{key}=", value)
168
- end
169
-
170
- def self.find(ids)
171
- if Array === ids
172
- find_some(ids, {})
173
- else
174
- find_one(ids, {})
175
- end
176
- end
177
-
178
- def new_record?
179
- !!@new_record
180
- end
181
-
182
- def destroyed?
183
- !!@destroyed
184
- end
185
-
186
- def ==(other)
187
- other.is_a?(self.class) && id == other.id
188
- end
189
-
190
- def hash
191
- id.hash
192
- end
193
-
194
- def self.reconnect!
195
- # redis cluster responds to reconnect directly,
196
- # but individual redis needs it to be called on client
197
- redis.respond_to?(:reconnect) ?
198
- redis.reconnect :
199
- redis.client.reconnect
200
- end
201
-
202
- def self.functions
203
- @@functions ||= Delayed::Backend::Redis::Functions.new(redis)
204
- end
205
-
206
- def self.find_one(id, options)
207
- job = self.get_with_ids([id]).first
208
- job || raise(ActiveRecord::RecordNotFound, "Couldn't find Job with ID=#{id}")
209
- end
210
-
211
- def self.find_some(ids, options)
212
- self.get_with_ids(ids).compact
213
- end
214
-
215
- def self.get_with_ids(ids)
216
- ids.map { |id| self.instantiate_from_attrs(redis.hgetall(key_for_job_id(id))) }
217
- end
218
-
219
- def self.key_for_job_id(job_id)
220
- Keys::JOB[job_id]
221
- end
222
-
223
- def self.get_and_lock_next_available(worker_name,
224
- queue = Delayed::Settings.queue,
225
- min_priority = Delayed::MIN_PRIORITY,
226
- max_priority = Delayed::MAX_PRIORITY,
227
- prefetch: nil,
228
- prefetch_owner: nil,
229
- forced_latency: nil)
230
-
231
- check_queue(queue)
232
- check_priorities(min_priority, max_priority)
233
- if worker_name.is_a?(Array)
234
- multiple_workers = true
235
- worker_name = worker_name.first
236
- end
237
-
238
- # as an optimization this lua function returns the hash of job attributes,
239
- # rather than just a job id, saving a round trip
240
- now = db_time_now
241
- now -= forced_latency if forced_latency
242
- job_attrs = functions.get_and_lock_next_available(worker_name, queue, min_priority, max_priority, now)
243
- job = instantiate_from_attrs(job_attrs) # will return nil if the attrs are blank
244
- if multiple_workers
245
- if job.nil?
246
- job = {}
247
- else
248
- job = { worker_name => job }
249
- end
250
- end
251
- job
252
- end
253
-
254
- def self.find_available(limit,
255
- queue = Delayed::Settings.queue,
256
- min_priority = Delayed::MIN_PRIORITY,
257
- max_priority = Delayed::MAX_PRIORITY)
258
-
259
- check_queue(queue)
260
- check_priorities(min_priority, max_priority)
261
-
262
- self.find(functions.find_available(queue, limit, 0, min_priority, max_priority, db_time_now))
263
- end
264
-
265
- # get a list of jobs of the given flavor in the given queue
266
- # flavor is :current, :future, :failed, :strand or :tag
267
- # depending on the flavor, query has a different meaning:
268
- # for :current and :future, it's the queue name (defaults to Delayed::Settings.queue)
269
- # for :strand it's the strand name
270
- # for :tag it's the tag name
271
- # for :failed it's ignored
272
- def self.list_jobs(flavor,
273
- limit,
274
- offset = 0,
275
- query = nil)
276
- case flavor.to_s
277
- when 'current'
278
- query ||= Delayed::Settings.queue
279
- check_queue(query)
280
- self.find(functions.find_available(query, limit, offset, 0, "+inf", db_time_now))
281
- when 'future'
282
- query ||= Delayed::Settings.queue
283
- check_queue(query)
284
- self.find(redis.zrangebyscore(Keys::FUTURE_QUEUE[query], 0, "+inf", :limit => [offset, limit]))
285
- when 'failed'
286
- Failed.find(redis.zrevrangebyscore(Keys::FAILED_JOBS, "+inf", 0, :limit => [offset, limit]))
287
- when 'strand'
288
- self.find(redis.lrange(Keys::STRAND[query], offset, offset + limit - 1))
289
- when 'tag'
290
- # This is optimized for writing, since list_jobs(:tag) will only ever happen in the admin UI
291
- ids = redis.smembers(Keys::TAG[query])
292
- self.find(ids[offset, limit])
293
- else
294
- raise ArgumentError, "invalid flavor: #{flavor.inspect}"
295
- end
296
- end
297
-
298
- # get the total job count for the given flavor
299
- # flavor is :current, :future or :failed
300
- # for the :failed flavor, queue is currently ignored
301
- def self.jobs_count(flavor,
302
- queue = Delayed::Settings.queue)
303
- case flavor.to_s
304
- when 'current'
305
- check_queue(queue)
306
- redis.zcard(Keys::QUEUE[queue])
307
- when 'future'
308
- check_queue(queue)
309
- redis.zcard(Keys::FUTURE_QUEUE[queue])
310
- when 'failed'
311
- redis.zcard(Keys::FAILED_JOBS)
312
- else
313
- raise ArgumentError, "invalid flavor: #{flavor.inspect}"
314
- end
315
- end
316
-
317
- def self.strand_size(strand)
318
- redis.llen(Keys::STRAND[strand])
319
- end
320
-
321
- def self.running_jobs()
322
- self.find(redis.zrangebyscore(Keys::RUNNING_JOBS, 0, "+inf"))
323
- end
324
-
325
- def self.clear_locks!(worker_name)
326
- self.running_jobs.each do |job|
327
- # TODO: mark the job as failed one attempt
328
- job.unlock! if job.locked_by == worker_name
329
- end
330
- nil
331
- end
332
-
333
- # returns a list of hashes { :tag => tag_name, :count => current_count }
334
- # in descending count order
335
- # flavor is :current or :all
336
- def self.tag_counts(flavor,
337
- limit,
338
- offset = 0)
339
- raise(ArgumentError, "invalid flavor: #{flavor.inspect}") unless %w(current all).include?(flavor.to_s)
340
- key = Keys::TAG_COUNTS[flavor]
341
- redis.zrevrangebyscore(key, '+inf', 1, :limit => [offset, limit], :withscores => true).map { |tag, count| { :tag => tag, :count => count } }
342
- end
343
-
344
- # perform a bulk update of a set of jobs
345
- # action is :hold, :unhold, or :destroy
346
- # to specify the jobs to act on, either pass opts[:ids] = [list of job ids]
347
- # or opts[:flavor] = <some flavor> to perform on all jobs of that flavor
348
- #
349
- # see the list_jobs action for the list of available flavors and the meaning
350
- # of opts[:query] for each
351
- def self.bulk_update(action, opts)
352
- if %w(current future).include?(opts[:flavor].to_s)
353
- opts[:query] ||= Delayed::Settings.queue
354
- end
355
- functions.bulk_update(action, opts[:ids], opts[:flavor], opts[:query], db_time_now)
356
- end
357
-
358
- def self.create_singleton(options)
359
- self.create!(options.merge(:singleton => true))
360
- end
361
-
362
- def self.unlock(jobs)
363
- jobs.each(&:unlock!)
364
- jobs.length
365
- end
366
-
367
- # not saved, just used as a marker when creating
368
- attr_accessor :singleton, :on_conflict
369
-
370
- def transfer_lock!(from:, to:)
371
- lock_in_redis!(to)
372
- end
373
-
374
- def lock_in_redis!(worker_name)
375
- self.locked_at = self.class.db_time_now
376
- self.locked_by = worker_name
377
- save
378
- end
379
-
380
- def unlock!
381
- unlock
382
- save!
383
- end
384
-
385
- def save(*a)
386
- return false if destroyed?
387
- result = run_callbacks(:save) do
388
- if new_record?
389
- run_callbacks(:create) { create }
390
- else
391
- update
392
- end
393
- end
394
- changes_applied
395
- result
396
- end
397
-
398
- def save!(*a)
399
- save(*a) || raise(RecordNotSaved)
400
- end
401
-
402
- def destroy
403
- self.class.functions.destroy_job(id, self.class.db_time_now)
404
- @destroyed = true
405
- freeze
406
- end
407
-
408
- # take this job off the strand, and queue up the next strand job if this job
409
- # was at the front
410
- def tickle_strand
411
- if strand.present?
412
- self.class.functions.tickle_strand(id, strand, self.class.db_time_now)
413
- end
414
- end
415
-
416
- def create_and_lock!(worker_name)
417
- raise "job already exists" unless new_record?
418
- lock_in_redis!(worker_name)
419
- end
420
-
421
- def fail!
422
- self.failed_at = self.class.db_time_now
423
- save!
424
- redis.rename Keys::JOB[id], Keys::FAILED_JOB[id]
425
- tickle_strand
426
- self
427
- end
428
-
429
- protected
430
-
431
- def update_queues
432
- if failed_at
433
- self.class.functions.fail_job(id)
434
- elsif locked_at
435
- self.class.functions.set_running(id)
436
- elsif singleton
437
- job_id = self.class.functions.create_singleton(id, queue, strand, self.class.db_time_now)
438
- # if create_singleton returns a different job id, that means this job got
439
- # deleted because there was already that other job on the strand. so
440
- # replace this job with the other for returning.
441
- if job_id != self.id
442
- singleton = self.class.find(job_id)
443
-
444
- self.on_conflict ||= :use_earliest
445
- singleton.run_at =
446
- case self.on_conflict
447
- when :use_earliest
448
- [singleton.run_at, run_at].min
449
- when :overwrite
450
- run_at
451
- when :loose
452
- singleton.run_at
453
- end
454
- singleton.handler = self.handler if self.on_conflict == :overwrite
455
- singleton.save! if singleton.changed?
456
- COLUMNS.each { |c| send("#{c}=", singleton.send(c)) }
457
- end
458
- else
459
- self.class.functions.enqueue(id, queue, strand, self.class.db_time_now)
460
- end
461
- end
462
-
463
- def create
464
- self.id ||= SecureRandom.hex(16)
465
- self.created_at = self.updated_at = Time.now.utc
466
- save_job_to_redis
467
- update_queues
468
-
469
- @new_record = false
470
- self.id
471
- end
472
-
473
- def update
474
- self.updated_at = Time.now.utc
475
- save_job_to_redis
476
- update_queues
477
- true
478
- end
479
-
480
- def queue_score
481
- "#{priority}.#{run_at.to_i}".to_f
482
- end
483
-
484
- def save_job_to_redis
485
- to_delete = []
486
- attrs = {}
487
- COLUMNS.each do |k|
488
- v = send(k)
489
- if v.nil?
490
- to_delete << k if !new_record? && changed.include?(k.to_s)
491
- elsif v.is_a?(ActiveSupport::TimeWithZone) || v.is_a?(Time)
492
- attrs[k] = v.utc.to_f
493
- else
494
- attrs[k] = v.as_json
495
- end
496
- end
497
- key = Keys::JOB[id]
498
- redis.mapped_hmset(key, attrs)
499
- redis.hdel(key, to_delete) unless to_delete.empty?
500
- end
501
-
502
- def self.instantiate_from_attrs(redis_attrs)
503
- if redis_attrs['id'].present?
504
- attrs = redis_attrs.with_indifferent_access
505
- TIMESTAMP_COLUMNS.each { |k| attrs[k] = Time.zone.at(attrs[k].to_f) if attrs[k] }
506
- INTEGER_COLUMNS.each { |k| attrs[k] = attrs[k].to_i if attrs[k] }
507
- instantiate(attrs)
508
- else
509
- nil
510
- end
511
- end
512
-
513
- def global_id
514
- id
515
- end
516
-
517
- class Failed < Job
518
- include Delayed::Backend::Base
519
- def self.key_for_job_id(job_id)
520
- Keys::FAILED_JOB[job_id]
521
- end
522
-
523
- def original_job_id
524
- id
525
- end
526
- end
527
- end
528
- end
@@ -1,5 +0,0 @@
1
- local job_id = unpack(ARGV)
2
- local locked_at, queue, strand = unpack(redis.call('HMGET', Keys.job(job_id), 'locked_at', 'queue', 'strand'))
3
-
4
- remove_from_queues(job_id, queue, strand)
5
- redis.call('ZADD', Keys.running_jobs(), locked_at, job_id)
@@ -1,2 +0,0 @@
1
- local job_id, strand, now = unpack(ARGV)
2
- tickle_strand(job_id, strand, now)
@@ -1,7 +0,0 @@
1
- source "https://rubygems.org"
2
-
3
- gemspec :path => "../../"
4
-
5
- gem "rails", "~> 5.2.0"
6
- gem 'sinatra', "~> 2.0"
7
- gem 'sinatra-contrib', "~> 2.0"
@@ -1,7 +0,0 @@
1
- source "https://rubygems.org"
2
-
3
- gemspec :path => "../../"
4
-
5
- gem "rails", "~> 6.0.0"
6
- gem 'sinatra', "~> 2.0"
7
- gem 'sinatra-contrib', "~> 2.0"