joblin 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. checksums.yaml +7 -0
  2. data/README.md +1 -0
  3. data/app/models/joblin/background_task/api_access.rb +148 -0
  4. data/app/models/joblin/background_task/attachments.rb +47 -0
  5. data/app/models/joblin/background_task/executor.rb +63 -0
  6. data/app/models/joblin/background_task/options.rb +75 -0
  7. data/app/models/joblin/background_task/retention_policy.rb +28 -0
  8. data/app/models/joblin/background_task.rb +72 -0
  9. data/app/models/joblin/concerns/job_working_dirs.rb +21 -0
  10. data/db/migrate/20250903184852_create_background_tasks.rb +12 -0
  11. data/joblin.gemspec +35 -0
  12. data/lib/joblin/batching/batch.rb +537 -0
  13. data/lib/joblin/batching/callback.rb +135 -0
  14. data/lib/joblin/batching/chain_builder.rb +247 -0
  15. data/lib/joblin/batching/compat/active_job.rb +108 -0
  16. data/lib/joblin/batching/compat/sidekiq/web/batches_assets/css/styles.less +182 -0
  17. data/lib/joblin/batching/compat/sidekiq/web/batches_assets/js/batch_tree.js +108 -0
  18. data/lib/joblin/batching/compat/sidekiq/web/batches_assets/js/util.js +2 -0
  19. data/lib/joblin/batching/compat/sidekiq/web/helpers.rb +41 -0
  20. data/lib/joblin/batching/compat/sidekiq/web/views/_batch_tree.erb +6 -0
  21. data/lib/joblin/batching/compat/sidekiq/web/views/_batches_table.erb +44 -0
  22. data/lib/joblin/batching/compat/sidekiq/web/views/_common.erb +13 -0
  23. data/lib/joblin/batching/compat/sidekiq/web/views/_jobs_table.erb +21 -0
  24. data/lib/joblin/batching/compat/sidekiq/web/views/_pagination.erb +26 -0
  25. data/lib/joblin/batching/compat/sidekiq/web/views/batch.erb +81 -0
  26. data/lib/joblin/batching/compat/sidekiq/web/views/batches.erb +23 -0
  27. data/lib/joblin/batching/compat/sidekiq/web/views/pool.erb +137 -0
  28. data/lib/joblin/batching/compat/sidekiq/web/views/pools.erb +47 -0
  29. data/lib/joblin/batching/compat/sidekiq/web.rb +218 -0
  30. data/lib/joblin/batching/compat/sidekiq.rb +149 -0
  31. data/lib/joblin/batching/compat.rb +20 -0
  32. data/lib/joblin/batching/context_hash.rb +157 -0
  33. data/lib/joblin/batching/hier_batch_ids.lua +25 -0
  34. data/lib/joblin/batching/jobs/base_job.rb +7 -0
  35. data/lib/joblin/batching/jobs/concurrent_batch_job.rb +20 -0
  36. data/lib/joblin/batching/jobs/managed_batch_job.rb +175 -0
  37. data/lib/joblin/batching/jobs/serial_batch_job.rb +20 -0
  38. data/lib/joblin/batching/pool.rb +254 -0
  39. data/lib/joblin/batching/pool_refill.lua +47 -0
  40. data/lib/joblin/batching/schedule_callback.lua +14 -0
  41. data/lib/joblin/batching/status.rb +89 -0
  42. data/lib/joblin/engine.rb +15 -0
  43. data/lib/joblin/lazy_access.rb +72 -0
  44. data/lib/joblin/uniqueness/compat/active_job.rb +75 -0
  45. data/lib/joblin/uniqueness/compat/sidekiq.rb +135 -0
  46. data/lib/joblin/uniqueness/compat.rb +20 -0
  47. data/lib/joblin/uniqueness/configuration.rb +25 -0
  48. data/lib/joblin/uniqueness/job_uniqueness.rb +49 -0
  49. data/lib/joblin/uniqueness/lock_context.rb +199 -0
  50. data/lib/joblin/uniqueness/locksmith.rb +92 -0
  51. data/lib/joblin/uniqueness/on_conflict/base.rb +32 -0
  52. data/lib/joblin/uniqueness/on_conflict/log.rb +13 -0
  53. data/lib/joblin/uniqueness/on_conflict/null_strategy.rb +9 -0
  54. data/lib/joblin/uniqueness/on_conflict/raise.rb +11 -0
  55. data/lib/joblin/uniqueness/on_conflict/reject.rb +21 -0
  56. data/lib/joblin/uniqueness/on_conflict/reschedule.rb +20 -0
  57. data/lib/joblin/uniqueness/on_conflict.rb +62 -0
  58. data/lib/joblin/uniqueness/strategy/base.rb +107 -0
  59. data/lib/joblin/uniqueness/strategy/until_and_while_executing.rb +35 -0
  60. data/lib/joblin/uniqueness/strategy/until_executed.rb +20 -0
  61. data/lib/joblin/uniqueness/strategy/until_executing.rb +20 -0
  62. data/lib/joblin/uniqueness/strategy/until_expired.rb +16 -0
  63. data/lib/joblin/uniqueness/strategy/while_executing.rb +26 -0
  64. data/lib/joblin/uniqueness/strategy.rb +27 -0
  65. data/lib/joblin/uniqueness/unique_job_common.rb +79 -0
  66. data/lib/joblin/version.rb +3 -0
  67. data/lib/joblin.rb +37 -0
  68. data/spec/batching/batch_spec.rb +493 -0
  69. data/spec/batching/callback_spec.rb +38 -0
  70. data/spec/batching/compat/active_job_spec.rb +107 -0
  71. data/spec/batching/compat/sidekiq_spec.rb +127 -0
  72. data/spec/batching/context_hash_spec.rb +54 -0
  73. data/spec/batching/flow_spec.rb +82 -0
  74. data/spec/batching/integration/fail_then_succeed.rb +42 -0
  75. data/spec/batching/integration/integration.rb +57 -0
  76. data/spec/batching/integration/nested.rb +88 -0
  77. data/spec/batching/integration/simple.rb +47 -0
  78. data/spec/batching/integration/workflow.rb +134 -0
  79. data/spec/batching/integration_helper.rb +50 -0
  80. data/spec/batching/pool_spec.rb +161 -0
  81. data/spec/batching/status_spec.rb +76 -0
  82. data/spec/batching/support/base_job.rb +19 -0
  83. data/spec/batching/support/sample_callback.rb +2 -0
  84. data/spec/internal/config/database.yml +5 -0
  85. data/spec/internal/config/routes.rb +5 -0
  86. data/spec/internal/config/storage.yml +3 -0
  87. data/spec/internal/db/combustion_test.sqlite +0 -0
  88. data/spec/internal/db/schema.rb +6 -0
  89. data/spec/internal/log/test.log +48200 -0
  90. data/spec/internal/public/favicon.ico +0 -0
  91. data/spec/models/background_task_spec.rb +41 -0
  92. data/spec/spec_helper.rb +29 -0
  93. data/spec/uniqueness/compat/active_job_spec.rb +49 -0
  94. data/spec/uniqueness/compat/sidekiq_spec.rb +68 -0
  95. data/spec/uniqueness/lock_context_spec.rb +106 -0
  96. data/spec/uniqueness/on_conflict/log_spec.rb +11 -0
  97. data/spec/uniqueness/on_conflict/raise_spec.rb +10 -0
  98. data/spec/uniqueness/on_conflict/reschedule_spec.rb +63 -0
  99. data/spec/uniqueness/on_conflict_spec.rb +16 -0
  100. data/spec/uniqueness/spec_helper.rb +19 -0
  101. data/spec/uniqueness/strategy/base_spec.rb +100 -0
  102. data/spec/uniqueness/strategy/until_and_while_executing_spec.rb +48 -0
  103. data/spec/uniqueness/strategy/until_executed_spec.rb +23 -0
  104. data/spec/uniqueness/strategy/until_executing_spec.rb +23 -0
  105. data/spec/uniqueness/strategy/until_expired_spec.rb +23 -0
  106. data/spec/uniqueness/strategy/while_executing_spec.rb +33 -0
  107. data/spec/uniqueness/support/lock_strategy.rb +28 -0
  108. data/spec/uniqueness/support/on_conflict.rb +24 -0
  109. data/spec/uniqueness/support/test_worker.rb +19 -0
  110. data/spec/uniqueness/unique_job_common_spec.rb +45 -0
  111. metadata +308 -0
@@ -0,0 +1,254 @@
module Joblin::Batching
  # A Pool holds a queue of job descriptions in Redis and ensures that at
  # most `concurrency` of them run at once. Each popped job is wrapped in a
  # placeholder Batch whose callback checks the job back in, which in turn
  # refills the pool with the next queued job(s).
  class Pool
    include RediConn::RedisModel

    # Lua script that atomically pops up to (concurrency - active) queued
    # jobs and reserves slots for them.
    POOL_REFILL = RediConn::RedisScript.new(Pathname.new(__FILE__) + "../pool_refill.lua")

    attr_reader :pid
    redis_attr :description
    redis_attr :created_at
    redis_attr :concurrency, :int
    redis_attr :complete_count, :int
    redis_attr :order
    redis_attr :on_failed_job, :symbol
    redis_attr :clean_when_empty, :bool

    # Wraps an existing pool when `pool_id` is given; otherwise generates a
    # new pid and persists the pool's attributes to Redis.
    def initialize(pool_id = nil, **kwargs)
      if pool_id
        @existing = true
        @pid = pool_id
      else
        @pid = SecureRandom.urlsafe_base64(10)
        initialize_new(**kwargs)
      end
    end

    def self.from_pid(pid)
      raise "PID must be given" unless pid.present?
      new(pid)
    end

    def <<(job_desc)
      add_job(job_desc)
    end

    def add_job(job_desc)
      add_jobs([job_desc])
    end

    # Enqueues job descriptions. Each job gets a placeholder wrapper Batch
    # whose callback checks it back in when it finishes. Pass
    # skip_refill: true when making several calls in a row, then refill once.
    def add_jobs(job_descs, skip_refill: false)
      job_descs.each do |job_desc|
        wrapper = Batch.new
        wrapper.description = "Pool Job Wrapper (PID: #{pid})"
        # With on_failed_job == :wait, a failed job keeps its slot (we only
        # check in on success); otherwise any completion frees the slot.
        checkin_event = (on_failed_job == :wait) ? :success : :complete
        wrapper.on(checkin_event, "#{self.class}.job_checked_in", pool_id: pid)
        wrapper.placeholder!

        job_desc = job_desc.symbolize_keys
        # merge! mutates in place; the previous `job_desc = job_desc.merge!`
        # reassignment was redundant.
        job_desc.merge!(
          job: job_desc[:job].to_s,
          pool_wrapper_batch: wrapper.bid,
        )

        push_job_to_pool(job_desc)
      end
      refill_allotment unless skip_refill
    end

    # Prevents the pool from being cleaned up while the token is held. With a
    # block, the hold is released automatically when the block exits.
    def keep_open!(token = SecureRandom.urlsafe_base64(10))
      if block_given?
        begin
          token = keep_open!(token)
          yield
        ensure
          let_close!(token)
        end
      else
        redis.multi do |r|
          r.sadd("#{redis_key}-holds", token)
          r.expire("#{redis_key}-holds", Batch::BID_EXPIRE_TTL)
        end
        token
      end
    end

    # Releases a hold (or, in the legacy no-token form, clears all holds and
    # the keep_open flag), then cleans up the pool if it is now empty.
    def let_close!(token = :unset)
      if token == :unset # Legacy
        redis.del("#{redis_key}-holds")
        redis.hset(redis_key, 'keep_open', 'false')
      else
        redis.srem("#{redis_key}-holds", token)
      end

      cleanup_if_empty
    end

    # Removes this pool's index entry and primary keys from Redis.
    # NOTE(review): the "-active" and "-holds" keys are not unlinked here and
    # are left to expire via their TTLs — confirm this is intentional.
    def cleanup_redis
      Batch.logger.debug {"Cleaning redis of pool #{pid}"}
      redis do |r|
        r.zrem("pools", pid)
        r.unlink(
          "#{redis_key}",
          "#{redis_key}-jobs",
        )
      end
    end

    # Cleans the pool up when nothing is active, reserved, or pending —
    # unless it is being held open.
    def cleanup_if_empty
      self.order # preload so pending_count doesn't issue a read inside MULTI

      activec, pactivec, pendingc, clean_when_empty, keep_open, holds = redis.multi do |r|
        r.hlen("#{redis_key}-active")
        r.hget(redis_key, "_active_count")
        pending_count(r)
        r.hget(redis_key, 'clean_when_empty')
        r.hget(redis_key, 'keep_open')
        r.scard("#{redis_key}-holds")
      end

      return if keep_open == 'true' || clean_when_empty == 'false' || (holds && holds > 0)

      if activec <= 0 && (pactivec.try(:to_i) || 0) <= 0 && pendingc <= 0
        cleanup_redis
      end
    end

    # Running jobs plus slots reserved by an in-flight refill.
    def active_count(r = redis)
      r.hlen("#{redis_key}-active") + r.hincrby(redis_key, "_active_count", 0)
    end

    def active_jobs(r = redis)
      r.hvals("#{redis_key}-active").map {|desc| JSON.parse(desc)[0] }
    end

    # Number of queued jobs; the backing data structure depends on `order`.
    def pending_count(r = redis)
      jobs_key = "#{redis_key}-jobs"
      order = self.order || 'fifo'
      case order.to_sym
      when :fifo, :lifo
        r.llen(jobs_key)
      when :random
        r.scard(jobs_key)
      when :priority
        r.zcard(jobs_key)
      end
    end

    # Batch-callback target: frees the finished job's slot, refills, and
    # triggers cleanup once the pool has fully drained.
    def job_checked_in(status, options)
      active_count = refill_allotment(status.bid)
      cleanup_if_empty unless active_count > 0
    end

    def self.job_checked_in(status, options)
      pid = options['pool_id']
      from_pid(pid).job_checked_in(status, options)
    end

    # Administrative/console method to cleanup expired pools from the WebUI
    def self.cleanup_redis_index!
      suffixes = ["", "-active", "-jobs"]
      # BUGFIX: `r` was referenced here without ever being defined (NameError
      # at runtime); wrap the calls in the class-level redis helper instead.
      redis do |r|
        r.zrangebyscore("pools", "0", Batch::BID_EXPIRE_TTL.seconds.ago.to_i).each do |pid|
          r.zrem("pools", pid) if Batch.cleanup_redis_index_for("POOLID-#{pid}", suffixes)
        end
      end
    end

    protected

    def redis_key
      "POOLID-#{pid}"
    end

    # Pops as many jobs from the queue as the concurrency allows and enqueues
    # each inside its wrapper batch. Returns the resulting active count
    # (negative when the pool no longer exists in Redis).
    def refill_allotment(checkin_bid = nil)
      active_count, job_descs = POOL_REFILL.call(redis, [redis_key, "#{redis_key}-jobs", "#{redis_key}-active"], [checkin_bid || ""])
      return active_count if active_count < 0

      pending_job_descs = job_descs.dup

      added_jobs = {}
      failed_to_add_jobs = []
      add_exception = nil

      while pending_job_descs.count > 0
        begin
          job_json = pending_job_descs.shift
          job_desc = ::ActiveJob::Arguments.deserialize(JSON.parse(job_json))[0]&.symbolize_keys

          wbid = job_desc[:pool_wrapper_batch]

          Batch.new(wbid).jobs do
            ChainBuilder.enqueue_job(job_desc)
          end

          added_jobs[wbid] = job_json
        rescue => ex
          failed_to_add_jobs << job_json
          add_exception = ex
        end
      end

      redis.multi do |r|
        r.mapped_hmset("#{redis_key}-active", added_jobs) if added_jobs.count > 0
        # Release reserved slots now that we've added the jobs to `-active`
        r.hincrby(redis_key, "_active_count", -job_descs.count)

        r.expire(redis_key, Batch::BID_EXPIRE_TTL)
        r.expire("#{redis_key}-active", Batch::BID_EXPIRE_TTL)
        r.expire("#{redis_key}-jobs", Batch::BID_EXPIRE_TTL)
      end

      # If this happens, we end up in a bad state (as we don't try to re-add items to the pool or refill_allotment again), but
      # this should be a _really_ rare case that should only occur if we've lost connection to Redis or something, so we're
      # operating on the assumption that if we get here, any recovery logic will fail too
      if add_exception.present?
        Batch.logger.error {"Error popping jobs from Pool #{pid}: #{add_exception}"}
        raise add_exception
      end

      active_count + added_jobs.count
    end

    # Serializes a job description and pushes it onto the data structure
    # matching the pool's ordering mode.
    def push_job_to_pool(job_desc)
      jobs_key = "#{redis_key}-jobs"
      # This allows duplicate jobs when a Redis Set is used
      job_desc[:_pool_random_key_] = SecureRandom.urlsafe_base64(10)
      job_json = JSON.unparse(::ActiveJob::Arguments.serialize([job_desc]))
      order = self.order

      redis.multi do |r|
        case order.to_sym
        when :fifo, :lifo
          r.rpush(jobs_key, job_json)
        when :random
          r.sadd(jobs_key, job_json)
        when :priority
          r.zadd(jobs_key, job_desc[:priority] || 0, job_json)
        end
        r.expire(redis_key, Batch::BID_EXPIRE_TTL)
        r.expire(jobs_key, Batch::BID_EXPIRE_TTL)
      end
    end

    def self.redis(&blk)
      Batch.redis &blk
    end
    delegate :redis, to: :class

    # Persist pending attrs, refresh the pool TTL, and index it by creation time.
    def flush_pending_attrs
      super
      redis.expire(redis_key, Batch::BID_EXPIRE_TTL)
      redis.zadd("pools", created_at, pid)
    end

    private

    def initialize_new(concurrency: nil, order: :fifo, clean_when_empty: true, on_failed_job: :wait, description: nil)
      self.created_at = Time.now.utc.to_f
      self.description = description
      self.order = order
      self.concurrency = concurrency
      self.clean_when_empty = clean_when_empty
      self.on_failed_job = on_failed_job
      flush_pending_attrs
    end
  end
end
@@ -0,0 +1,47 @@
-- Refill a pool's allotment of active jobs.
--
-- KEYS[1] = pool hash key ("POOLID-<pid>")
-- KEYS[2] = pending-jobs key (list/set/zset depending on pool order)
-- KEYS[3] = active-jobs hash key
-- ARGV[1] = bid of a job checking back in ("" when none)
--
-- Returns { active_count_before_popping, popped_job_jsons }, or { -1, {} }
-- when the pool no longer exists.

local poolkey = KEYS[1]
local qkey = KEYS[2]
local activekey = KEYS[3]

local checkin_item = ARGV[1]

if redis.call('EXISTS', poolkey) == 0 then
  return { -1, {} } -- pool doesn't exist
end

-- Check the finished job in before computing remaining capacity.
if checkin_item ~= "" then
  redis.call("HDEL", activekey, checkin_item)
  redis.call("HINCRBY", poolkey, "complete_count", 1)
end

local pool_type = redis.call('HGET', poolkey, "order")
local allotment = tonumber(redis.call("HGET", poolkey, "concurrency"))
-- "active" counts both jobs in the active hash and slots reserved by a
-- previous refill that the Ruby side hasn't committed to `-active` yet.
local active = redis.call("HLEN", activekey) + (redis.call("HGET", poolkey, "_active_count") or 0)

local pop_count = allotment - active

local popped_items = {}

if pop_count > 0 then
  if pool_type == "fifo" then
    popped_items = redis.call("LPOP", qkey, pop_count) or {}
  elseif pool_type == "lifo" then
    popped_items = redis.call("RPOP", qkey, pop_count) or {}
  elseif pool_type == "random" then
    popped_items = redis.call("SPOP", qkey, pop_count) or {}
  elseif pool_type == "priority" then
    -- ZPOPMAX returns a flat [member, score, member, score, ...] array;
    -- keep only the members (odd indices).
    local temp_items = redis.call("ZPOPMAX", qkey, pop_count) or {}
    for i,v in ipairs(temp_items) do
      if i % 2 == 1 then
        table.insert(popped_items, v)
      end
    end
  end
end

-- Reserve slots for these jobs while we return to Ruby and deserialize them
-- This could also be inlined by just storing a key in the queue and storing parameters
-- in a Hash, but this seems more efficient.
redis.call('HINCRBY', poolkey, "_active_count", #popped_items)

return { active, popped_items }
@@ -0,0 +1,14 @@
-- Marks a callback event (ARGV[1]) as scheduled on a batch hash (KEYS[1]).
-- Only the first scheduling registers the callback in the batch's
-- pending-callbacks set, making scheduling idempotent.
--
-- ARGV[2] = 'true' when the event callback itself (not just its finalize
--           step) should be queued
-- ARGV[3] = TTL (seconds) for the pending-callbacks set
--
-- Returns the previous scheduled flag ('true' if already scheduled).

local previously_scheduled = redis.call('HGET', KEYS[1], ARGV[1])
redis.call('HSET', KEYS[1], ARGV[1], 'true')

if previously_scheduled ~= 'true' then
  local pcb_key = KEYS[1] .. '-pending_callbacks'
  redis.call('SADD', pcb_key, ARGV[1] .. '-finalize')
  if ARGV[2] == 'true' then
    redis.call('SADD', pcb_key, ARGV[1])
  end
  redis.call('EXPIRE', pcb_key, ARGV[3])
end

return previously_scheduled
@@ -0,0 +1,89 @@
module Joblin::Batching
  class Batch
    # Read-only snapshot of a batch's state as stored in Redis under
    # "BID-<bid>". Every reader hits Redis on each call; nothing is cached.
    class Status
      attr_reader :bid

      # Accepts either a Batch instance or a bid string.
      def initialize(bid)
        bid = bid.bid if bid.is_a?(Batch)
        @bid = bid
      end

      # Batches cannot be blocked on; poll #complete? / #success? instead.
      def join
        raise "Not supported"
      end

      # Number of jobs not yet finished.
      def pending
        hget('pending').to_i
      end

      def failures
        set_size('failed')
      end

      def dead
        set_size('dead')
      end

      def completed_count
        job_count - pending
      end

      def job_count
        hget('job_count').to_i
      end

      # Raw string as stored in Redis (epoch float rendered as text).
      def created_at
        hget('created_at')
      end

      def parent_bid
        hget('parent_bid')
      end

      def failure_info
        Batch.redis { |r| r.smembers("BID-#{bid}-failed") } || []
      end

      def complete?
        hget('complete') == 'true'
      end

      def success?
        hget('success') == 'true'
      end

      def child_count
        hget('children').to_i
      end

      def completed_children_count
        set_size('batches-complete')
      end

      def successful_children_count
        set_size('batches-success')
      end

      def failed_children_count
        set_size('batches-failed')
      end

      # Full status hash, suitable for JSON serialization / the web UI.
      def data
        {
          bid: bid,
          failures: failures,
          pending: pending,
          created_at: created_at,
          complete: complete?,
          success: success?,
          failure_info: failure_info,
          parent_bid: parent_bid,
          child_count: child_count,
          completed_children_count: completed_children_count,
          successful_children_count: successful_children_count,
          failed_children_count: failed_children_count,
        }
      end

      private

      # Reads one field from the batch's Redis hash. Extracted to remove the
      # repeated `Batch.redis { |r| r.hget("BID-#{bid}", ...) }` boilerplate.
      def hget(field)
        Batch.redis { |r| r.hget("BID-#{bid}", field) }
      end

      # Cardinality of one of the batch's Redis sets ("BID-<bid>-<suffix>").
      def set_size(suffix)
        Batch.redis { |r| r.scard("BID-#{bid}-#{suffix}") }.to_i
      end
    end
  end
end
@@ -0,0 +1,15 @@
require "rails"

module Joblin
  # Rails engine that isolates the Joblin namespace and exposes the gem's
  # models, routes, and migrations to the host application.
  class Engine < ::Rails::Engine
    isolate_namespace Joblin

    # Append the engine's migrations to the host app's migration paths so
    # users don't have to copy migration files into their project.
    initializer :append_migrations do |app|
      config.paths["db/migrate"].expanded.each do |expanded_path|
        app.config.paths["db/migrate"] << expanded_path
      end
      # Apartment will modify this, but it doesn't fully support engine migrations, so we'll reset it here
      ActiveRecord::Migrator.migrations_paths = Rails.application.paths["db/migrate"].to_a
    end
  end
end
@@ -0,0 +1,72 @@
module Joblin
  # Mixin for Hash/Array subclasses that lazily inflates stored values on
  # read: "gid://" strings are resolved via GlobalID, and nested Hash/Array
  # values are wrapped so their own reads inflate too. Resolved values are
  # memoized per key in @access_cache.
  module LazyAccess
    extend ActiveSupport::Concern

    def initialize(...)
      super
      @access_cache = {}
    end

    # Runs the block with lazy inflation disabled for the current thread, so
    # #[] returns raw stored values. Restores the previous flag on exit,
    # which makes nested raw blocks safe.
    def self.raw
      current = Thread.current[:lazy_access_read_raw]
      Thread.current[:lazy_access_read_raw] = true
      yield
    ensure
      Thread.current[:lazy_access_read_raw] = current
    end

    # Inflates a stored value: GlobalID strings are located (may hit the DB),
    # and Hash/Array values are wrapped in their lazy-access counterparts.
    # Any other value is returned unchanged.
    def self.load(val)
      case val
      when String
        if val.start_with?("gid://")
          val = GlobalID::Locator.locate(val)
        end
      when Hash
        val = LazyAccessHash.new(val)
      when Array
        val = LazyAccessArray.new(val)
      end
      val
    end

    # Inverse of .load: converts a value tree back into a serializable form,
    # turning ActiveRecord instances into GlobalIDs and recursing through
    # collections.
    def self.dump(val)
      case val
      when Array
        val.to_a.map {|x| dump(x) }
      when Hash
        val.to_h.transform_values{|x| dump(x) }
      when ActiveRecord::Base
        val.to_gid
      else
        val
      end
    end

    # Reads a key. In raw mode, returns the stored value untouched; otherwise
    # inflates it via LazyAccess.load and memoizes the result (symbol keys
    # are normalized to strings first).
    def [](key)
      if Thread.current[:lazy_access_read_raw]
        super
      else
        key = key.to_s if key.is_a?(Symbol)
        unless @access_cache.key?(key)
          val = super
          @access_cache[key] = LazyAccess.load(val)
        end
        @access_cache[key]
      end
    end

    # Writes a key and caches the assigned value as-is (explicitly set values
    # are not re-inflated on subsequent reads).
    def []=(key, value)
      key = key.to_s if key.is_a?(Symbol)
      @access_cache[key] = value
      super
    end
  end

  class LazyAccessHash < HashWithIndifferentAccess
    include LazyAccess
  end

  class LazyAccessArray < Array
    include LazyAccess
  end
end
@@ -0,0 +1,75 @@
module Joblin::Uniqueness
  module Compat
    # ActiveJob integration for job uniqueness: wraps enqueue and perform in
    # the uniqueness lock lifecycle.
    module ActiveJob

      # LockContext specialized for ActiveJob-backed jobs.
      class ActiveJobLockContext < LockContext
        def job_scheduled_at
          job_instance&.scheduled_at
        end

        # Re-enqueues the job `schedule_in` seconds from now, preserving its
        # queue, priority, and arguments.
        def reenqueue(schedule_in:)
          job_class.set(
            queue: job_queue.to_sym,
            wait: schedule_in,
            # BUGFIX: this option was misspelled `priortity:`; ActiveJob's
            # #set silently ignores unknown options, so the job's priority
            # was being dropped on every reschedule.
            priority: job_instance.priority,
          ).perform_later(*job_instance.arguments)
        end
      end

      # Included into job classes that call ensure_uniqueness.
      module UniqueJobExtension
        extend ActiveSupport::Concern

        included do
          # Prepended so the lock is acquired before other enqueue callbacks.
          set_callback(:enqueue, :around, prepend: true) do |job, block|
            ctx = uniqueness_lock_context
            @uniqueness_cache_data = ctx.cache_data
            ctx.handle_lifecycle!(:enqueue, &block)
          end

          around_perform do |job, block|
            ctx = uniqueness_lock_context
            ctx.handle_lifecycle!(:perform, &block)
          end
        end

        # Persist the lock's cache data into the serialized payload so the
        # perform-side lock context matches the enqueue-side one.
        def serialize
          super.tap do |data|
            # Robustness: nil-safe, mirroring #deserialize — serialize can be
            # invoked before the enqueue callback has populated the data.
            data['uniqueness_cache_data'] = @uniqueness_cache_data&.stringify_keys
          end
        end

        def deserialize(data)
          super
          @uniqueness_cache_data = data['uniqueness_cache_data']&.symbolize_keys
        end

        # Builds the lock context for this job instance, merging in any cache
        # data carried over from enqueue time.
        def uniqueness_lock_context
          ActiveJobLockContext.new({
            job_clazz: self.class,
            jid: self.job_id,
            args: self.arguments,
            queue: self.queue_name,
            **(@uniqueness_cache_data || {})
          }, job_instance: self)
        end
      end

      # Adds the ensure_uniqueness class macro to every ActiveJob class.
      module JobExtension
        extend ActiveSupport::Concern
        include UniqueJobCommon

        class_methods do
          def ensure_uniqueness(**kwargs)
            super(**kwargs)
            include UniqueJobExtension
          end
        end
      end

      def self.configure
        ::ActiveJob::Base.include JobExtension
      end
    end
  end
end
@@ -0,0 +1,135 @@
module Joblin::Uniqueness
  module Compat
    # Sidekiq integration for job uniqueness: client/server middleware that
    # wraps enqueue and perform in the uniqueness lock lifecycle, plus
    # validation that the middleware is ordered correctly relative to the
    # batching middleware.
    module Sidekiq
      module WorkerExtension
        extend ActiveSupport::Concern
        include UniqueJobCommon

        class_methods do
          def ensure_uniqueness(**kwargs)
            super(**kwargs)
            # Validate middleware placement once per process, the first time
            # any worker declares uniqueness.
            if !(defined?(@@validated_config) && @@validated_config)
              Compat::Sidekiq.validate_middleware_placement!()
              @@validated_config = true
            end
          end
        end
      end

      # LockContext backed by a raw Sidekiq job payload (msg hash).
      class SidekiqLockContext < LockContext
        def job_scheduled_at
          @job_instance&.[]("at")
        end

        def reenqueue(schedule_in:)
          job_class.set(queue: job_queue.to_sym).perform_in(schedule_in, *@job_instance["args"])
        end
      end

      # Shared logic for building a lock context from a Sidekiq msg hash.
      class CommonMiddleware
        # Returns a SidekiqLockContext for the msg, or nil when the worker
        # does not use uniqueness (including ActiveJob-wrapped jobs, which
        # are handled by the ActiveJob integration instead).
        def lock_context(msg)
          opts = worker_uniqueness(msg)
          return nil unless opts

          SidekiqLockContext.new({
            job_clazz: msg['class'],
            jid: msg['jid'],
            queue: msg['queue'],
            args: msg['args'],
            # kwargs: msg['kwargs'],
            **(msg['uniqueness_cache_data']&.symbolize_keys || {}),
          }, job_instance: msg)
        end

        # Uniqueness options declared on the worker class, or nil.
        def worker_uniqueness(msg)
          return nil if Compat::Sidekiq.is_activejob_job?(msg)

          worker_class = msg['class'].constantize
          return nil unless worker_class.respond_to?(:unique_job_options)

          worker_class.unique_job_options
        end
      end

      class ClientMiddleware < CommonMiddleware
        include ::Sidekiq::ClientMiddleware if defined? ::Sidekiq::ClientMiddleware

        # Runs at enqueue time: stores the lock's cache data on the msg so the
        # server side can rebuild the same context.
        def call(_worker, msg, _queue, _redis_pool = nil, &blk)
          ctx = lock_context(msg)
          return blk.call unless ctx
          msg['uniqueness_cache_data'] = ctx.cache_data.stringify_keys
          ctx.handle_lifecycle!(:enqueue, &blk)
        end
      end

      class ServerMiddleware < CommonMiddleware
        include ::Sidekiq::ServerMiddleware if defined? ::Sidekiq::ServerMiddleware

        # Runs at perform time, wrapping the job in the :perform lifecycle.
        def call(_worker, msg, _queue, &blk)
          ctx = lock_context(msg)
          return blk.call unless ctx
          ctx.handle_lifecycle!(:perform, &blk)
        end
      end

      # True when the msg is an ActiveJob wrapper whose wrapped job class
      # opted into uniqueness via the ActiveJob integration.
      def self.is_activejob_job?(msg)
        return false unless defined?(::ActiveJob)

        msg['class'] == 'ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper' && (msg['wrapped'].to_s).constantize < Compat::ActiveJob::UniqueJobExtension
      end

      # Asserts that every class in `order` appears in the chain, in that
      # relative order (other middleware may be interleaved).
      def self.validate_middleware_order(chain, order)
        chain_classes = chain.entries.map(&:klass)
        filtered = chain_classes.select { |klass| order.include?(klass) }
        raise "Middleware chain does not contain all required middleware: #{order - filtered}" unless order.all? { |klass| filtered.include?(klass) }
        raise "Middleware must be in order: #{order.inspect}" if filtered != order
      end

      # Yields the client and/or server middleware chain for configuration.
      # Note the server also gets client middleware installed (placement
      # :client configures both), since servers enqueue jobs too.
      def self.sidekiq_middleware(placement, &blk)
        install_middleware = ->(config) do
          config.send("#{placement}_middleware") do |chain|
            blk.call(chain)
          end
        end

        ::Sidekiq.configure_client(&install_middleware) if placement == :client
        ::Sidekiq.configure_server(&install_middleware)
      end

      def self.validate_middleware_placement!
        sidekiq_middleware(:client) do |chain|
          # Unique middleware must come _before_ the Batch middleware so that the uniqueness middleware can wrap the job in a batch
          validate_middleware_order(chain, [
            Joblin::Uniqueness::Compat::Sidekiq::ClientMiddleware,
            Joblin::Batching::Compat::Sidekiq::ClientMiddleware,
          ])
        end

        sidekiq_middleware(:server) do |chain|
          # Unique middleware must come _after_ the Batch middleware so that the Batch is loaded before reaching the uniqueness middleware
          validate_middleware_order(chain, [
            Joblin::Batching::Compat::Sidekiq::ServerMiddleware,
            Joblin::Uniqueness::Compat::Sidekiq::ServerMiddleware,
          ])
        end
      end

      # Installs the middleware (ordered relative to the batching middleware)
      # and mixes the ensure_uniqueness macro into Sidekiq::Worker.
      def self.configure
        sidekiq_middleware(:client) do |chain|
          chain.insert_before Joblin::Batching::Compat::Sidekiq::ClientMiddleware, Compat::Sidekiq::ClientMiddleware
        end

        sidekiq_middleware(:server) do |chain|
          chain.insert_after Joblin::Batching::Compat::Sidekiq::ServerMiddleware, Compat::Sidekiq::ServerMiddleware
        end

        ::Sidekiq::Worker.extend(ActiveSupport::Concern) unless ::Sidekiq::Worker < ActiveSupport::Concern

        ::Sidekiq::Worker.send(:include, Compat::Sidekiq::WorkerExtension)
      end
    end
  end
end

# require_relative 'sidekiq/web'