cloudtasker 0.13.1 → 0.14.rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/lint_rubocop.yml +1 -1
  3. data/.github/workflows/test_ruby_3.x.yml +1 -0
  4. data/.gitignore +4 -1
  5. data/.rubocop.yml +37 -9
  6. data/Appraisals +0 -12
  7. data/CHANGELOG.md +31 -0
  8. data/Gemfile +12 -0
  9. data/README.md +145 -6
  10. data/app/controllers/cloudtasker/worker_controller.rb +30 -9
  11. data/cloudtasker.gemspec +3 -10
  12. data/docs/CRON_JOBS.md +23 -0
  13. data/docs/STORABLE_JOBS.md +68 -0
  14. data/exe/cloudtasker +5 -2
  15. data/gemfiles/google_cloud_tasks_1.0.gemfile +10 -1
  16. data/gemfiles/google_cloud_tasks_1.1.gemfile +10 -1
  17. data/gemfiles/google_cloud_tasks_1.2.gemfile +10 -1
  18. data/gemfiles/google_cloud_tasks_1.3.gemfile +10 -1
  19. data/gemfiles/google_cloud_tasks_1.4.gemfile +10 -1
  20. data/gemfiles/google_cloud_tasks_1.5.gemfile +10 -1
  21. data/gemfiles/google_cloud_tasks_2.0.gemfile +10 -1
  22. data/gemfiles/google_cloud_tasks_2.1.gemfile +10 -1
  23. data/gemfiles/rails_5.2.gemfile +10 -0
  24. data/gemfiles/rails_6.0.gemfile +10 -0
  25. data/gemfiles/rails_6.1.gemfile +10 -0
  26. data/gemfiles/rails_7.0.gemfile +10 -0
  27. data/gemfiles/semantic_logger_3.4.gemfile +9 -1
  28. data/gemfiles/semantic_logger_4.6.gemfile +9 -1
  29. data/gemfiles/semantic_logger_4.7.0.gemfile +9 -1
  30. data/gemfiles/semantic_logger_4.7.2.gemfile +9 -1
  31. data/lib/active_job/queue_adapters/cloudtasker_adapter.rb +8 -1
  32. data/lib/cloudtasker/authenticator.rb +35 -0
  33. data/lib/cloudtasker/backend/google_cloud_task_v1.rb +2 -4
  34. data/lib/cloudtasker/backend/google_cloud_task_v2.rb +3 -5
  35. data/lib/cloudtasker/backend/memory_task.rb +8 -4
  36. data/lib/cloudtasker/backend/redis_task.rb +10 -4
  37. data/lib/cloudtasker/batch/batch_progress.rb +18 -14
  38. data/lib/cloudtasker/batch/job.rb +124 -31
  39. data/lib/cloudtasker/batch/middleware/server.rb +2 -2
  40. data/lib/cloudtasker/cli.rb +5 -7
  41. data/lib/cloudtasker/cloud_task.rb +16 -20
  42. data/lib/cloudtasker/config.rb +43 -10
  43. data/lib/cloudtasker/cron/middleware/server.rb +2 -2
  44. data/lib/cloudtasker/cron/schedule.rb +5 -2
  45. data/lib/cloudtasker/middleware/chain.rb +1 -1
  46. data/lib/cloudtasker/redis_client.rb +1 -4
  47. data/lib/cloudtasker/retry_worker_error.rb +6 -0
  48. data/lib/cloudtasker/storable/worker.rb +78 -0
  49. data/lib/cloudtasker/storable.rb +3 -0
  50. data/lib/cloudtasker/unique_job/conflict_strategy/base_strategy.rb +4 -2
  51. data/lib/cloudtasker/unique_job/lock/until_executed.rb +4 -4
  52. data/lib/cloudtasker/unique_job/lock/until_executing.rb +2 -2
  53. data/lib/cloudtasker/unique_job/lock/while_executing.rb +2 -2
  54. data/lib/cloudtasker/unique_job/middleware/client.rb +2 -2
  55. data/lib/cloudtasker/unique_job/middleware/server.rb +2 -2
  56. data/lib/cloudtasker/version.rb +1 -1
  57. data/lib/cloudtasker/worker.rb +38 -15
  58. data/lib/cloudtasker/worker_handler.rb +25 -19
  59. data/lib/cloudtasker/worker_logger.rb +48 -0
  60. data/lib/cloudtasker.rb +4 -1
  61. data/lib/tasks/setup_queue.rake +6 -6
  62. metadata +9 -145
  63. data/.github/workflows/test_ruby_2.6.yml +0 -37
data/lib/cloudtasker/batch/job.rb

@@ -24,6 +24,7 @@ module Cloudtasker
  # means that the job will never succeed. There is no point in blocking
  # the batch forever so we proceed forward eventually.
  #
+ BATCH_STATUSES = %w[scheduled processing completed errored dead all].freeze
  COMPLETION_STATUSES = %w[completed dead].freeze

  # These callbacks do not need to raise errors on their own
@@ -183,12 +184,40 @@ module Cloudtasker
  end

  #
- # The list of jobs in the batch
+ # Return the key under which the batch progress is stored
+ # for a specific state.
+ #
+ # @return [String] The batch progress state namespaced id.
+ #
+ def batch_state_count_gid(state)
+   "#{batch_state_gid}/state_count/#{state}"
+ end
+
+ #
+ # Return the number of jobs in a given state
+ #
+ # @return [String] The batch progress state namespaced id.
+ #
+ def batch_state_count(state)
+   redis.get(batch_state_count_gid(state)).to_i
+ end
+
+ #
+ # The list of jobs to be enqueued in the batch
  #
  # @return [Array<Cloudtasker::Worker>] The jobs to enqueue at the end of the batch.
  #
- def jobs
-   @jobs ||= []
+ def pending_jobs
+   @pending_jobs ||= []
+ end
+
+ #
+ # The list of jobs that have been enqueued as part of the batch
+ #
+ # @return [Array<Cloudtasker::Worker>] The jobs enqueued as part of the batch.
+ #
+ def enqueued_jobs
+   @enqueued_jobs ||= []
  end

  #
@@ -208,7 +237,7 @@ module Cloudtasker
  # @param [Class] worker_klass The worker class.
  # @param [Array<any>] *args The worker arguments.
  #
- # @return [Array<Cloudtasker::Worker>] The updated list of jobs.
+ # @return [Array<Cloudtasker::Worker>] The updated list of pending jobs.
  #
  def add(worker_klass, *args)
    add_to_queue(worker.job_queue, worker_klass, *args)
@@ -221,10 +250,10 @@ module Cloudtasker
  # @param [Class] worker_klass The worker class.
  # @param [Array<any>] *args The worker arguments.
  #
- # @return [Array<Cloudtasker::Worker>] The updated list of jobs.
+ # @return [Array<Cloudtasker::Worker>] The updated list of pending jobs.
  #
  def add_to_queue(queue, worker_klass, *args)
-   jobs << worker_klass.new(
+   pending_jobs << worker_klass.new(
      job_args: args,
      job_meta: { key(:parent_id) => batch_id },
      job_queue: queue
@@ -249,6 +278,28 @@ module Cloudtasker
  end
  end

+ #
+ # This method initializes the batch job counters if not set already
+ #
+ def migrate_progress_stats_to_redis_counters
+   # Abort if counters have already been set. The 'all' counter acts as a feature flag.
+   return if redis.exists?(batch_state_count_gid('all'))
+
+   # Get all job states
+   values = batch_state.values
+
+   # Count by value
+   redis.multi do |m|
+     # Per status
+     values.tally.each do |k, v|
+       m.set(batch_state_count_gid(k), v)
+     end
+
+     # All counter
+     m.set(batch_state_count_gid('all'), values.size)
+   end
+ end
+
  #
  # Save serialized version of the worker.
  #
@@ -269,8 +320,17 @@ module Cloudtasker
  def update_state(batch_id, status)
    migrate_batch_state_to_redis_hash

+   # Get current status
+   current_status = redis.hget(batch_state_gid, batch_id)
+   return if current_status == status.to_s
+
    # Update the batch state batch_id entry with the new status
-   redis.hset(batch_state_gid, batch_id, status)
+   # and update counters
+   redis.multi do |m|
+     m.hset(batch_state_gid, batch_id, status)
+     m.decr(batch_state_count_gid(current_status))
+     m.incr(batch_state_count_gid(status))
+   end
  end

  #
@@ -295,7 +355,14 @@ module Cloudtasker
  # @return [any] The callback return value
  #
  def run_worker_callback(callback, *args)
-   worker.try(callback, *args)
+   worker.try(callback, *args).tap do
+     # Enqueue pending jobs if batch was expanded in callback
+     # A completed batch cannot receive additional jobs
+     schedule_pending_jobs if callback.to_sym != :on_batch_complete
+
+     # Schedule pending jobs on parent if batch was expanded
+     parent_batch&.schedule_pending_jobs
+   end
  rescue StandardError => e
    # There is no point in retrying jobs due to failure callbacks failing
    # Only completion callbacks will trigger a re-run of the job because
@@ -369,8 +436,11 @@ module Cloudtasker
  redis.hkeys(batch_state_gid).each { |id| self.class.find(id)&.cleanup }

  # Delete batch redis entries
- redis.del(batch_gid)
- redis.del(batch_state_gid)
+ redis.multi do |m|
+   m.del(batch_gid)
+   m.del(batch_state_gid)
+   BATCH_STATUSES.each { |e| m.del(batch_state_count_gid(e)) }
+ end
  end

  #
@@ -384,33 +454,31 @@ module Cloudtasker
  def progress(depth: 0)
    depth = depth.to_i

-   # Capture batch state
-   state = batch_state
+   # Initialize counters from batch state. This is only applicable to running batches
+   # that started before the counter-based progress was implemented/released.
+   migrate_progress_stats_to_redis_counters

    # Return immediately if we do not need to go down the tree
-   return BatchProgress.new(state) if depth <= 0
+   return BatchProgress.new([self]) if depth <= 0

    # Sum batch progress of current batch and sub-batches up to the specified
    # depth
-   state.to_h.reduce(BatchProgress.new(state)) do |memo, (child_id, child_status)|
-     memo + (self.class.find(child_id)&.progress(depth: depth - 1) ||
-       BatchProgress.new(child_id => child_status))
+   batch_state.to_h.reduce(BatchProgress.new([self])) do |memo, (child_id, _)|
+     memo + (self.class.find(child_id)&.progress(depth: depth - 1) || BatchProgress.new)
    end
  end

  #
- # Save the batch and enqueue all child workers attached to it.
+ # Schedule the child workers that were added to the batch
  #
- def setup
-   return true if jobs.empty?
+ def schedule_pending_jobs
+   ret_list = []

-   # Save batch
-   save
-
-   # Schedule all child workers
-   jobs.each do |j|
+   while (j = pending_jobs.shift)
      # Schedule the job
-     j.schedule
+     # Skip batch registration if the job was not actually scheduled
+     # E.g. the job was evicted due to uniqueness requirements
+     next unless j.schedule

      # Initialize the batch state unless the job has already started (and taken
      # hold of its own status)
@@ -418,15 +486,39 @@ module Cloudtasker
      # having never-ending batches - which could occur if a batch was crashing
      # while enqueuing children due to a OOM error and since 'scheduled' is a
      # blocking status.
-     redis.hsetnx(batch_state_gid, j.job_id, 'scheduled')
+     redis.multi do |m|
+       m.hsetnx(batch_state_gid, j.job_id, 'scheduled')
+       m.incr(batch_state_count_gid('scheduled'))
+       m.incr(batch_state_count_gid('all'))
+     end
+
+     # Flag job as enqueued
+     ret_list << j
+     enqueued_jobs << j
    end
+
+   # Return the list of jobs just enqueued
+   ret_list
+ end
+
+ #
+ # Save the batch and enqueue all child workers attached to it.
+ #
+ def setup
+   return true if pending_jobs.empty?
+
+   # Save batch
+   save
+
+   # Schedule all child workers
+   schedule_pending_jobs
  end

  #
  # Post-perform logic. The parent batch is notified if the job is complete.
  #
  def complete(status = :completed)
-   return true if reenqueued? || jobs.any?
+   return true if reenqueued?

    # Notify the parent batch that a child is complete
    on_complete(status) if complete?
@@ -445,11 +537,12 @@ module Cloudtasker
  # Perform job
  yield

- # Save batch if child jobs added
- setup if jobs.any?
+ # Setup batch
+ # Only applicable if the batch has pending_jobs
+ setup

- # Save parent batch if batch expanded
- parent_batch&.setup if parent_batch&.jobs&.any?
+ # Save parent batch if batch was expanded
+ parent_batch&.schedule_pending_jobs

  # Complete batch
  complete(:completed)
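
For context, a sketch of how a batch worker exercises the code paths changed above: jobs added with batch.add accumulate in pending_jobs and are enqueued by schedule_pending_jobs, callbacks other than on_batch_complete may keep expanding the batch, and progress is now computed from per-state Redis counters instead of scanning the whole state hash. The worker classes, the needs_follow_up? predicate and some_batch_id are invented for illustration; batch.add, Batch::Job.find and BatchProgress#percent come from the existing batch documentation.

  # Requires the batch extension to be enabled in the Cloudtasker initializer
  class ImportWorker
    include Cloudtasker::Worker

    def perform(file_ids)
      # Each child accumulates in pending_jobs and is enqueued via schedule_pending_jobs
      file_ids.each { |id| batch.add(ImportRowWorker, id) }
    end

    # Callbacks other than on_batch_complete may keep expanding the batch:
    # jobs added here are scheduled right after the callback returns.
    def on_child_complete(child_worker)
      # needs_follow_up? is a made-up predicate for the sake of the example
      batch.add(FollowUpWorker, child_worker.job_id) if needs_follow_up?(child_worker)
    end
  end

  # Progress now reads the per-state counters instead of walking the state hash
  batch = Cloudtasker::Batch::Job.find(some_batch_id)
  batch.progress.percent    # overall completion percentage
  batch.progress(depth: 1)  # include one level of sub-batches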
data/lib/cloudtasker/batch/middleware/server.rb

@@ -5,8 +5,8 @@ module Cloudtasker
  module Middleware
    # Server middleware, invoked when jobs are executed
    class Server
-     def call(worker, **_kwargs)
-       Job.for(worker).execute { yield }
+     def call(worker, **_kwargs, &block)
+       Job.for(worker).execute(&block)
      end
    end
  end
data/lib/cloudtasker/cli.rb

@@ -58,7 +58,7 @@ module Cloudtasker
  #
  def boot_system
    # Sync logs
-   STDOUT.sync = true
+   $stdout.sync = true

    # Check for Rails
    return false unless File.exist?('./config/environment.rb')
@@ -103,7 +103,7 @@ module Cloudtasker
  def run_server(read_pipe, opts = {})
    local_server.start(opts)

-   while (readable_io = IO.select([read_pipe]))
+   while (readable_io = read_pipe.wait_readable)
      signal = readable_io.first[0].gets.strip
      handle_signal(signal)
    end
@@ -124,11 +124,9 @@ module Cloudtasker
  # USR1 and USR2 don't work on the JVM
  sigs << 'USR2' unless jruby?
  sigs.each do |sig|
-   begin
-     trap(sig) { write_pipe.puts(sig) }
-   rescue ArgumentError
-     puts "Signal #{sig} not supported"
-   end
+   trap(sig) { write_pipe.puts(sig) }
+ rescue ArgumentError
+   puts "Signal #{sig} not supported"
  end
  end

data/lib/cloudtasker/cloud_task.rb

@@ -19,17 +19,15 @@ module Cloudtasker
  # Re-evaluate backend every time if testing mode enabled
  @backend = nil if defined?(Cloudtasker::Testing)

- @backend ||= begin
-   if defined?(Cloudtasker::Testing) && Cloudtasker::Testing.in_memory?
-     require 'cloudtasker/backend/memory_task'
-     Backend::MemoryTask
-   elsif Cloudtasker.config.mode.to_sym == :development
-     require 'cloudtasker/backend/redis_task'
-     Backend::RedisTask
-   else
-     gct_backend
-   end
- end
+ @backend ||= if defined?(Cloudtasker::Testing) && Cloudtasker::Testing.in_memory?
+   require 'cloudtasker/backend/memory_task'
+   Backend::MemoryTask
+ elsif Cloudtasker.config.mode.to_sym == :development
+   require 'cloudtasker/backend/redis_task'
+   Backend::RedisTask
+ else
+   gct_backend
+ end
  end

  #
@@ -42,15 +40,13 @@ module Cloudtasker
  # ] The google cloud task backend.
  #
  def self.gct_backend
-   @gct_backend ||= begin
-     if !defined?(Google::Cloud::Tasks::VERSION) || Google::Cloud::Tasks::VERSION < '2'
-       require 'cloudtasker/backend/google_cloud_task_v1'
-       Backend::GoogleCloudTaskV1
-     else
-       require 'cloudtasker/backend/google_cloud_task_v2'
-       Backend::GoogleCloudTaskV2
-     end
-   end
+   @gct_backend ||= if !defined?(Google::Cloud::Tasks::VERSION) || Google::Cloud::Tasks::VERSION < '2'
+     require 'cloudtasker/backend/google_cloud_task_v1'
+     Backend::GoogleCloudTaskV1
+   else
+     require 'cloudtasker/backend/google_cloud_task_v2'
+     Backend::GoogleCloudTaskV2
+   end
  end

  #
data/lib/cloudtasker/config.rb

@@ -8,23 +8,18 @@ module Cloudtasker
  attr_accessor :redis, :store_payloads_in_redis, :gcp_queue_prefix
  attr_writer :secret, :gcp_location_id, :gcp_project_id,
              :processor_path, :logger, :mode, :max_retries,
-             :dispatch_deadline, :on_error, :on_dead
+             :dispatch_deadline, :on_error, :on_dead, :oidc, :local_server_ssl_verify

  # Max Cloud Task size in bytes
  MAX_TASK_SIZE = 100 * 1024 # 100 KB

  # Retry header in Cloud Task responses
  #
- # TODO: use 'X-CloudTasks-TaskExecutionCount' instead of 'X-CloudTasks-TaskRetryCount'
- # 'X-CloudTasks-TaskExecutionCount' is currently bugged and remains at 0 even on retries.
- #
- # See bug: https://issuetracker.google.com/issues/154532072
- #
  # Definitions:
  # X-CloudTasks-TaskRetryCount: total number of retries (including 504 "instance unreachable")
  # X-CloudTasks-TaskExecutionCount: number of non-503 retries (= actual number of job failures)
  #
- RETRY_HEADER = 'X-CloudTasks-TaskRetryCount'
+ RETRY_HEADER = 'X-Cloudtasks-Taskexecutioncount'

  # Cloud Task ID header
  TASK_ID_HEADER = 'X-CloudTasks-TaskName'
@@ -35,12 +30,18 @@ module Cloudtasker
  # Content Type
  CONTENT_TYPE_HEADER = 'Content-Type'

- # Authorization header
- AUTHORIZATION_HEADER = 'Authorization'
+ # OIDC Authorization header
+ OIDC_AUTHORIZATION_HEADER = 'Authorization'
+
+ # Custom authentication header that does not conflict with
+ # OIDC authorization header
+ CT_AUTHORIZATION_HEADER = 'X-Cloudtasker-Authorization'
+ CT_SIGNATURE_HEADER = 'X-Cloudtasker-Signature'

  # Default values
  DEFAULT_LOCATION_ID = 'us-east1'
  DEFAULT_PROCESSOR_PATH = '/cloudtasker/run'
+ DEFAULT_LOCAL_SERVER_SSL_VERIFY_MODE = true

  # Default queue values
  DEFAULT_JOB_QUEUE = 'default'
@@ -55,6 +56,10 @@ module Cloudtasker
  # Default on_error Proc
  DEFAULT_ON_ERROR = ->(error, worker) {}

+ # Cache key prefix used to store workers in cache and retrieve
+ # them later.
+ WORKER_STORE_PREFIX = 'worker_store'
+
  # The number of times jobs will be attempted before declaring them dead.
  #
  # With the default retry configuration (maxDoublings = 16 and minBackoff = 0.100s)
@@ -78,6 +83,10 @@ module Cloudtasker
    Missing cloudtasker secret.
    Please specify a secret in the cloudtasker initializer or add Rails secret_key_base in your credentials
  DOC
+ OIDC_EMAIL_MISSING_ERROR = <<~DOC
+   Missing OpenID Connect (OIDC) service account email.
+   You specified an OIDC configuration hash but the :service_account_email property is missing.
+ DOC

  #
  # Return the threshold above which job arguments must be stored
@@ -131,7 +140,7 @@ module Cloudtasker
  # @return [Logger, any] The cloudtasker logger.
  #
  def logger
-   @logger ||= defined?(Rails) ? Rails.logger : ::Logger.new(STDOUT)
+   @logger ||= defined?(Rails) ? Rails.logger : ::Logger.new($stdout)
  end

  #
@@ -244,6 +253,21 @@ module Cloudtasker
    @on_dead || DEFAULT_ON_ERROR
  end

+ #
+ # Return the Open ID Connect configuration to use for tasks.
+ #
+ # @return [Hash] The OIDC configuration
+ #
+ def oidc
+   return unless @oidc
+   raise(StandardError, OIDC_EMAIL_MISSING_ERROR) unless @oidc[:service_account_email]
+
+   {
+     service_account_email: @oidc[:service_account_email],
+     audience: @oidc[:audience] || processor_host
+   }
+ end
+
  #
  # Return the chain of client middlewares.
  #
@@ -265,5 +289,14 @@ module Cloudtasker
    yield @server_middleware if block_given?
    @server_middleware
  end
+
+ #
+ # Return the ssl verify mode for the Cloudtasker local server.
+ #
+ # @return [Boolean] The ssl verify mode for the Cloudtasker local server.
+ #
+ def local_server_ssl_verify
+   @local_server_ssl_verify.nil? ? DEFAULT_LOCAL_SERVER_SSL_VERIFY_MODE : @local_server_ssl_verify
+ end
  end
  end
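
The new oidc and local_server_ssl_verify writers above are plain configuration accessors, so they can be set from the usual initializer. A minimal sketch, assuming the standard Cloudtasker.configure block; the service account email is a placeholder and, per the oidc reader above, the audience falls back to the processor host when omitted.

  # config/initializers/cloudtasker.rb
  Cloudtasker.configure do |config|
    # Ask Cloud Tasks to attach an OIDC token for this service account.
    # :audience is optional and defaults to the processor host.
    config.oidc = { service_account_email: 'cloudtasker@my-project.iam.gserviceaccount.com' }

    # Skip SSL verification for the local development server only (defaults to true).
    config.local_server_ssl_verify = false
  end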
data/lib/cloudtasker/cron/middleware/server.rb

@@ -5,8 +5,8 @@ module Cloudtasker
  module Middleware
    # Server middleware, invoked when jobs are executed
    class Server
-     def call(worker, **_kwargs)
-       Job.new(worker).execute { yield }
+     def call(worker, **_kwargs, &block)
+       Job.new(worker).execute(&block)
      end
    end
  end
data/lib/cloudtasker/cron/schedule.rb

@@ -159,6 +159,9 @@ module Cloudtasker
  #
  def valid?
    id && cron_schedule && worker
+ rescue ArgumentError
+   # Rescue invalid cron expressions
+   false
  end

  #
@@ -225,7 +228,7 @@ module Cloudtasker
  # @return [Fugit::Cron] The cron schedule.
  #
  def cron_schedule
-   @cron_schedule ||= Fugit::Cron.parse(cron)
+   @cron_schedule ||= Fugit::Cron.do_parse(cron)
  end

  #
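
The switch from Fugit::Cron.parse to Fugit::Cron.do_parse means an invalid cron expression now raises an error instead of returning nil, which is why valid? above gains the ArgumentError rescue. A quick illustration of the fugit behaviour this relies on:

  require 'fugit'

  Fugit::Cron.parse('not a cron')     # => nil (old behaviour: valid? simply saw a falsy value)
  Fugit::Cron.do_parse('not a cron')  # raises ArgumentError (new behaviour: rescued by valid?)
  Fugit::Cron.do_parse('0 5 * * 1')   # => a Fugit::Cron instance for a well-formed expression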
@@ -255,7 +258,7 @@ module Cloudtasker
  #
  def assign_attributes(opts)
    opts
-     .select { |k, _| instance_variables.include?("@#{k}".to_sym) }
+     .select { |k, _| instance_variables.include?(:"@#{k}") }
      .each { |k, v| instance_variable_set("@#{k}", v) }
  end

data/lib/cloudtasker/middleware/chain.rb

@@ -159,7 +159,7 @@ module Cloudtasker
  def insert_after(oldklass, newklass, *args)
    i = entries.index { |entry| entry.klass == newklass }
    new_entry = i.nil? ? Entry.new(newklass, *args) : entries.delete_at(i)
-   i = entries.index { |entry| entry.klass == oldklass } || entries.count - 1
+   i = entries.index { |entry| entry.klass == oldklass } || (entries.count - 1)
    entries.insert(i + 1, new_entry)
  end

data/lib/cloudtasker/redis_client.rb

@@ -12,7 +12,7 @@ module Cloudtasker
  LOCK_WAIT_DURATION = 0.03 # seconds

  # Default pool size used for Redis
- DEFAULT_POOL_SIZE = ENV.fetch('RAILS_MAX_THREADS') { 25 }
+ DEFAULT_POOL_SIZE = ENV.fetch('RAILS_MAX_THREADS', 25)
  DEFAULT_POOL_TIMEOUT = 5

  def self.client
@@ -132,7 +132,6 @@ module Cloudtasker
    list
  end

- # rubocop:disable Style/MissingRespondToMissing
  if RUBY_VERSION < '3'
    #
    # Delegate all methods to the redis client.
@@ -172,8 +171,6 @@ module Cloudtasker
      end
    end
  end
- # rubocop:enable Style/MissingRespondToMissing
-
  #
  # Check if the class respond to a certain method.
  #
data/lib/cloudtasker/retry_worker_error.rb

@@ -0,0 +1,6 @@
+ # frozen_string_literal: true
+
+ module Cloudtasker
+   class RetryWorkerError < StandardError
+   end
+ end
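
The worker.rb changes that consume this new error class are not shown in the hunks above, so the sketch below is only an assumption about its intended use: raising it from perform to request a retry of the task. Check the 0.14 worker code or CHANGELOG for the exact semantics.

  class SyncWorker
    include Cloudtasker::Worker

    def perform(resource_id)
      resource = Api.fetch(resource_id) # Api is a stand-in for your own client
      # Hypothetical: ask Cloudtasker to retry when the upstream resource is not ready yet
      raise Cloudtasker::RetryWorkerError, 'resource not ready' unless resource.ready?

      resource.sync!
    end
  end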
data/lib/cloudtasker/storable/worker.rb

@@ -0,0 +1,78 @@
+ # frozen_string_literal: true
+
+ module Cloudtasker
+   module Storable
+     # Add ability to store and pull workers in Redis under a specific namespace
+     module Worker
+       # Add class method to including class
+       def self.included(base)
+         base.extend(ClassMethods)
+       end
+
+       # Module class methods
+       module ClassMethods
+         #
+         # Return the namespaced store key used to store jobs that
+         # have been parked and should be manually popped later.
+         #
+         # @param [String] namespace The user-provided store namespace
+         #
+         # @return [String] The full store cache key
+         #
+         def store_cache_key(namespace)
+           cache_key([Config::WORKER_STORE_PREFIX, namespace])
+         end
+
+         #
+         # Push the worker to a namespaced store.
+         #
+         # @param [String] namespace The store namespace
+         # @param [Array<any>] *args List of worker arguments
+         #
+         # @return [String] The number of elements added to the store
+         #
+         def push_to_store(namespace, *args)
+           redis.rpush(store_cache_key(namespace), [args.to_json])
+         end
+
+         #
+         # Push many workers to a namespaced store at once.
+         #
+         # @param [String] namespace The store namespace
+         # @param [Array<Array<any>>] args_list A list of arguments for each worker
+         #
+         # @return [String] The number of elements added to the store
+         #
+         def push_many_to_store(namespace, args_list)
+           redis.rpush(store_cache_key(namespace), args_list.map(&:to_json))
+         end
+
+         #
+         # Pull the jobs from the namespaced store and enqueue them.
+         #
+         # @param [String] namespace The store namespace.
+         # @param [Integer] page_size The number of items to pull on each page. Defaults to 1000.
+         #
+         def pull_all_from_store(namespace, page_size: 1000)
+           items = nil
+
+           while items.nil? || items.present?
+             # Pull items
+             items = redis.lpop(store_cache_key(namespace), page_size).to_a
+
+             # For each item, execute block or enqueue it
+             items.each do |args_json|
+               worker_args = JSON.parse(args_json)
+
+               if block_given?
+                 yield(worker_args)
+               else
+                 perform_async(*worker_args)
+               end
+             end
+           end
+         end
+       end
+     end
+   end
+ end
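
A usage sketch for the new Storable module above (see also docs/STORABLE_JOBS.md added in this release). The worker class, arguments and namespace are invented; push_to_store, push_many_to_store and pull_all_from_store are the class methods defined in the hunk, and the include line follows the module's included hook.

  class InvoiceWorker
    include Cloudtasker::Worker
    include Cloudtasker::Storable::Worker

    def perform(invoice_id)
      # ...
    end
  end

  # Park work in Redis instead of enqueuing it right away
  InvoiceWorker.push_to_store('end-of-month', 101)
  InvoiceWorker.push_many_to_store('end-of-month', [[102], [103]])

  # Later, drain the store: each entry is enqueued via perform_async,
  # or handed to the block when one is given.
  InvoiceWorker.pull_all_from_store('end-of-month')
  InvoiceWorker.pull_all_from_store('end-of-month') { |args| InvoiceWorker.perform_in(60, *args) }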
data/lib/cloudtasker/storable.rb

@@ -0,0 +1,3 @@
+ # frozen_string_literal: true
+
+ require_relative 'storable/worker'
data/lib/cloudtasker/unique_job/conflict_strategy/base_strategy.rb

@@ -20,8 +20,10 @@ module Cloudtasker
  # Handling logic to perform when a conflict occurs while
  # scheduling a job.
  #
+ # We return nil to flag the job as not scheduled
+ #
  def on_schedule
-   true
+   nil
  end

  #
@@ -29,7 +31,7 @@ module Cloudtasker
  # executing a job.
  #
  def on_execute
-   true
+   nil
  end
  end
  end
data/lib/cloudtasker/unique_job/lock/until_executed.rb

@@ -10,22 +10,22 @@ module Cloudtasker
  # Acquire a lock for the job and trigger a conflict
  # if the lock could not be acquired.
  #
- def schedule
+ def schedule(&block)
    job.lock!
    yield
  rescue LockError
-   conflict_instance.on_schedule { yield }
+   conflict_instance.on_schedule(&block)
  end

  #
  # Acquire a lock for the job and trigger a conflict
  # if the lock could not be acquired.
  #
- def execute
+ def execute(&block)
    job.lock!
    yield
  rescue LockError
-   conflict_instance.on_execute { yield }
+   conflict_instance.on_execute(&block)
  ensure
    # Unlock the job on any error to avoid deadlocks.
    job.unlock!