switchman-inst-jobs 4.0.3 → 4.0.6

Sign up to get free protection for your applications and access to all features.
Files changed (41)
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +0 -4
  3. data/db/migrate/20110208031356_add_delayed_jobs_tag.rb +0 -4
  4. data/db/migrate/20110426161613_add_delayed_jobs_max_attempts.rb +0 -4
  5. data/db/migrate/20110516225834_add_delayed_jobs_strand.rb +0 -4
  6. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +1 -5
  7. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +0 -9
  8. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +6 -10
  9. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +0 -4
  10. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +0 -4
  11. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +0 -4
  12. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +0 -4
  13. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +0 -4
  14. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +0 -4
  15. data/db/migrate/20140505215131_add_failed_jobs_original_job_id.rb +0 -4
  16. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -6
  17. data/db/migrate/20140505223637_drop_failed_jobs_original_id.rb +0 -4
  18. data/db/migrate/20140512213941_add_source_to_jobs.rb +0 -4
  19. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +0 -4
  20. data/db/migrate/20151123210429_add_expires_at_to_jobs.rb +0 -4
  21. data/db/migrate/20151210162949_improve_max_concurrent.rb +0 -4
  22. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +4 -8
  23. data/db/migrate/20170308045400_add_shard_id_to_delayed_jobs.rb +0 -4
  24. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +0 -4
  25. data/db/migrate/20190726154743_make_critical_columns_not_null.rb +0 -4
  26. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +0 -4
  27. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +0 -4
  28. data/db/migrate/20200825011002_add_strand_order_override.rb +0 -4
  29. data/db/migrate/20210812210128_add_singleton_column.rb +6 -6
  30. data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +1 -1
  31. data/db/migrate/20220127091200_fix_singleton_unique_constraint.rb +31 -0
  32. data/db/migrate/20220128084800_update_insert_trigger_for_singleton_unique_constraint_change.rb +60 -0
  33. data/db/migrate/20220128084900_update_delete_trigger_for_singleton_unique_constraint_change.rb +209 -0
  34. data/db/migrate/20220203063200_remove_old_singleton_index.rb +31 -0
  35. data/lib/switchman_inst_jobs/delayed/backend/active_record/abstract_job.rb +25 -0
  36. data/lib/switchman_inst_jobs/engine.rb +6 -2
  37. data/lib/switchman_inst_jobs/jobs_migrator.rb +124 -66
  38. data/lib/switchman_inst_jobs/version.rb +1 -1
  39. data/lib/switchman_inst_jobs.rb +4 -4
  40. metadata +9 -5
  41. data/lib/switchman_inst_jobs/active_record/connection_adapters/connection_pool.rb +0 -15
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: f07c44f5a58d897c8dbcc46a40906cde8c6a99b39723286a405669cf159019e4
4
- data.tar.gz: cf730bfc3d3ad6d2c7da7712c3c097322421820b60f2d6af025ef610f0f3aa09
3
+ metadata.gz: 61fb55d3fff48ff95a3fae70acc49935250642ac4b2b24d3ece10e9d5624e1dd
4
+ data.tar.gz: 8544c358faaa63992a5db35db5d1d2a7317048349d91857d9b0241b02ec30b13
5
5
  SHA512:
6
- metadata.gz: 9189978ce61d257fb25bbf57c23a36998437b3c4aa6adb93798af07ea878caf244a1514c5711c5190b39836169607e869f65b89ed71cc08d44fabfd9d6fe0e8e
7
- data.tar.gz: d4a76e521ee4ba38bb2172a1ffabcc4b139a204d24690a2662ca7e8587d367ba88d98db5912c716421c96940f309296f6b35e6b292025326d61e32e03217d001
6
+ metadata.gz: f4601a44b1c795edb3302f5b9174292ff7724315cdb321a0fd9213f44eb72e3ffb0d48195f03426c7701eb32232b550f8bff325ea48f2f89133c7ba2e8039653
7
+ data.tar.gz: 8651595cec7bf1f67544e33fe806f1f082fe26809fdadacb7ad86ebfb885436db5c9333e09a1d4c320fc52dee7b1b942fe646524e1ee45d8db62b88b3f116397
@@ -1,8 +1,4 @@
1
1
  class CreateDelayedJobs < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  unless connection.adapter_name == 'PostgreSQL'
8
4
  raise("#{connection.adapter_name} is not supported for delayed jobs queue")
@@ -1,8 +1,4 @@
1
1
  class AddDelayedJobsTag < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  add_column :delayed_jobs, :tag, :string
8
4
  add_index :delayed_jobs, [:tag]
@@ -1,8 +1,4 @@
1
1
  class AddDelayedJobsMaxAttempts < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  add_column :delayed_jobs, :max_attempts, :integer
8
4
  end
@@ -1,8 +1,4 @@
1
1
  class AddDelayedJobsStrand < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  add_column :delayed_jobs, :strand, :string
8
4
  add_index :delayed_jobs, :strand
@@ -1,14 +1,10 @@
1
1
  class CleanupDelayedJobsIndexes < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  case connection.adapter_name
8
4
  when 'PostgreSQL'
9
5
  # "nulls first" syntax is postgresql specific, and allows for more
10
6
  # efficient querying for the next job
11
- connection.execute("CREATE INDEX get_delayed_jobs_index ON #{::Delayed::Job.quoted_table_name} (priority, run_at, failed_at nulls first, locked_at nulls first, queue)")
7
+ connection.execute("CREATE INDEX get_delayed_jobs_index ON #{connection.quote_table_name(::Delayed::Job.table_name)} (priority, run_at, failed_at nulls first, locked_at nulls first, queue)")
12
8
  else
13
9
  add_index :delayed_jobs, %w[priority run_at locked_at failed_at queue], name: 'get_delayed_jobs_index'
14
10
  end
@@ -1,8 +1,4 @@
1
1
  class OptimizeDelayedJobs < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  create_table :failed_jobs do |t|
8
4
  t.integer 'priority', default: 0
@@ -27,11 +23,6 @@ class OptimizeDelayedJobs < ActiveRecord::Migration[4.2]
27
23
 
28
24
  add_index :delayed_jobs, %w[run_at queue locked_at strand priority], name: 'index_delayed_jobs_for_get_next'
29
25
  add_index :delayed_jobs, %w[strand id], name: 'index_delayed_jobs_on_strand'
30
-
31
- # move all failed jobs to the new failed table
32
- Delayed::Backend::ActiveRecord::Job.where.not(failed_at: nil).find_each do |job|
33
- job.fail! unless job.on_hold?
34
- end
35
26
  end
36
27
 
37
28
  def down
@@ -1,15 +1,11 @@
1
1
  class AddDelayedJobsNextInStrand < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  remove_index :delayed_jobs, name: 'index_delayed_jobs_for_get_next'
8
4
 
9
5
  add_column :delayed_jobs, :next_in_strand, :boolean, default: true, null: false
10
6
 
11
7
  # create the new index
12
- connection.execute("CREATE INDEX get_delayed_jobs_index ON #{::Delayed::Job.quoted_table_name} (priority, run_at, queue) WHERE locked_at IS NULL AND next_in_strand = 't'")
8
+ connection.execute("CREATE INDEX get_delayed_jobs_index ON #{connection.quote_table_name(::Delayed::Job.table_name)} (priority, run_at, queue) WHERE locked_at IS NULL AND next_in_strand = 't'")
13
9
 
14
10
  # create the insert trigger
15
11
  execute(<<-CODE)
@@ -23,7 +19,7 @@ class AddDelayedJobsNextInStrand < ActiveRecord::Migration[4.2]
23
19
  END;
24
20
  $$ LANGUAGE plpgsql SET search_path TO #{::Switchman::Shard.current.name};
25
21
  CODE
26
- execute("CREATE TRIGGER delayed_jobs_before_insert_row_tr BEFORE INSERT ON #{::Delayed::Job.quoted_table_name} FOR EACH ROW WHEN (NEW.strand IS NOT NULL) EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_before_insert_row_tr_fn')}()")
22
+ execute("CREATE TRIGGER delayed_jobs_before_insert_row_tr BEFORE INSERT ON #{connection.quote_table_name(::Delayed::Job.table_name)} FOR EACH ROW WHEN (NEW.strand IS NOT NULL) EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_before_insert_row_tr_fn')}()")
27
23
 
28
24
  # create the delete trigger
29
25
  execute(<<-CODE)
@@ -34,15 +30,15 @@ class AddDelayedJobsNextInStrand < ActiveRecord::Migration[4.2]
34
30
  END;
35
31
  $$ LANGUAGE plpgsql SET search_path TO #{::Switchman::Shard.current.name};
36
32
  CODE
37
- execute("CREATE TRIGGER delayed_jobs_after_delete_row_tr AFTER DELETE ON #{::Delayed::Job.quoted_table_name} FOR EACH ROW WHEN (OLD.strand IS NOT NULL AND OLD.next_in_strand = 't') EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_after_delete_row_tr_fn')} ()")
33
+ execute("CREATE TRIGGER delayed_jobs_after_delete_row_tr AFTER DELETE ON #{connection.quote_table_name(::Delayed::Job.table_name)} FOR EACH ROW WHEN (OLD.strand IS NOT NULL AND OLD.next_in_strand = 't') EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_after_delete_row_tr_fn')} ()")
38
34
 
39
- execute(%{UPDATE #{::Delayed::Job.quoted_table_name} SET next_in_strand = 'f' WHERE strand IS NOT NULL AND id <> (SELECT id FROM #{::Delayed::Job.quoted_table_name} j2 WHERE j2.strand = delayed_jobs.strand ORDER BY j2.strand, j2.id ASC LIMIT 1)})
35
+ execute(%{UPDATE #{connection.quote_table_name(::Delayed::Job.table_name)} SET next_in_strand = 'f' WHERE strand IS NOT NULL AND id <> (SELECT id FROM #{connection.quote_table_name(::Delayed::Job.table_name)} j2 WHERE j2.strand = delayed_jobs.strand ORDER BY j2.strand, j2.id ASC LIMIT 1)})
40
36
  end
41
37
 
42
38
  def down
43
- execute %(DROP TRIGGER delayed_jobs_before_insert_row_tr ON #{::Delayed::Job.quoted_table_name})
39
+ execute %(DROP TRIGGER delayed_jobs_before_insert_row_tr ON #{connection.quote_table_name(::Delayed::Job.table_name)})
44
40
  execute %{DROP FUNCTION #{connection.quote_table_name('delayed_jobs_before_insert_row_tr_fn')} ()}
45
- execute %(DROP TRIGGER delayed_jobs_after_delete_row_tr ON #{::Delayed::Job.quoted_table_name})
41
+ execute %(DROP TRIGGER delayed_jobs_after_delete_row_tr ON #{connection.quote_table_name(::Delayed::Job.table_name)})
46
42
  execute %{DROP FUNCTION #{connection.quote_table_name('delayed_jobs_after_delete_row_tr_fn')} ()}
47
43
 
48
44
  remove_column :delayed_jobs, :next_in_strand
@@ -1,8 +1,4 @@
1
1
  class DelayedJobsDeleteTriggerLockForUpdate < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  if connection.adapter_name == 'PostgreSQL'
8
4
  execute(<<-CODE)
@@ -1,8 +1,4 @@
1
1
  class DropPsqlJobsPopFn < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  if connection.adapter_name == 'PostgreSQL'
8
4
  connection.execute('DROP FUNCTION IF EXISTS pop_from_delayed_jobs(varchar, varchar, integer, integer, timestamp without time zone)')
@@ -1,8 +1,4 @@
1
1
  class DelayedJobsUseAdvisoryLocks < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  # use an advisory lock based on the name of the strand, instead of locking the whole table
8
4
  # note that we're using half of the md5, so collisions are possible, but we don't really
@@ -1,10 +1,6 @@
1
1
  class IndexJobsOnLockedBy < ActiveRecord::Migration[4.2]
2
2
  disable_ddl_transaction!
3
3
 
4
- def connection
5
- Delayed::Backend::ActiveRecord::AbstractJob.connection
6
- end
7
-
8
4
  def up
9
5
  add_index :delayed_jobs, :locked_by, algorithm: :concurrently, where: 'locked_by IS NOT NULL'
10
6
  end
@@ -1,10 +1,6 @@
1
1
  class AddJobsRunAtIndex < ActiveRecord::Migration[4.2]
2
2
  disable_ddl_transaction!
3
3
 
4
- def connection
5
- Delayed::Backend::ActiveRecord::AbstractJob.connection
6
- end
7
-
8
4
  def up
9
5
  add_index :delayed_jobs, %w[run_at tag], algorithm: :concurrently
10
6
  end
@@ -1,8 +1,4 @@
1
1
  class ChangeDelayedJobsHandlerToText < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  change_column :delayed_jobs, :handler, :text
8
4
  end
@@ -1,8 +1,4 @@
1
1
  class AddFailedJobsOriginalJobId < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  add_column :failed_jobs, :original_job_id, :integer, limit: 8
8
4
  end
@@ -1,11 +1,7 @@
1
1
  class CopyFailedJobsOriginalId < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
- # this is a smaller, less frequently accessed table, so we just update all at once
8
- Delayed::Backend::ActiveRecord::Job::Failed.where(original_job_id: nil).update_all('original_job_id = original_id')
3
+ # Noop since we don't want to modify the shard using a different connection than the one we are using to build it and
4
+ # this migration is very old
9
5
  end
10
6
 
11
7
  def down; end
@@ -1,8 +1,4 @@
1
1
  class DropFailedJobsOriginalId < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  remove_column :failed_jobs, :original_id
8
4
  end
@@ -1,8 +1,4 @@
1
1
  class AddSourceToJobs < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  add_column :delayed_jobs, :source, :string
8
4
  add_column :failed_jobs, :source, :string
@@ -1,8 +1,4 @@
1
1
  class AddMaxConcurrentToJobs < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  add_column :delayed_jobs, :max_concurrent, :integer, default: 1, null: false
8
4
 
@@ -1,8 +1,4 @@
1
1
  class AddExpiresAtToJobs < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  add_column :delayed_jobs, :expires_at, :datetime
8
4
  add_column :failed_jobs, :expires_at, :datetime
@@ -1,8 +1,4 @@
1
1
  class ImproveMaxConcurrent < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  if connection.adapter_name == 'PostgreSQL'
8
4
  execute(<<-CODE)
@@ -1,8 +1,4 @@
1
1
  class AddBackDefaultStringLimitsJobs < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  drop_triggers
8
4
 
@@ -22,13 +18,13 @@ class AddBackDefaultStringLimitsJobs < ActiveRecord::Migration[4.2]
22
18
  end
23
19
 
24
20
  def drop_triggers
25
- execute %(DROP TRIGGER delayed_jobs_before_insert_row_tr ON #{::Delayed::Job.quoted_table_name})
26
- execute %(DROP TRIGGER delayed_jobs_after_delete_row_tr ON #{::Delayed::Job.quoted_table_name})
21
+ execute %(DROP TRIGGER delayed_jobs_before_insert_row_tr ON #{connection.quote_table_name(::Delayed::Job.table_name)})
22
+ execute %(DROP TRIGGER delayed_jobs_after_delete_row_tr ON #{connection.quote_table_name(::Delayed::Job.table_name)})
27
23
  end
28
24
 
29
25
  def readd_triggers
30
- execute("CREATE TRIGGER delayed_jobs_before_insert_row_tr BEFORE INSERT ON #{::Delayed::Job.quoted_table_name} FOR EACH ROW WHEN (NEW.strand IS NOT NULL) EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_before_insert_row_tr_fn')}()")
31
- execute("CREATE TRIGGER delayed_jobs_after_delete_row_tr AFTER DELETE ON #{::Delayed::Job.quoted_table_name} FOR EACH ROW WHEN (OLD.strand IS NOT NULL AND OLD.next_in_strand = 't') EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_after_delete_row_tr_fn')}()")
26
+ execute("CREATE TRIGGER delayed_jobs_before_insert_row_tr BEFORE INSERT ON #{connection.quote_table_name(::Delayed::Job.table_name)} FOR EACH ROW WHEN (NEW.strand IS NOT NULL) EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_before_insert_row_tr_fn')}()")
27
+ execute("CREATE TRIGGER delayed_jobs_after_delete_row_tr AFTER DELETE ON #{connection.quote_table_name(::Delayed::Job.table_name)} FOR EACH ROW WHEN (OLD.strand IS NOT NULL AND OLD.next_in_strand = 't') EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_after_delete_row_tr_fn')}()")
32
28
  end
33
29
 
34
30
  def add_string_limit_if_missing(table, column)
@@ -1,10 +1,6 @@
1
1
  class AddShardIdToDelayedJobs < ActiveRecord::Migration[4.2]
2
2
  disable_ddl_transaction!
3
3
 
4
- def connection
5
- Delayed::Backend::ActiveRecord::AbstractJob.connection
6
- end
7
-
8
4
  def up
9
5
  add_column :delayed_jobs, :shard_id, :integer, limit: 8
10
6
  add_index :delayed_jobs, :shard_id, algorithm: :concurrently
@@ -1,8 +1,4 @@
1
1
  class SpeedUpMaxConcurrentTriggers < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  if connection.adapter_name == 'PostgreSQL'
8
4
  # tl;dr sacrifice some responsiveness to max_concurrent changes for faster performance
@@ -1,8 +1,4 @@
1
1
  class MakeCriticalColumnsNotNull < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  change_column_null :delayed_jobs, :run_at, false
8
4
  change_column_null :delayed_jobs, :queue, false
@@ -1,10 +1,6 @@
1
1
  class AddIdToGetDelayedJobsIndex < ActiveRecord::Migration[4.2]
2
2
  disable_ddl_transaction!
3
3
 
4
- def connection
5
- Delayed::Backend::ActiveRecord::AbstractJob.connection
6
- end
7
-
8
4
  def up
9
5
  rename_index :delayed_jobs, 'get_delayed_jobs_index', 'get_delayed_jobs_index_old'
10
6
  add_index :delayed_jobs, %i[queue priority run_at id],
@@ -1,8 +1,4 @@
1
1
  class SpeedUpMaxConcurrentDeleteTrigger < ActiveRecord::Migration[4.2]
2
- def connection
3
- Delayed::Backend::ActiveRecord::AbstractJob.connection
4
- end
5
-
6
2
  def up
7
3
  if connection.adapter_name == 'PostgreSQL'
8
4
  # tl;dr sacrifice some responsiveness to max_concurrent changes for faster performance
@@ -1,10 +1,6 @@
1
1
  class AddStrandOrderOverride < ActiveRecord::Migration[4.2]
2
2
  disable_ddl_transaction!
3
3
 
4
- def connection
5
- Delayed::Backend::ActiveRecord::AbstractJob.connection
6
- end
7
-
8
4
  def up
9
5
  add_column :delayed_jobs, :strand_order_override, :integer, default: 0, null: false
10
6
  add_column :failed_jobs, :strand_order_override, :integer, default: 0, null: false
@@ -181,20 +181,20 @@ class AddSingletonColumn < ActiveRecord::Migration[5.2]
181
181
  reversible do |direction|
182
182
  direction.up do
183
183
  drop_triggers
184
- execute("CREATE TRIGGER delayed_jobs_before_insert_row_tr BEFORE INSERT ON #{::Delayed::Job.quoted_table_name} FOR EACH ROW WHEN (NEW.strand IS NOT NULL OR NEW.singleton IS NOT NULL) EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_before_insert_row_tr_fn')}()")
185
- execute("CREATE TRIGGER delayed_jobs_after_delete_row_tr AFTER DELETE ON #{::Delayed::Job.quoted_table_name} FOR EACH ROW WHEN ((OLD.strand IS NOT NULL OR OLD.singleton IS NOT NULL) AND OLD.next_in_strand=true) EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_after_delete_row_tr_fn')}()")
184
+ execute("CREATE TRIGGER delayed_jobs_before_insert_row_tr BEFORE INSERT ON #{connection.quote_table_name(::Delayed::Job.table_name)} FOR EACH ROW WHEN (NEW.strand IS NOT NULL OR NEW.singleton IS NOT NULL) EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_before_insert_row_tr_fn')}()")
185
+ execute("CREATE TRIGGER delayed_jobs_after_delete_row_tr AFTER DELETE ON #{connection.quote_table_name(::Delayed::Job.table_name)} FOR EACH ROW WHEN ((OLD.strand IS NOT NULL OR OLD.singleton IS NOT NULL) AND OLD.next_in_strand=true) EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_after_delete_row_tr_fn')}()")
186
186
  end
187
187
  direction.down do
188
188
  drop_triggers
189
- execute("CREATE TRIGGER delayed_jobs_before_insert_row_tr BEFORE INSERT ON #{::Delayed::Job.quoted_table_name} FOR EACH ROW WHEN (NEW.strand IS NOT NULL) EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_before_insert_row_tr_fn')}()")
190
- execute("CREATE TRIGGER delayed_jobs_after_delete_row_tr AFTER DELETE ON #{::Delayed::Job.quoted_table_name} FOR EACH ROW WHEN (OLD.strand IS NOT NULL AND OLD.next_in_strand = 't') EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_after_delete_row_tr_fn()')}")
189
+ execute("CREATE TRIGGER delayed_jobs_before_insert_row_tr BEFORE INSERT ON #{connection.quote_table_name(::Delayed::Job.table_name)} FOR EACH ROW WHEN (NEW.strand IS NOT NULL) EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_before_insert_row_tr_fn')}()")
190
+ execute("CREATE TRIGGER delayed_jobs_after_delete_row_tr AFTER DELETE ON #{connection.quote_table_name(::Delayed::Job.table_name)} FOR EACH ROW WHEN (OLD.strand IS NOT NULL AND OLD.next_in_strand = 't') EXECUTE PROCEDURE #{connection.quote_table_name('delayed_jobs_after_delete_row_tr_fn()')}")
191
191
  end
192
192
  end
193
193
  end
194
194
  end
195
195
 
196
196
  def drop_triggers
197
- execute("DROP TRIGGER delayed_jobs_before_insert_row_tr ON #{::Delayed::Job.quoted_table_name}")
198
- execute("DROP TRIGGER delayed_jobs_after_delete_row_tr ON #{::Delayed::Job.quoted_table_name}")
197
+ execute("DROP TRIGGER delayed_jobs_before_insert_row_tr ON #{connection.quote_table_name(::Delayed::Job.table_name)}")
198
+ execute("DROP TRIGGER delayed_jobs_after_delete_row_tr ON #{connection.quote_table_name(::Delayed::Job.table_name)}")
199
199
  end
200
200
  end
@@ -13,7 +13,7 @@ class AddDeleteConflictingSingletonsBeforeUnlockTrigger < ActiveRecord::Migratio
13
13
  $$ LANGUAGE plpgsql SET search_path TO #{::Switchman::Shard.current.name};
14
14
  SQL
15
15
  execute(<<~SQL)
16
- CREATE TRIGGER delayed_jobs_before_unlock_delete_conflicting_singletons_row_tr BEFORE UPDATE ON #{::Delayed::Job.quoted_table_name} FOR EACH ROW WHEN (
16
+ CREATE TRIGGER delayed_jobs_before_unlock_delete_conflicting_singletons_row_tr BEFORE UPDATE ON #{connection.quote_table_name(::Delayed::Job.table_name)} FOR EACH ROW WHEN (
17
17
  OLD.singleton IS NOT NULL AND
18
18
  OLD.singleton=NEW.singleton AND
19
19
  OLD.locked_by IS NOT NULL AND
@@ -0,0 +1,31 @@
1
+ # frozen_string_literal: true
2
+
3
+ class FixSingletonUniqueConstraint < ActiveRecord::Migration[5.2]
4
+ disable_ddl_transaction!
5
+
6
+ def up
7
+ rename_index :delayed_jobs, 'index_delayed_jobs_on_singleton_not_running', 'index_delayed_jobs_on_singleton_not_running_old'
8
+ rename_index :delayed_jobs, 'index_delayed_jobs_on_singleton_running', 'index_delayed_jobs_on_singleton_running_old'
9
+
10
+ # only one job can be queued in a singleton
11
+ add_index :delayed_jobs,
12
+ :singleton,
13
+ where: "singleton IS NOT NULL AND (locked_by IS NULL OR locked_by = '#{::Delayed::Backend::Base::ON_HOLD_LOCKED_BY}')",
14
+ unique: true,
15
+ name: 'index_delayed_jobs_on_singleton_not_running',
16
+ algorithm: :concurrently
17
+
18
+ # only one job can be running for a singleton
19
+ add_index :delayed_jobs,
20
+ :singleton,
21
+ where: "singleton IS NOT NULL AND locked_by IS NOT NULL AND locked_by <> '#{::Delayed::Backend::Base::ON_HOLD_LOCKED_BY}'",
22
+ unique: true,
23
+ name: 'index_delayed_jobs_on_singleton_running',
24
+ algorithm: :concurrently
25
+ end
26
+
27
+ def down
28
+ remove_index :delayed_jobs, name: 'index_delayed_jobs_on_singleton_not_running_old'
29
+ remove_index :delayed_jobs, name: 'index_delayed_jobs_on_singleton_running_old'
30
+ end
31
+ end
@@ -0,0 +1,60 @@
1
+ # frozen_string_literal: true
2
+
3
+ class UpdateInsertTriggerForSingletonUniqueConstraintChange < ActiveRecord::Migration[5.2]
4
+ def change
5
+ reversible do |direction|
6
+ direction.up do
7
+ execute(<<~SQL)
8
+ CREATE OR REPLACE FUNCTION #{connection.quote_table_name('delayed_jobs_before_insert_row_tr_fn')} () RETURNS trigger AS $$
9
+ BEGIN
10
+ IF NEW.strand IS NOT NULL THEN
11
+ PERFORM pg_advisory_xact_lock(half_md5_as_bigint(NEW.strand));
12
+ IF (SELECT COUNT(*) FROM (
13
+ SELECT 1 FROM delayed_jobs WHERE strand = NEW.strand AND next_in_strand=true LIMIT NEW.max_concurrent
14
+ ) s) = NEW.max_concurrent THEN
15
+ NEW.next_in_strand := false;
16
+ END IF;
17
+ END IF;
18
+ IF NEW.singleton IS NOT NULL THEN
19
+ PERFORM pg_advisory_xact_lock(half_md5_as_bigint(CONCAT('singleton:', NEW.singleton)));
20
+ -- this condition seems silly, but it forces postgres to use the two partial indexes on singleton,
21
+ -- rather than doing a seq scan
22
+ PERFORM 1 FROM delayed_jobs WHERE singleton = NEW.singleton AND (locked_by IS NULL OR locked_by = '#{::Delayed::Backend::Base::ON_HOLD_LOCKED_BY}' OR locked_by <> '#{::Delayed::Backend::Base::ON_HOLD_LOCKED_BY}');
23
+ IF FOUND THEN
24
+ NEW.next_in_strand := false;
25
+ END IF;
26
+ END IF;
27
+ RETURN NEW;
28
+ END;
29
+ $$ LANGUAGE plpgsql SET search_path TO #{::Switchman::Shard.current.name};
30
+ SQL
31
+ end
32
+ direction.down do
33
+ execute(<<~SQL)
34
+ CREATE OR REPLACE FUNCTION #{connection.quote_table_name('delayed_jobs_before_insert_row_tr_fn')} () RETURNS trigger AS $$
35
+ BEGIN
36
+ IF NEW.strand IS NOT NULL THEN
37
+ PERFORM pg_advisory_xact_lock(half_md5_as_bigint(NEW.strand));
38
+ IF (SELECT COUNT(*) FROM (
39
+ SELECT 1 FROM delayed_jobs WHERE strand = NEW.strand AND next_in_strand=true LIMIT NEW.max_concurrent
40
+ ) s) = NEW.max_concurrent THEN
41
+ NEW.next_in_strand := false;
42
+ END IF;
43
+ END IF;
44
+ IF NEW.singleton IS NOT NULL THEN
45
+ PERFORM pg_advisory_xact_lock(half_md5_as_bigint(CONCAT('singleton:', NEW.singleton)));
46
+ -- this condition seems silly, but it forces postgres to use the two partial indexes on singleton,
47
+ -- rather than doing a seq scan
48
+ PERFORM 1 FROM delayed_jobs WHERE singleton = NEW.singleton AND (locked_by IS NULL OR locked_by IS NOT NULL);
49
+ IF FOUND THEN
50
+ NEW.next_in_strand := false;
51
+ END IF;
52
+ END IF;
53
+ RETURN NEW;
54
+ END;
55
+ $$ LANGUAGE plpgsql SET search_path TO #{::Switchman::Shard.current.name};
56
+ SQL
57
+ end
58
+ end
59
+ end
60
+ end
@@ -0,0 +1,209 @@
1
+ # frozen_string_literal: true
2
+
3
+ class UpdateDeleteTriggerForSingletonUniqueConstraintChange < ActiveRecord::Migration[5.2]
4
+ def up
5
+ execute(<<~SQL)
6
+ CREATE OR REPLACE FUNCTION #{connection.quote_table_name('delayed_jobs_after_delete_row_tr_fn')} () RETURNS trigger AS $$
7
+ DECLARE
8
+ next_strand varchar;
9
+ running_count integer;
10
+ should_lock boolean;
11
+ should_be_precise boolean;
12
+ update_query varchar;
13
+ skip_locked varchar;
14
+ transition boolean;
15
+ BEGIN
16
+ IF OLD.strand IS NOT NULL THEN
17
+ should_lock := true;
18
+ should_be_precise := OLD.id % (OLD.max_concurrent * 4) = 0;
19
+
20
+ IF NOT should_be_precise AND OLD.max_concurrent > 16 THEN
21
+ running_count := (SELECT COUNT(*) FROM (
22
+ SELECT 1 as one FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
23
+ ) subquery_for_count);
24
+ should_lock := running_count < OLD.max_concurrent;
25
+ END IF;
26
+
27
+ IF should_lock THEN
28
+ PERFORM pg_advisory_xact_lock(half_md5_as_bigint(OLD.strand));
29
+ END IF;
30
+
31
+ -- note that we don't really care if the row we're deleting has a singleton, or if it even
32
+ -- matches the row(s) we're going to update. we just need to make sure that whatever
33
+ -- singleton we grab isn't already running (which is a simple existence check, since
34
+ -- the unique indexes ensure there is at most one singleton running, and one queued)
35
+ update_query := 'UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
36
+ SELECT id FROM delayed_jobs j2
37
+ WHERE next_in_strand=false AND
38
+ j2.strand=$1.strand AND
39
+ (j2.singleton IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.singleton=j2.singleton AND j3.id<>j2.id AND (j3.locked_by IS NULL OR j3.locked_by = ''#{::Delayed::Backend::Base::ON_HOLD_LOCKED_BY}'' OR j3.locked_by <> ''#{::Delayed::Backend::Base::ON_HOLD_LOCKED_BY}'')))
40
+ ORDER BY j2.strand_order_override ASC, j2.id ASC
41
+ LIMIT ';
42
+
43
+ IF should_be_precise THEN
44
+ running_count := (SELECT COUNT(*) FROM (
45
+ SELECT 1 FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
46
+ ) s);
47
+ IF running_count < OLD.max_concurrent THEN
48
+ update_query := update_query || '($1.max_concurrent - $2)';
49
+ ELSE
50
+ -- we have too many running already; just bail
51
+ RETURN OLD;
52
+ END IF;
53
+ ELSE
54
+ update_query := update_query || '1';
55
+
56
+ -- n-strands don't require precise ordering; we can make this query more performant
57
+ IF OLD.max_concurrent > 1 THEN
58
+ skip_locked := ' SKIP LOCKED';
59
+ END IF;
60
+ END IF;
61
+
62
+ update_query := update_query || ' FOR UPDATE' || COALESCE(skip_locked, '') || ')';
63
+ EXECUTE update_query USING OLD, running_count;
64
+ END IF;
65
+
66
+ IF OLD.singleton IS NOT NULL THEN
67
+ PERFORM pg_advisory_xact_lock(half_md5_as_bigint(CONCAT('singleton:', OLD.singleton)));
68
+
69
+ transition := EXISTS (SELECT 1 FROM delayed_jobs AS j1 WHERE j1.singleton = OLD.singleton AND j1.strand IS DISTINCT FROM OLD.strand AND locked_by IS NULL);
70
+
71
+ IF transition THEN
72
+ next_strand := (SELECT j1.strand FROM delayed_jobs AS j1 WHERE j1.singleton = OLD.singleton AND j1.strand IS DISTINCT FROM OLD.strand AND locked_by IS NULL AND j1.strand IS NOT NULL LIMIT 1);
73
+
74
+ IF next_strand IS NOT NULL THEN
75
+ -- if the singleton has a new strand defined, we need to lock it to ensure we obey n_strand constraints --
76
+ IF NOT pg_try_advisory_xact_lock(half_md5_as_bigint(next_strand)) THEN
77
+ -- a failure to acquire the lock means that another process already has it and will thus handle this singleton --
78
+ RETURN OLD;
79
+ END IF;
80
+ END IF;
81
+ ELSIF OLD.strand IS NOT NULL THEN
82
+ -- if there is no transition and there is a strand then we have already handled this singleton in the case above --
83
+ RETURN OLD;
84
+ END IF;
85
+
86
+ -- handles transitioning a singleton from stranded to not stranded --
87
+ -- handles transitioning a singleton from unstranded to stranded --
88
+ -- handles transitioning a singleton from strand A to strand B --
89
+ -- these transitions are a relatively rare case, so we take a shortcut and --
90
+ -- only start the next singleton if its strand does not currently have any running jobs --
91
+ -- if it does, the next stranded job that finishes will start this singleton if it can --
92
+ UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
93
+ SELECT id FROM delayed_jobs j2
94
+ WHERE next_in_strand=false AND
95
+ j2.singleton=OLD.singleton AND
96
+ j2.locked_by IS NULL AND
97
+ (j2.strand IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.strand=j2.strand AND j3.id<>j2.id))
98
+ FOR UPDATE
99
+ );
100
+ END IF;
101
+ RETURN OLD;
102
+ END;
103
+ $$ LANGUAGE plpgsql SET search_path TO #{::Switchman::Shard.current.name};
104
+ SQL
105
+ end
106
+
107
+ def down
108
+ execute(<<~SQL)
109
+ CREATE OR REPLACE FUNCTION #{connection.quote_table_name('delayed_jobs_after_delete_row_tr_fn')} () RETURNS trigger AS $$
110
+ DECLARE
111
+ next_strand varchar;
112
+ running_count integer;
113
+ should_lock boolean;
114
+ should_be_precise boolean;
115
+ update_query varchar;
116
+ skip_locked varchar;
117
+ transition boolean;
118
+ BEGIN
119
+ IF OLD.strand IS NOT NULL THEN
120
+ should_lock := true;
121
+ should_be_precise := OLD.id % (OLD.max_concurrent * 4) = 0;
122
+
123
+ IF NOT should_be_precise AND OLD.max_concurrent > 16 THEN
124
+ running_count := (SELECT COUNT(*) FROM (
125
+ SELECT 1 as one FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
126
+ ) subquery_for_count);
127
+ should_lock := running_count < OLD.max_concurrent;
128
+ END IF;
129
+
130
+ IF should_lock THEN
131
+ PERFORM pg_advisory_xact_lock(half_md5_as_bigint(OLD.strand));
132
+ END IF;
133
+
134
+ -- note that we don't really care if the row we're deleting has a singleton, or if it even
135
+ -- matches the row(s) we're going to update. we just need to make sure that whatever
136
+ -- singleton we grab isn't already running (which is a simple existence check, since
137
+ -- the unique indexes ensure there is at most one singleton running, and one queued)
138
+ update_query := 'UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
139
+ SELECT id FROM delayed_jobs j2
140
+ WHERE next_in_strand=false AND
141
+ j2.strand=$1.strand AND
142
+ (j2.singleton IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.singleton=j2.singleton AND j3.id<>j2.id AND (j3.locked_by IS NULL OR j3.locked_by IS NOT NULL)))
143
+ ORDER BY j2.strand_order_override ASC, j2.id ASC
144
+ LIMIT ';
145
+
146
+ IF should_be_precise THEN
147
+ running_count := (SELECT COUNT(*) FROM (
148
+ SELECT 1 FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
149
+ ) s);
150
+ IF running_count < OLD.max_concurrent THEN
151
+ update_query := update_query || '($1.max_concurrent - $2)';
152
+ ELSE
153
+ -- we have too many running already; just bail
154
+ RETURN OLD;
155
+ END IF;
156
+ ELSE
157
+ update_query := update_query || '1';
158
+
159
+ -- n-strands don't require precise ordering; we can make this query more performant
160
+ IF OLD.max_concurrent > 1 THEN
161
+ skip_locked := ' SKIP LOCKED';
162
+ END IF;
163
+ END IF;
164
+
165
+ update_query := update_query || ' FOR UPDATE' || COALESCE(skip_locked, '') || ')';
166
+ EXECUTE update_query USING OLD, running_count;
167
+ END IF;
168
+
169
+ IF OLD.singleton IS NOT NULL THEN
170
+ PERFORM pg_advisory_xact_lock(half_md5_as_bigint(CONCAT('singleton:', OLD.singleton)));
171
+
172
+ transition := EXISTS (SELECT 1 FROM delayed_jobs AS j1 WHERE j1.singleton = OLD.singleton AND j1.strand IS DISTINCT FROM OLD.strand AND locked_by IS NULL);
173
+
174
+ IF transition THEN
175
+ next_strand := (SELECT j1.strand FROM delayed_jobs AS j1 WHERE j1.singleton = OLD.singleton AND j1.strand IS DISTINCT FROM OLD.strand AND locked_by IS NULL AND j1.strand IS NOT NULL LIMIT 1);
176
+
177
+ IF next_strand IS NOT NULL THEN
178
+ -- if the singleton has a new strand defined, we need to lock it to ensure we obey n_strand constraints --
179
+ IF NOT pg_try_advisory_xact_lock(half_md5_as_bigint(next_strand)) THEN
180
+ -- a failure to acquire the lock means that another process already has it and will thus handle this singleton --
181
+ RETURN OLD;
182
+ END IF;
183
+ END IF;
184
+ ELSIF OLD.strand IS NOT NULL THEN
185
+ -- if there is no transition and there is a strand then we have already handled this singleton in the case above --
186
+ RETURN OLD;
187
+ END IF;
188
+
189
+ -- handles transitioning a singleton from stranded to not stranded --
190
+ -- handles transitioning a singleton from unstranded to stranded --
191
+ -- handles transitioning a singleton from strand A to strand B --
192
+ -- these transitions are a relatively rare case, so we take a shortcut and --
193
+ -- only start the next singleton if its strand does not currently have any running jobs --
194
+ -- if it does, the next stranded job that finishes will start this singleton if it can --
195
+ UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
196
+ SELECT id FROM delayed_jobs j2
197
+ WHERE next_in_strand=false AND
198
+ j2.singleton=OLD.singleton AND
199
+ j2.locked_by IS NULL AND
200
+ (j2.strand IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.strand=j2.strand AND j3.id<>j2.id))
201
+ FOR UPDATE
202
+ );
203
+ END IF;
204
+ RETURN OLD;
205
+ END;
206
+ $$ LANGUAGE plpgsql SET search_path TO #{::Switchman::Shard.current.name};
207
+ SQL
208
+ end
209
+ end
@@ -0,0 +1,31 @@
1
+ # frozen_string_literal: true
2
+
3
+ class RemoveOldSingletonIndex < ActiveRecord::Migration[5.2]
4
+ disable_ddl_transaction!
5
+
6
+ def up
7
+ remove_index :delayed_jobs, name: 'index_delayed_jobs_on_singleton_not_running_old'
8
+ remove_index :delayed_jobs, name: 'index_delayed_jobs_on_singleton_running_old'
9
+ end
10
+
11
+ def down
12
+ rename_index :delayed_jobs, 'index_delayed_jobs_on_singleton_not_running', 'index_delayed_jobs_on_singleton_not_running_old'
13
+ rename_index :delayed_jobs, 'index_delayed_jobs_on_singleton_running', 'index_delayed_jobs_on_singleton_running_old'
14
+
15
+ # only one job can be queued in a singleton
16
+ add_index :delayed_jobs,
17
+ :singleton,
18
+ where: 'singleton IS NOT NULL AND locked_by IS NULL',
19
+ unique: true,
20
+ name: 'index_delayed_jobs_on_singleton_not_running',
21
+ algorithm: :concurrently
22
+
23
+ # only one job can be running for a singleton
24
+ add_index :delayed_jobs,
25
+ :singleton,
26
+ where: 'singleton IS NOT NULL AND locked_by IS NOT NULL',
27
+ unique: true,
28
+ name: 'index_delayed_jobs_on_singleton_running',
29
+ algorithm: :concurrently
30
+ end
31
+ end
@@ -0,0 +1,25 @@
1
+ module SwitchmanInstJobs
2
+ module Delayed
3
+ module Backend
4
+ module ActiveRecord
5
+ module AbstractJob
6
+ module ClassMethods
7
+ def current_switchman_shard
8
+ connected_to_stack.reverse_each do |hash|
9
+ return hash[:switchman_shard] if hash[:switchman_shard] && hash[:klasses].include?(connection_classes)
10
+ end
11
+
12
+ ::ActiveRecord::Base.current_switchman_shard.delayed_jobs_shard
13
+ end
14
+ end
15
+
16
+ def self.prepended(base)
17
+ base.singleton_class.prepend(ClassMethods)
18
+
19
+ base.sharded_model
20
+ end
21
+ end
22
+ end
23
+ end
24
+ end
25
+ end
@@ -21,13 +21,17 @@ module SwitchmanInstJobs
21
21
 
22
22
  # Ensure jobs get unblocked on the new shard if they exist
23
23
  ::Delayed::Worker.lifecycle.after(:perform) do |_worker, job|
24
- if job.strand
24
+ if job.strand || job.singleton
25
+ column = job.strand ? :strand : :singleton
26
+
25
27
  ::Switchman::Shard.clear_cache
26
28
  ::Switchman::Shard.default.activate do
27
29
  current_job_shard = ::Switchman::Shard.lookup(job.shard_id).delayed_jobs_shard
28
30
  if current_job_shard != ::Switchman::Shard.current(::Delayed::Backend::ActiveRecord::AbstractJob)
29
31
  current_job_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
30
- j = ::Delayed::Job.where(strand: job.strand).next_in_strand_order.first
32
+ ::Delayed::Job.where(source: 'JobsMigrator::StrandBlocker', **{ column => job.try(column) }).delete_all
33
+
34
+ j = ::Delayed::Job.where(**{ column => job.try(column) }).next_in_strand_order.first
31
35
  j.update_column(:next_in_strand, true) if j && !j.next_in_strand
32
36
  end
33
37
  end
@@ -89,7 +89,9 @@ module SwitchmanInstJobs
89
89
  migrate_everything
90
90
  end
91
91
 
92
- def migrate_strands
92
+ def migrate_strands(batch_size: 1_000)
93
+ source_shard = ::Switchman::Shard.current(::Delayed::Backend::ActiveRecord::AbstractJob)
94
+
93
95
  # there are 4 scenarios to deal with here
94
96
  # 1) no running job, no jobs moved: do nothing
95
97
  # 2) running job, no jobs moved; create blocker with next_in_strand=false
@@ -98,60 +100,89 @@ module SwitchmanInstJobs
98
100
  # those (= do nothing since it should already be false)
99
101
  # 4) no running job, jobs moved: set next_in_strand=true on the first of
100
102
  # those (= do nothing since it should already be true)
103
+ handler = lambda { |scope, column, blocker_job_kwargs = {}, advisory_lock_cb = nil|
104
+ shard_map = build_shard_map(scope, source_shard)
105
+ shard_map.each do |(target_shard, source_shard_ids)|
106
+ shard_scope = scope.where(shard_id: source_shard_ids)
101
107
 
102
- source_shard = ::Switchman::Shard.current(::Delayed::Backend::ActiveRecord::AbstractJob)
103
- strand_scope = ::Delayed::Job.shard(source_shard).where.not(strand: nil)
104
- shard_map = build_shard_map(strand_scope, source_shard)
105
- shard_map.each do |(target_shard, source_shard_ids)|
106
- shard_scope = strand_scope.where(shard_id: source_shard_ids)
107
-
108
- # 1) is taken care of because it should not show up here in strands
109
- strands = shard_scope.distinct.order(:strand).pluck(:strand)
110
-
111
- target_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
112
- strands.each do |strand|
113
- transaction_on([source_shard, target_shard]) do
114
- this_strand_scope = shard_scope.where(strand: strand)
115
- # we want to copy all the jobs except the one that is still running.
116
- jobs_scope = this_strand_scope.where(locked_by: nil)
117
-
118
- # 2) and part of 3) are taken care of here by creating a blocker
119
- # job with next_in_strand = false. as soon as the current
120
- # running job is finished it should set next_in_strand
121
- # We lock it to ensure that the jobs worker can't delete it until we are done moving the strand
122
- # Since we only unlock it on the new jobs queue *after* deleting from the original
123
- # the lock ensures the blocker always gets unlocked
124
- first = this_strand_scope.where.not(locked_by: nil).next_in_strand_order.lock.first
125
- if first
126
- first_job = ::Delayed::Job.create!(strand: strand, next_in_strand: false)
127
- first_job.payload_object = ::Delayed::PerformableMethod.new(Kernel, :sleep, args: [0])
128
- first_job.queue = first.queue
129
- first_job.tag = 'Kernel.sleep'
130
- first_job.source = 'JobsMigrator::StrandBlocker'
131
- first_job.max_attempts = 1
132
- # If we ever have jobs left over from 9999 jobs moves of a single shard,
133
- # something has gone terribly wrong
134
- first_job.strand_order_override = -9999
135
- first_job.save!
136
- # the rest of 3) is taken care of here
137
- # make sure that all the jobs moved over are NOT next in strand
138
- ::Delayed::Job.where(next_in_strand: true, strand: strand, locked_by: nil).
139
- update_all(next_in_strand: false)
140
- end
108
+ # 1) is taken care of because it should not show up here in strands
109
+ values = shard_scope.distinct.order(column).pluck(column)
141
110
 
142
- # 4) is taken care of here, by leaving next_in_strand alone and
143
- # it should execute on the new shard
144
- batch_move_jobs(
145
- target_shard: target_shard,
146
- source_shard: source_shard,
147
- scope: jobs_scope
148
- ) do |job, new_job|
149
- # This ensures jobs enqueued on the old jobs shard run before jobs on the new jobs queue
150
- new_job.strand_order_override = job.strand_order_override - 1
111
+ target_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
112
+ values.each do |value|
113
+ transaction_on([source_shard, target_shard]) do
114
+ source_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
115
+ advisory_lock_cb&.call(value)
116
+ end
117
+
118
+ value_scope = shard_scope.where(**{ column => value })
119
+ # we want to copy all the jobs except the one that is still running.
120
+ jobs_scope = value_scope.where(locked_by: nil)
121
+
122
+ # 2) and part of 3) are taken care of here by creating a blocker
123
+ # job with next_in_strand = false. as soon as the current
124
+ # running job is finished it should set next_in_strand
125
+ # We lock it to ensure that the jobs worker can't delete it until we are done moving the strand
126
+ # Since we only unlock it on the new jobs queue *after* deleting from the original
127
+ # the lock ensures the blocker always gets unlocked
128
+ first = value_scope.where.not(locked_by: nil).next_in_strand_order.lock.first
129
+ if first
130
+ create_blocker_job(
131
+ queue: first.queue,
132
+ shard_id: first.shard_id,
133
+ **{ column => value },
134
+ **blocker_job_kwargs
135
+ )
136
+
137
+ # the rest of 3) is taken care of here
138
+ # make sure that all the jobs moved over are NOT next in strand
139
+ ::Delayed::Job.where(next_in_strand: true, locked_by: nil, **{ column => value }).
140
+ update_all(next_in_strand: false)
141
+ end
142
+
143
+ # 4) is taken care of here, by leaving next_in_strand alone and
144
+ # it should execute on the new shard
145
+ batch_move_jobs(
146
+ target_shard: target_shard,
147
+ source_shard: source_shard,
148
+ scope: jobs_scope,
149
+ batch_size: batch_size
150
+ ) do |job, new_job|
151
+ # This ensures jobs enqueued on the old jobs shard run before jobs on the new jobs queue
152
+ new_job.strand_order_override = job.strand_order_override - 1
153
+ end
151
154
  end
152
155
  end
153
156
  end
157
+ end
158
+ }
159
+
160
+ strand_scope = ::Delayed::Job.shard(source_shard).where.not(strand: nil)
161
+ singleton_scope = ::Delayed::Job.shard(source_shard).where('strand IS NULL AND singleton IS NOT NULL')
162
+ all_scope = ::Delayed::Job.shard(source_shard).where('strand IS NOT NULL OR singleton IS NOT NULL')
154
163
 
164
+ singleton_blocker_additional_kwargs = {
165
+ locked_at: DateTime.now,
166
+ locked_by: ::Delayed::Backend::Base::ON_HOLD_BLOCKER
167
+ }
168
+
169
+ quoted_function_name = ::Delayed::Job.connection.quote_table_name('half_md5_as_bigint')
170
+ strand_advisory_lock_fn = lambda do |value|
171
+ ::Delayed::Job.connection.execute("SELECT pg_advisory_xact_lock(#{quoted_function_name}('#{value}'))")
172
+ end
173
+
174
+ singleton_advisory_lock_fn = lambda do |value|
175
+ ::Delayed::Job.connection.execute(
176
+ "SELECT pg_advisory_xact_lock(#{quoted_function_name}('singleton:#{value}'))"
177
+ )
178
+ end
179
+
180
+ handler.call(strand_scope, :strand, {}, strand_advisory_lock_fn)
181
+ handler.call(singleton_scope, :singleton, singleton_blocker_additional_kwargs, singleton_advisory_lock_fn)
182
+
183
+ shard_map = build_shard_map(all_scope, source_shard)
184
+ shard_map.each do |(target_shard, source_shard_ids)|
185
+ target_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
155
186
  updated = ::Switchman::Shard.where(id: source_shard_ids, block_stranded: true).
156
187
  update_all(block_stranded: false)
157
188
  # If this is being manually re-run for some reason to clean something up, don't wait for nothing to happen
@@ -166,26 +197,40 @@ module SwitchmanInstJobs
166
197
  end
167
198
  end
168
199
 
169
- def unblock_strands(target_shard)
170
- target_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
171
- loop do
172
- # We only want to unlock stranded jobs where they don't belong to a blocked shard (if they *do* belong)
173
- # to a blocked shard, they must be part of a concurrent jobs migration from a different source shard to
174
- # this target shard, so we shouldn't unlock them yet. We only ever unlock one job here to keep the
175
- # logic cleaner; if the job is n-stranded, after the first one runs, the trigger will unlock larger
176
- # batches
177
- break if ::Delayed::Job.where(id: ::Delayed::Job.select('DISTINCT ON (strand) id').
178
- where.not(strand: nil).
179
- where.not(shard_id: ::Switchman::Shard.where(block_stranded: true).pluck(:id)).where(
200
+ def unblock_strands(target_shard, batch_size: 10_000)
201
+ block_stranded_ids = ::Switchman::Shard.where(block_stranded: true).pluck(:id)
202
+ query = lambda { |column, scope|
203
+ ::Delayed::Job.
204
+ where(id: ::Delayed::Job.select("DISTINCT ON (#{column}) id").
205
+ where(scope).
206
+ where.not(shard_id: block_stranded_ids).
207
+ where(
180
208
  ::Delayed::Job.select(1).from("#{::Delayed::Job.quoted_table_name} dj2").
181
209
  where("dj2.next_in_strand = true OR dj2.source = 'JobsMigrator::StrandBlocker'").
182
- where('dj2.strand = delayed_jobs.strand').arel.exists.not
183
- ).order(:strand, :strand_order_override, :id)).limit(500).update_all(next_in_strand: true).zero?
210
+ where("dj2.#{column} = delayed_jobs.#{column}").arel.exists.not
211
+ ).
212
+ order(column, :strand_order_override, :id)).limit(batch_size)
213
+ }
214
+
215
+ target_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
216
+ # We only want to unlock stranded jobs where they don't belong to a blocked shard (if they *do* belong)
217
+ # to a blocked shard, they must be part of a concurrent jobs migration from a different source shard to
218
+ # this target shard, so we shouldn't unlock them yet. We only ever unlock one job here to keep the
219
+ # logic cleaner; if the job is n-stranded, after the first one runs, the trigger will unlock larger
220
+ # batches
221
+
222
+ loop do
223
+ break if query.call(:strand, 'strand IS NOT NULL').update_all(next_in_strand: true).zero?
224
+ end
225
+
226
+ loop do
227
+ break if query.call(:singleton,
228
+ 'strand IS NULL AND singleton IS NOT NULL').update_all(next_in_strand: true).zero?
184
229
  end
185
230
  end
186
231
  end
187
232
 
188
- def migrate_everything
233
+ def migrate_everything(batch_size: 1_000)
189
234
  source_shard = ::Switchman::Shard.current(::Delayed::Backend::ActiveRecord::AbstractJob)
190
235
  scope = ::Delayed::Job.shard(source_shard).where(strand: nil)
191
236
 
@@ -194,13 +239,26 @@ module SwitchmanInstJobs
194
239
  batch_move_jobs(
195
240
  target_shard: target_shard,
196
241
  source_shard: source_shard,
197
- scope: scope.where(shard_id: source_shard_ids).where(locked_by: nil)
242
+ scope: scope.where(shard_id: source_shard_ids).where(locked_by: nil),
243
+ batch_size: batch_size
198
244
  )
199
245
  end
200
246
  end
201
247
 
202
248
  private
203
249
 
250
+ def create_blocker_job(**kwargs)
251
+ first_job = ::Delayed::Job.create!(**kwargs, next_in_strand: false)
252
+ first_job.payload_object = ::Delayed::PerformableMethod.new(Kernel, :sleep, args: [0])
253
+ first_job.tag = 'Kernel.sleep'
254
+ first_job.source = 'JobsMigrator::StrandBlocker'
255
+ first_job.max_attempts = 1
256
+ # If we ever have jobs left over from 9999 jobs moves of a single shard,
257
+ # something has gone terribly wrong
258
+ first_job.strand_order_override = -9999
259
+ first_job.save!
260
+ end
261
+
204
262
  def build_shard_map(scope, source_shard)
205
263
  shard_ids = scope.distinct.pluck(:shard_id)
206
264
 
@@ -215,10 +273,10 @@ module SwitchmanInstJobs
215
273
  shard_map
216
274
  end
217
275
 
218
- def batch_move_jobs(target_shard:, source_shard:, scope:)
276
+ def batch_move_jobs(target_shard:, source_shard:, scope:, batch_size:)
219
277
  while scope.exists?
220
278
  # Adapted from get_and_lock_next_available in delayed/backend/active_record.rb
221
- target_jobs = scope.limit(1000).lock('FOR UPDATE SKIP LOCKED')
279
+ target_jobs = scope.limit(batch_size).lock('FOR UPDATE SKIP LOCKED')
222
280
 
223
281
  query = source_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
224
282
  <<~SQL
@@ -1,3 +1,3 @@
1
1
  module SwitchmanInstJobs
2
- VERSION = '4.0.3'.freeze
2
+ VERSION = '4.0.6'.freeze
3
3
  end
@@ -5,9 +5,6 @@ module SwitchmanInstJobs
5
5
  cattr_accessor :delayed_jobs_shard_fallback
6
6
 
7
7
  def self.initialize_active_record
8
- ::ActiveRecord::ConnectionAdapters::ConnectionPool.prepend(
9
- ActiveRecord::ConnectionAdapters::ConnectionPool
10
- )
11
8
  ::ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.prepend(
12
9
  ActiveRecord::ConnectionAdapters::PostgreSQLAdapter
13
10
  )
@@ -17,6 +14,9 @@ module SwitchmanInstJobs
17
14
  ::Delayed::Backend::ActiveRecord::Job.prepend(
18
15
  Delayed::Backend::Base
19
16
  )
17
+ ::Delayed::Backend::ActiveRecord::AbstractJob.prepend(
18
+ Delayed::Backend::ActiveRecord::AbstractJob
19
+ )
20
20
  ::Delayed::Pool.prepend Delayed::Pool
21
21
  ::Delayed::Worker.prepend Delayed::Worker
22
22
  ::Delayed::Worker::HealthCheck.prepend Delayed::Worker::HealthCheck
@@ -35,10 +35,10 @@ module SwitchmanInstJobs
35
35
  end
36
36
  end
37
37
 
38
- require 'switchman_inst_jobs/active_record/connection_adapters/connection_pool'
39
38
  require 'switchman_inst_jobs/active_record/connection_adapters/postgresql_adapter'
40
39
  require 'switchman_inst_jobs/active_record/migration'
41
40
  require 'switchman_inst_jobs/delayed/settings'
41
+ require 'switchman_inst_jobs/delayed/backend/active_record/abstract_job'
42
42
  require 'switchman_inst_jobs/delayed/backend/base'
43
43
  require 'switchman_inst_jobs/delayed/message_sending'
44
44
  require 'switchman_inst_jobs/delayed/pool'
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: switchman-inst-jobs
3
3
  version: !ruby/object:Gem::Version
4
- version: 4.0.3
4
+ version: 4.0.6
5
5
  platform: ruby
6
6
  authors:
7
7
  - Bryan Petty
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2021-12-20 00:00:00.000000000 Z
11
+ date: 2022-02-23 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: inst-jobs
@@ -73,7 +73,7 @@ dependencies:
73
73
  version: '3.0'
74
74
  - - ">="
75
75
  - !ruby/object:Gem::Version
76
- version: 3.0.1
76
+ version: 3.0.7
77
77
  type: :runtime
78
78
  prerelease: false
79
79
  version_requirements: !ruby/object:Gem::Requirement
@@ -83,7 +83,7 @@ dependencies:
83
83
  version: '3.0'
84
84
  - - ">="
85
85
  - !ruby/object:Gem::Version
86
- version: 3.0.1
86
+ version: 3.0.7
87
87
  - !ruby/object:Gem::Dependency
88
88
  name: bundler
89
89
  requirement: !ruby/object:Gem::Requirement
@@ -340,11 +340,15 @@ files:
340
340
  - db/migrate/20211207094200_update_after_delete_trigger_for_singleton_transition_cases.rb
341
341
  - db/migrate/20211220112800_fix_singleton_race_condition_insert.rb
342
342
  - db/migrate/20211220113000_fix_singleton_race_condition_delete.rb
343
+ - db/migrate/20220127091200_fix_singleton_unique_constraint.rb
344
+ - db/migrate/20220128084800_update_insert_trigger_for_singleton_unique_constraint_change.rb
345
+ - db/migrate/20220128084900_update_delete_trigger_for_singleton_unique_constraint_change.rb
346
+ - db/migrate/20220203063200_remove_old_singleton_index.rb
343
347
  - lib/switchman-inst-jobs.rb
344
348
  - lib/switchman_inst_jobs.rb
345
- - lib/switchman_inst_jobs/active_record/connection_adapters/connection_pool.rb
346
349
  - lib/switchman_inst_jobs/active_record/connection_adapters/postgresql_adapter.rb
347
350
  - lib/switchman_inst_jobs/active_record/migration.rb
351
+ - lib/switchman_inst_jobs/delayed/backend/active_record/abstract_job.rb
348
352
  - lib/switchman_inst_jobs/delayed/backend/base.rb
349
353
  - lib/switchman_inst_jobs/delayed/message_sending.rb
350
354
  - lib/switchman_inst_jobs/delayed/pool.rb
@@ -1,15 +0,0 @@
1
- module SwitchmanInstJobs
2
- module ActiveRecord
3
- module ConnectionAdapters
4
- module ConnectionPool
5
- def shard
6
- if connection_klass == ::Delayed::Backend::ActiveRecord::AbstractJob
7
- return shard_stack.last || ::Switchman::Shard.current(::ActiveRecord::Base).delayed_jobs_shard
8
- end
9
-
10
- super
11
- end
12
- end
13
- end
14
- end
15
- end