resque-multi-job-forks 0.5.1 → 0.5.5

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: cd2a600dbcd1f57dcee123b6cb34f055f170bf9ef8eb1a114ded55841bd99149
-  data.tar.gz: 80a3ffe7e7f626fb77e11487b51faa129a13a60382944d3c5c15e8c60b942c9e
+  metadata.gz: 73e29f0c8722121adb19f9f3dc3a98d06ac059c24f8944c191af09d8e1379d2d
+  data.tar.gz: 6b9589d77c89be94000575d839db7aa6b9363775e6512c5be5fcc23945c1a974
 SHA512:
-  metadata.gz: 1184ff5df61cebf795557a9e93b43be059a6bf1671ab7362a89400dcb2b7d97658e9f06735adf0d7c3a3f9cab57addc755232904ec0dff251106e71e116ffc5c
-  data.tar.gz: e6e76c24c2c2d591cc6c944ab3e8a9ef57ab67510ef1454b78a7e4704c9876fd78e1c44ebf52f47a0404254796728730757b1e5f89ce7e7cdd0c0a06bd93416b
+  metadata.gz: c10f5f45ec3017765431a20d20128ac0ae423ad1ecbc25cd57a51e6e92601fb816bfcd3e5c8c0baa7c4a710308fdc78c3891afe5882aa776759a32986f56a45c
+  data.tar.gz: f39b880a4ead9246a9d88d7961ee91f6a3db3ed5930505feb60c615bd4c1fd21f4b2d35e3033e2dc952188c88dfa55c93f69b41a7e88fb451993d77693a851e3
@@ -10,6 +10,8 @@ module Resque
     attr_accessor :memory_threshold
     attr_reader :jobs_processed
 
+    WorkerTerminated = Class.new(StandardError)
+
     def self.multi_jobs_per_fork?
       ENV["DISABLE_MULTI_JOBS_PER_FORK"].nil?
     end
@@ -22,6 +24,20 @@ module Resque
       else
         if term_child
           unregister_signal_handlers
+          trap('TERM') do
+            trap('TERM') do
+              # Ignore subsequent term signals
+            end
+
+            if @performing_job
+              # If a job is in progress, stop it immediately.
+              raise TermException.new("SIGTERM")
+            else
+              # If we're not currently running a job, shut down cleanly.
+              # This allows us to push unworked jobs back on the queue.
+              shutdown
+            end
+          end
           trap('QUIT') { shutdown }
         end
         raise NotImplementedError, "Pretending to not have forked"
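The handler added above is a trap inside a trap: the first thing the outer TERM handler does is re-trap TERM so repeated signals are ignored, then it either aborts the job in progress or shuts down cleanly. A minimal standalone sketch of the same pattern outside of Resque — the `working` flag, the `sleep`, and `Interrupt` are illustrative stand-ins, not part of the gem:

    # Two-phase TERM handling, sketched with plain Ruby signal traps.
    working = false

    trap('TERM') do
      trap('TERM') {}               # ignore any further TERM signals
      if working
        raise Interrupt, 'SIGTERM'  # abort the unit of work in progress
      else
        exit                        # idle between jobs: exit cleanly
      end
    end

    3.times do
      working = true
      sleep 1                       # stands in for performing a job
      working = false
    end

In the worker itself, the abort branch raises TermException so the in-flight job goes through Resque's normal failure path, and the idle branch calls shutdown so unworked jobs can be pushed back on the queue.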
@@ -39,9 +55,20 @@ module Resque
 
     def perform_with_multi_job_forks(job = nil)
       @fork_per_job = true unless fork_hijacked? # reconnect and after_fork
+      if shutdown?
+        # We got a request to shut down _after_ grabbing a job but _before_ starting work
+        # on it. Immediately report the job as failed and return.
+        if job
+          report_failed_job(job, WorkerTerminated.new("shutdown before job start"))
+        end
+        return
+      end
+      @performing_job = true
       perform_without_multi_job_forks(job)
       hijack_fork unless fork_hijacked?
       @jobs_processed += 1
+    ensure
+      @performing_job = false
     end
     alias_method :perform_without_multi_job_forks, :perform
     alias_method :perform, :perform_with_multi_job_forks
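The shutdown? check and the @performing_job flag added here are the other half of that TERM handler: the flag marks the window in which a job is actually running, and the ensure clause clears it even if the job raises, so the signal handler can tell "mid-job" apart from "between jobs". A compressed sketch of just that guard (the method and block names are illustrative, not the gem's API):

    # Mark the critical section with a flag that is always cleared.
    def run_one(job)
      @performing_job = true
      job.call
    ensure
      @performing_job = false
    end

    run_one(-> { puts 'working' })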
@@ -91,7 +118,7 @@ module Resque
     def shutdown_child
       return unless @child
       begin
-        log! "multi_jobs_per_fork: Sending QUIT signal to #{@child}"
+        log_with_severity :debug, "multi_jobs_per_fork: Sending QUIT signal to #{@child}"
         Process.kill('QUIT', @child)
       rescue Errno::ESRCH
         nil
@@ -112,7 +139,7 @@ module Resque
     end
 
     def hijack_fork
-      log 'hijack fork.'
+      log_with_severity :debug, 'hijack fork.'
       @suppressed_fork_hooks = [Resque.after_fork, Resque.before_fork]
       Resque.after_fork = Resque.before_fork = nil
       @release_fork_limit = fork_job_limit
@@ -121,11 +148,11 @@ module Resque
     end
 
     def release_fork
-      log "jobs processed by child: #{jobs_processed}; rss: #{rss}"
+      log_with_severity :info, "jobs processed by child: #{jobs_processed}; rss: #{rss}"
      run_hook :before_child_exit, self
      Resque.after_fork, Resque.before_fork = *@suppressed_fork_hooks
      @release_fork_limit = @jobs_processed = nil
-      log 'hijack over, counter terrorists win.'
+      log_with_severity :debug, 'hijack over, counter terrorists win.'
      @shutdown = true
     end
 
data/test/helper.rb CHANGED
@@ -19,6 +19,7 @@ Resque.redis = $redis
 # set `VERBOSE=true` when running the tests to view resques log output.
 module Resque
   class Worker
+    attr_accessor :start_lag
 
     def log_with_severity(severity, msg)
       if ENV['VERBOSE']
@@ -27,14 +28,18 @@ module Resque
       end
     end
 
-    def log(message)
-      log_with_severity :info, message
+    def report_failed_job(job, exception)
+      $SEQ_WRITER.print "failed_job_#{exception.class.name.downcase.gsub('::', '_')}\n"
     end
 
-    def log!(message)
-      log_with_severity :debug, message
+    def fork_hijacked?
+      if @release_fork_limit
+        if start_lag
+          sleep start_lag
+        end
+      end
+      @release_fork_limit
     end
-
   end
 end
 
@@ -8,7 +8,7 @@ class TestResqueMultiJobForks < Test::Unit::TestCase
   end
 
   def test_timeout_limit_sequence_of_events
-    @worker.log! "in test_timeout_limit_sequence_of_events"
+    @worker.log_with_severity :debug, "in test_timeout_limit_sequence_of_events"
     # only allow enough time for 3 jobs to process.
     @worker.seconds_per_fork = 3
 
@@ -29,7 +29,7 @@ class TestResqueMultiJobForks < Test::Unit::TestCase
   end
 
   def test_graceful_shutdown_during_first_job
-    @worker.log! "in test_graceful_shutdown_during_first_job"
+    @worker.log_with_severity :debug, "in test_graceful_shutdown_during_first_job"
     # enough time for all jobs to process.
     @worker.seconds_per_fork = 60
 
@@ -51,7 +51,7 @@ class TestResqueMultiJobForks < Test::Unit::TestCase
   end
 
   def test_immediate_shutdown_during_first_job
-    @worker.log! "in test_immediate_shutdown_during_first_job"
+    @worker.log_with_severity :debug, "in test_immediate_shutdown_during_first_job"
     # enough time for all jobs to process.
     @worker.seconds_per_fork = 60
     @worker.term_child = false
@@ -73,7 +73,7 @@ class TestResqueMultiJobForks < Test::Unit::TestCase
   end
 
   def test_sigterm_shutdown_during_first_job
-    @worker.log! "in test_sigterm_shutdown_during_first_job"
+    @worker.log_with_severity :debug, "in test_sigterm_shutdown_during_first_job"
     # enough time for all jobs to process.
     @worker.seconds_per_fork = 60
     @worker.term_child = true
@@ -91,14 +91,38 @@ class TestResqueMultiJobForks < Test::Unit::TestCase
     sequence = $SEQ_READER.each_line.map {|l| l.strip.to_sym }
 
     # test the sequence is correct.
-    assert_equal([:before_fork, :after_fork,
+    assert_equal([:before_fork, :after_fork, :failed_job_resque_termexception,
                   :before_child_exit_1], sequence, 'correct sequence')
     t.join
   end
 
+  def test_shutdown_between_jobs
+    @worker.log_with_severity :debug, "in test_sigterm_shutdown_during_first_job"
+    # enough time for all jobs to process.
+    @worker.seconds_per_fork = 60
+    @worker.term_child = true
+    @worker.graceful_term = true
+    @worker.term_timeout = 0.5
+    @worker.start_lag = 1
+
+    Resque.enqueue(QuickSequenceJob, 1)
+    Resque.enqueue(SequenceJob, 2)
+    t = Thread.new do
+      sleep 2
+      Process.kill("TERM", @worker.pid)
+    end
+    @worker.work(0)
+    $SEQ_WRITER.close
+
+    sequence = $SEQ_READER.each_line.map {|l| l.strip.to_sym }
+    assert_equal([:before_fork, :after_fork, :work_1, :before_child_exit_1, :failed_job_resque_worker_workerterminated], sequence, 'correct sequence')
+
+    t.join
+  end
+
   # test we can also limit fork job process by a job limit.
   def test_job_limit_sequence_of_events
-    @worker.log! "in test_job_limit_sequence_of_events"
+    @worker.log_with_severity :debug, "in test_job_limit_sequence_of_events"
     # only allow 20 jobs per fork
     ENV['JOBS_PER_FORK'] = '20'
 
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: resque-multi-job-forks
 version: !ruby/object:Gem::Version
-  version: 0.5.1
+  version: 0.5.5
 platform: ruby
 authors:
 - Mick Staugaard
@@ -10,7 +10,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2019-05-23 00:00:00.000000000 Z
+date: 2022-01-14 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: resque
@@ -21,7 +21,7 @@ dependencies:
       version: 1.27.0
     - - "<"
       - !ruby/object:Gem::Version
-        version: '2.1'
+        version: '2.3'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -31,7 +31,7 @@ dependencies:
       version: 1.27.0
     - - "<"
      - !ruby/object:Gem::Version
-        version: '2.1'
+        version: '2.3'
 - !ruby/object:Gem::Dependency
   name: json
   requirement: !ruby/object:Gem::Requirement
@@ -121,7 +121,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.0.3
+rubygems_version: 3.1.6
 signing_key:
 specification_version: 4
 summary: Have your resque workers process more that one job
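
The metadata change loosens the supported resque range from ">= 1.27.0, < 2.1" to ">= 1.27.0, < 2.3". Applications pick this up from the gem metadata automatically; an explicit Gemfile pin is only needed if you want to constrain resque yourself. An illustrative sketch:

    # Gemfile (illustrative)
    source 'https://rubygems.org'

    gem 'resque', '>= 1.27.0', '< 2.3'
    gem 'resque-multi-job-forks', '~> 0.5.5'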