workhorse 1.2.17.rc0 → 1.2.17.rc2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 25908f294cd9623ebde44ec8bba5181ee54965703f4fb93df18876b216718eb8
- data.tar.gz: 568e620571a9ded165fee1fc4420efbb6810f073edd4365c6c8efbe7c3676052
+ metadata.gz: ce2281ae514ba5f269328c38dd0a2e9bf0f76e2c26be09f4be467b1f818f5dec
+ data.tar.gz: 3315c9047af3bf707d23d5309b65a602474342f6c0081f329cd0582d18b7720f
  SHA512:
- metadata.gz: 83b6ac441e3762f251e35f04dd213044d6fb904a005155235546391cdecd4f9c0c1db24be7c30371da4704b27e6f18aa6ce0b448b6c6c1dbc8c77eeb81faa9e3
- data.tar.gz: 9c2940a6bfb7cd0be49170d81af2378f27395dbf597f6d8fa2c777bdc812b26d06cc849454ba5925fcf5f26b7724779b3d6e941bc6c9237d6dc251b7d071188f
+ metadata.gz: 2047809f4fe65fca5f5eaee641b5868846098e381a18d5d1d07a4a9fe3e0d94a709185b13e7cf79c3094eaed5f908aac7399abcb4488a13b288379f414a8fdf7
+ data.tar.gz: fdca924627efb8ab50938b43ac76873d5bc0a5e794484a8a1e1b7fa4afba4fe86aa4003c2ccba6ff63b9a977503f9a63f818e536bc07bc34c987be52b3580793
data/CHANGELOG.md CHANGED
@@ -1,5 +1,30 @@
  # Workhorse Changelog
 
+ ## 1.2.17.rc2 - 2024-02-08
+
+ * Remove unnecessary output from the `watch` command in certain cases.
+
+   Sitrox reference: #121312.
+
+ * Fix, improve and extend automated tests.
+
+ ## 1.2.17.rc1 - 2024-02-08
+
+ * Revamp memory handling:
+
+   * Workers now shut themselves down automatically when they exceed
+     `config.max_worker_memory_mb` (if configured and > 0), creating a shutdown
+     file (`tmp/pids/workhorse.<pid>.shutdown`).
+   * The `watch` command, if scheduled, silently restarts the shut-down worker
+     and removes the shutdown file.
+   * The presence or absence of the shutdown file tells the watcher whether to
+     remain silent or to produce output.
+   * This change limits `watch` command output to error cases, allowing seamless
+     cron integration for notification purposes.
+
+   Sitrox reference: #121312.
+
  ## 1.2.17.rc0 - 2024-02-05
 
  * Add option `config.max_worker_memory_mb` for automatic restart of workers
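The restart handshake described in the 1.2.17.rc1 entry boils down to a marker file under `tmp/pids`. The following is a minimal illustrative sketch only, not part of the gem: it reuses the `workhorse.<pid>.shutdown` naming and the `FileUtils` calls that appear in the code further down in this diff, with a placeholder PID.

```ruby
require 'fileutils'

pid = 12345 # placeholder PID of a worker process
shutdown_file = File.join('tmp', 'pids', "workhorse.#{pid}.shutdown")
FileUtils.mkdir_p(File.dirname(shutdown_file))

# Worker side: mark the shutdown as intentional before exiting.
FileUtils.touch(shutdown_file)

# Watcher side: a present marker means the restart is expected, so restart
# the worker silently and clear the marker; a stale PID file without a
# marker is treated as an error case and produces output.
if File.exist?(shutdown_file)
  # (restart the worker here)
  FileUtils.rm(shutdown_file)
end
```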
data/README.md CHANGED
@@ -466,17 +466,18 @@ succeeded jobs. You can run this using your scheduler in a specific interval.
 
  ## Memory handling
 
- When dealing with jobs that may exhibit a large memory footprint, it's important
- to note that Ruby might not release consumed memory back to the operating
- system. Consequently, your job workers could accumulate a significant amount of
- memory over time. To address this, Workhorse provides the
- `config.max_worker_memory_mb` option.
-
- If `config.max_worker_memory_mb` is set to a value above `0`, the `watch`
- command will check the memory footprint (RSS / resident size) of all worker
- processes. If any worker exceeds the specified footprint, Workhorse will
- silently restart it to ensure proper memory release. This process does not
- produce any output in the `watch` command.
+ When a worker exceeds the memory limit specified by
+ `config.max_worker_memory_mb` (assuming it is configured and > 0), it initiates
+ a graceful shutdown by creating a shutdown file named
+ `tmp/pids/workhorse.<pid>.shutdown`.
+
+ The `watch` command, if scheduled, monitors for this shutdown file. Upon
+ detecting it, it silently restarts the shut-down worker and removes the
+ shutdown file to signal that the restart has begun.
+
+ This mechanism ensures that workers are restarted automatically, without
+ manual intervention, when memory limits are exceeded.
 
  Example configuration:
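The README's actual example configuration lies outside this hunk and is not reproduced here. Purely as an illustrative sketch, the limit discussed above can be set through the module-level accessor that this release's own tests use (`Workhorse.max_worker_memory_mb`); whether your application assigns it directly in an initializer or through a configuration block depends on your setup.

```ruby
# config/initializers/workhorse.rb (illustrative sketch, adapt to your app)

# Have a worker shut itself down, and be restarted by the scheduled `watch`
# command, once its resident set size (RSS) exceeds 512 MB.
# A value of 0 disables the check.
Workhorse.max_worker_memory_mb = 512
```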
 
data/VERSION CHANGED
@@ -1 +1 @@
- 1.2.17.rc0
+ 1.2.17.rc2
data/lib/workhorse/daemon.rb CHANGED
@@ -45,39 +45,56 @@ module Workhorse
  def start(quiet: false)
  code = 0
 
+ # Holds messages in format [[<message>, <severity>]]
+ messages = []
+
  for_each_worker do |worker|
- pid_file, pid = read_pid(worker)
+ pid_file, pid, active = read_pid(worker)
 
- if pid_file && pid
- warn "Worker ##{worker.id} (#{worker.name}): Already started (PID #{pid})" unless quiet
+ if pid_file && pid && active
+ messages << ["Worker ##{worker.id} (#{worker.name}): Already started (PID #{pid})", 2] unless quiet
  code = 2
  elsif pid_file
  File.delete pid_file
- puts "Worker ##{worker.id} (#{worker.name}): Starting (stale pid file)" unless quiet
+
+ shutdown_file = pid ? Workhorse::Worker.shutdown_file_for(pid) : nil
+ shutdown_file = nil if shutdown_file && !File.exist?(shutdown_file)
+
+ messages << ["Worker ##{worker.id} (#{worker.name}): Starting (stale pid file)", 1] unless quiet || shutdown_file
  start_worker worker
+ FileUtils.rm(shutdown_file) if shutdown_file
  else
- warn "Worker ##{worker.id} (#{worker.name}): Starting" unless quiet
+ messages << ["Worker ##{worker.id} (#{worker.name}): Starting", 1] unless quiet
  start_worker worker
  end
  end
 
+ if messages.any?
+ min = messages.min_by(&:last)[1]
+
+ # Only print messages if there is at least one message with severity 1
+ if min == 1
+ messages.each { |(message, _severity)| warn message }
+ end
+ end
+
  return code
  end
 
- def stop(kill = false)
+ def stop(kill = false, quiet: false)
  code = 0
 
  for_each_worker do |worker|
- pid_file, pid = read_pid(worker)
+ pid_file, pid, active = read_pid(worker)
 
- if pid_file && pid
- puts "Worker (#{worker.name}) ##{worker.id}: Stopping"
+ if pid_file && pid && active
+ puts "Worker (#{worker.name}) ##{worker.id}: Stopping" unless quiet
  stop_worker pid_file, pid, kill: kill
  elsif pid_file
  File.delete pid_file
- puts "Worker (#{worker.name}) ##{worker.id}: Already stopped (stale PID file)"
+ puts "Worker (#{worker.name}) ##{worker.id}: Already stopped (stale PID file)" unless quiet
  else
- warn "Worker (#{worker.name}) ##{worker.id}: Already stopped"
+ warn "Worker (#{worker.name}) ##{worker.id}: Already stopped" unless quiet
  code = 2
  end
  end
@@ -89,9 +106,9 @@ module Workhorse
  code = 0
 
  for_each_worker do |worker|
- pid_file, pid = read_pid(worker)
+ pid_file, pid, active = read_pid(worker)
 
- if pid_file && pid
+ if pid_file && pid && active
  puts "Worker ##{worker.id} (#{worker.name}): Running" unless quiet
  elsif pid_file
  warn "Worker ##{worker.id} (#{worker.name}): Not running (stale PID file)" unless quiet
@@ -113,14 +130,10 @@ module Workhorse
  end
 
  if should_be_running && status(quiet: true) != 0
- code = start(quiet: Workhorse.silence_watcher)
+ return start(quiet: Workhorse.silence_watcher)
  else
- code = 0
+ return 0
  end
-
- watch_memory! if should_be_running
-
- return code
  end
 
  def restart
@@ -132,7 +145,9 @@ module Workhorse
  code = 0
 
  for_each_worker do |worker|
- _pid_file, pid = read_pid(worker)
+ _pid_file, pid, active = read_pid(worker)
+
+ next unless pid && active
 
  begin
  Process.kill 'HUP', pid
@@ -148,30 +163,6 @@ module Workhorse
 
  private
 
- def watch_memory!
- return if Workhorse.max_worker_memory_mb == 0
-
- for_each_worker do |worker|
- pid_file, pid = read_pid(worker)
- next unless pid_file && pid
-
- memory = memory_for(pid)
- next unless memory
-
- if memory > Workhorse.max_worker_memory_mb
- stop_worker pid_file, pid
- start_worker worker
- end
- end
- end
-
- # Returns the memory (RSS) in MB for the given process.
- def memory_for(pid)
- mem = `ps -p #{pid} -o rss=`&.strip
- return nil if mem.blank?
- return mem.to_i / 1024
- end
-
  def for_each_worker(&block)
  @workers.each(&block)
  end
@@ -237,16 +228,21 @@ module Workhorse
 
  def read_pid(worker)
  file = pid_file_for(worker)
+ pid = nil
+ active = false
 
  if File.exist?(file)
  raw_pid = File.read(file)
- return nil, nil if raw_pid.blank?
 
- pid = Integer(raw_pid)
- return file, process?(pid) ? pid : nil
+ unless raw_pid.blank?
+ pid = Integer(raw_pid)
+ active = process?(pid)
+ end
  else
  return nil, nil
  end
+
+ return file, pid, active
  end
  end
  end
data/lib/workhorse/performer.rb CHANGED
@@ -22,7 +22,7 @@ module Workhorse
  Thread.current[:workhorse_current_performer] = self
 
  ActiveRecord::Base.connection_pool.with_connection do
- if defined?(Rails) && Rails.application && Rails.application.respond_to?(:executor)
+ if defined?(Rails) && Rails.respond_to?(:application) && Rails.application && Rails.application.respond_to?(:executor)
  Rails.application.executor.wrap do
  perform_wrapped
  end
@@ -63,7 +63,6 @@ module Workhorse
 
  inner_job_class = deserialized_job.try(:job_class) || deserialized_job.class
  skip_tx = inner_job_class.try(:skip_tx?)
- log "SKIP TX: #{skip_tx.inspect}".red, :error
 
  if Workhorse.perform_jobs_in_tx && !skip_tx
  Workhorse.tx_callback.call do
data/lib/workhorse/poller.rb CHANGED
@@ -9,7 +9,7 @@ module Workhorse
  attr_reader :worker
  attr_reader :table
 
- def initialize(worker)
+ def initialize(worker, before_poll = proc { true })
  @worker = worker
  @running = false
  @table = Workhorse::DbJob.arel_table
@@ -17,6 +17,7 @@ module Workhorse
  @instant_repoll = Concurrent::AtomicBoolean.new(false)
  @global_lock_fails = 0
  @max_global_lock_fails_reached = false
+ @before_poll = before_poll
  end
 
  def running?
@@ -34,6 +35,12 @@ module Workhorse
  break unless running?
 
  begin
+ unless @before_poll.call
+ Thread.new { worker.shutdown }
+ sleep
+ next
+ end
+
  poll
  sleep
  rescue Exception => e
data/lib/workhorse/pool.rb CHANGED
@@ -2,6 +2,7 @@ module Workhorse
  # Abstraction layer of a simple thread pool implementation used by the worker.
  class Pool
  attr_reader :mutex
+ attr_reader :active_threads
 
  def initialize(size)
  @size = size
data/lib/workhorse/worker.rb CHANGED
@@ -20,6 +20,12 @@ module Workhorse
  worker.wait
  end
 
+ # @private
+ def self.shutdown_file_for(pid)
+ return nil unless defined?(Rails)
+ Rails.root.join('tmp', 'pids', "workhorse.#{pid}.shutdown")
+ end
+
  # Instantiates a new worker. The worker is not automatically started.
  #
  # @param queues [Array] The queues you want this worker to process. If an
@@ -51,7 +57,7 @@
 
  @mutex = Mutex.new
  @pool = Pool.new(@pool_size)
- @poller = Workhorse::Poller.new(self)
+ @poller = Workhorse::Poller.new(self, proc { check_memory })
  @logger = logger
 
  unless (@polling_interval / 0.1).round(2).modulo(1).zero?
@@ -155,6 +161,37 @@
 
  private
 
+ def check_memory
+ mem = current_memory_consumption
+
+ unless mem
+ log "Could not determine memory consumption of worker with pid #{pid}"
+ return false
+ end
+
+ max = Workhorse.max_worker_memory_mb
+ exceeded = max > 0 && current_memory_consumption > max
+
+ return true unless exceeded
+
+ if defined?(Rails)
+ FileUtils.touch self.class.shutdown_file_for(pid)
+ end
+
+ log "Worker process #{id.inspect} memory consumption (RSS) of #{mem}MB exceeds "\
+ "configured per-worker limit of #{max}MB and is now being shut down. Make sure "\
+ 'that your worker processes are watched (e.g. using the "watch"-command) for ' \
+ 'this worker to be restarted automatically.'
+
+ return false
+ end
+
+ def current_memory_consumption
+ mem = `ps -p #{pid} -o rss=`&.strip
+ return nil if mem.blank?
+ return mem.to_i / 1024
+ end
+
  def check_rails_env
  unless Rails.env.production?
  warn 'WARNING: Always run workhorse workers in production environment. Other environments can lead to unexpected behavior.'
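The `current_memory_consumption` helper added above shells out to `ps` for the worker's resident set size. Below is a standalone sketch of the same measurement; the method name `rss_mb` is illustrative, and plain `String#empty?` stands in for ActiveSupport's `blank?`.

```ruby
# Resident set size (RSS) of a process in megabytes, or nil if `ps` prints
# nothing (e.g. the process no longer exists). `ps -o rss=` reports kilobytes
# on Linux and macOS, hence the division by 1024.
def rss_mb(pid)
  raw = `ps -p #{Integer(pid)} -o rss=`.strip
  return nil if raw.empty?
  raw.to_i / 1024
end

puts rss_mb(Process.pid) # e.g. 42
```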
data/test/lib/jobs.rb CHANGED
@@ -36,6 +36,15 @@ class SyntaxErrorJob
  end
  end
 
+ class MemHungryJob
+ class_attribute :data
+
+ # Should consume roughly 1GB of memory.
+ def perform
+ self.class.data = 'x' * 250.megabytes
+ end
+ end
+
  class DummyRailsOpsOp
  class_attribute :results
  self.results = Concurrent::Array.new
data/test/lib/test_helper.rb CHANGED
@@ -5,15 +5,64 @@ require 'pry'
  require 'colorize'
  require 'mysql2'
  require 'benchmark'
+ require 'concurrent'
  require 'jobs'
 
+ class MockRailsEnv < String
+ def production?
+ self == 'production'
+ end
+
+ def test?
+ self == 'test'
+ end
+
+ def development?
+ self == 'development'
+ end
+ end
+
+ class Rails
+ def self.root
+ Pathname.new(File.expand_path(File.join(File.dirname(__FILE__), '../../')))
+ end
+
+ def self.env
+ MockRailsEnv.new('production')
+ end
+ end
+
  class WorkhorseTest < ActiveSupport::TestCase
  def setup
+ remove_pids!
+ Workhorse.silence_watcher = true
  Workhorse::DbJob.delete_all
  end
 
  protected
 
+ attr_reader :daemon
+
+ def remove_pids!
+ Dir[Rails.root.join('tmp', 'pids', '*')].each do |file|
+ FileUtils.rm file
+ end
+ end
+
+ def kill(pid)
+ signals = %w[TERM INT]
+
+ loop do
+ begin
+ signals.each { |signal| Process.kill(signal, pid) }
+ rescue Errno::ESRCH
+ break
+ end
+
+ sleep 0.5
+ end
+ end
+
  def capture_log(level: :debug)
  io = StringIO.new
  logger = Logger.new(io, level: level)
@@ -31,6 +80,14 @@ class WorkhorseTest < ActiveSupport::TestCase
  end
  end
 
+ def work_until(max: 50, interval: 0.1, **options, &block)
+ w = Workhorse::Worker.new(**options)
+ w.start
+ return with_retries(max, interval: interval, &block)
+ ensure
+ w.shutdown
+ end
+
  def with_worker(options = {})
  w = Workhorse::Worker.new(**options)
  w.start
@@ -40,6 +97,45 @@ class WorkhorseTest < ActiveSupport::TestCase
  w.shutdown
  end
  end
+
+ def with_daemon(workers = 1, &_block)
+ @daemon = Workhorse::Daemon.new(pidfile: 'tmp/pids/test%s.pid') do |d|
+ workers.times do |i|
+ d.worker "Test Worker #{i}" do
+ begin
+ Workhorse::Worker.start_and_wait(
+ pool_size: 1,
+ polling_interval: 0.1
+ )
+ end
+ end
+ end
+ end
+ daemon.start(quiet: true)
+ yield @daemon
+ ensure
+ daemon.stop(quiet: true)
+ end
+
+ def with_retries(max = 50, interval: 0.1, &_block)
+ runs = 0
+
+ loop do
+ return yield
+ rescue Minitest::Assertion
+ fail if runs > max
+ sleep interval
+ runs += 1
+ end
+ end
+
+ def capture_stderr
+ old, $stderr = $stderr, StringIO.new
+ yield
+ $stderr.string
+ ensure
+ $stderr = old
+ end
  end
 
  ActiveRecord::Base.establish_connection(
data/test/workhorse/daemon_test.rb ADDED
@@ -0,0 +1,92 @@
+ require 'test_helper'
+
+ class Workhorse::DaemonTest < WorkhorseTest
+ def setup
+ remove_pids!
+ end
+
+ def test_watch_all_ok
+ with_daemon 2 do
+ assert_watch_output
+ end
+ end
+
+ def test_watch_starting_stale_pid
+ with_daemon 2 do
+ # Kill first worker
+ Process.kill 'KILL', daemon.workers.first.pid
+
+ # Watch
+ assert_watch_output(
+ 'Worker #1 (Test Worker 0): Starting (stale pid file)',
+ "Worker #2 (Test Worker 1): Already started (PID #{daemon.workers.second.pid})"
+ )
+ end
+ end
+
+ def test_watch_starting_missing_pid
+ with_daemon 2 do
+ # Kill first worker
+ kill daemon.workers.first.pid
+ FileUtils.rm "tmp/pids/test1.pid"
+
+ # Watch
+ assert_watch_output(
+ 'Worker #1 (Test Worker 0): Starting',
+ "Worker #2 (Test Worker 1): Already started (PID #{daemon.workers.second.pid})"
+ )
+ end
+ end
+
+ def test_watch_controlled_shutdown
+ with_daemon 2 do
+ # Kill first worker
+ kill daemon.workers.first.pid
+ FileUtils.touch "tmp/pids/workhorse.#{daemon.workers.first.pid}.shutdown"
+
+ # Watch
+ assert_watch_output
+ end
+
+ assert_not File.exist?("tmp/pids/workhorse.#{daemon.workers.first.pid}.shutdown")
+ end
+
+ def test_watch_mixed
+ # Worker 0: Kill, remove PID
+ # Worker 1: Kill, keep PID
+ # Worker 2: Keep
+ # Worker 3: Controlled shutdown
+ with_daemon 4 do
+ # Worker 0: Kill, remove PID
+ kill daemon.workers[0].pid
+ FileUtils.rm "tmp/pids/test1.pid"
+
+ # Worker 1: Kill, keep PID
+ kill daemon.workers[1].pid
+
+ # Worker 3: Controlled shutdown
+ kill daemon.workers[3].pid
+ FileUtils.touch "tmp/pids/workhorse.#{daemon.workers.first.pid}.shutdown"
+
+ # Watch
+ assert_watch_output(
+ 'Worker #1 (Test Worker 0): Starting',
+ 'Worker #2 (Test Worker 1): Starting (stale pid file)',
+ "Worker #3 (Test Worker 2): Already started (PID #{daemon.workers[2].pid})",
+ 'Worker #4 (Test Worker 3): Starting (stale pid file)'
+ )
+ end
+
+ assert_not File.exist?("tmp/pids/workhorse.#{daemon.workers.first.pid}.shutdown")
+ end
+
+ private
+
+ def assert_watch_output(*expected_lines)
+ silence_watcher_was = Workhorse.silence_watcher
+ Workhorse.silence_watcher = false
+ assert_equal expected_lines, capture_stderr { daemon.watch }.lines.map(&:chomp)
+ ensure
+ Workhorse.silence_watcher = silence_watcher_was
+ end
+ end
data/test/workhorse/poller_test.rb CHANGED
@@ -182,31 +182,32 @@ class Workhorse::PollerTest < WorkhorseTest
  Workhorse::DbJob.delete_all
 
  Workhorse.clean_stuck_jobs = clean
- start_deamon
- Workhorse.enqueue BasicJob.new(sleep_time: 5)
- sleep 0.2
- kill_deamon_workers
-
- assert_equal 1, Workhorse::DbJob.count
-
- Workhorse::DbJob.first.update(
- state: 'locked',
- started_at: nil
- )
-
- Workhorse::Worker.new.poller.send(:clean_stuck_jobs!) if clean
-
- assert_equal 1, Workhorse::DbJob.count
-
- Workhorse::DbJob.first.tap do |job|
- if clean
- assert_equal 'waiting', job.state
- assert_nil job.locked_at
- assert_nil job.locked_by
- assert_nil job.started_at
- assert_nil job.last_error
- else
- assert_equal 'locked', job.state
+ with_daemon do
+ Workhorse.enqueue BasicJob.new(sleep_time: 5)
+ sleep 0.2
+ kill_deamon_workers
+
+ assert_equal 1, Workhorse::DbJob.count
+
+ Workhorse::DbJob.first.update(
+ state: 'locked',
+ started_at: nil
+ )
+
+ Workhorse::Worker.new.poller.send(:clean_stuck_jobs!) if clean
+
+ assert_equal 1, Workhorse::DbJob.count
+
+ Workhorse::DbJob.first.tap do |job|
+ if clean
+ assert_equal 'waiting', job.state
+ assert_nil job.locked_at
+ assert_nil job.locked_by
+ assert_nil job.started_at
+ assert_nil job.last_error
+ else
+ assert_equal 'locked', job.state
+ end
  end
  end
  ensure
@@ -219,25 +220,26 @@ class Workhorse::PollerTest < WorkhorseTest
  Workhorse::DbJob.delete_all
 
  Workhorse.clean_stuck_jobs = true
- start_deamon
- Workhorse.enqueue BasicJob.new(sleep_time: 5)
- sleep 0.2
- kill_deamon_workers
-
- assert_equal 1, Workhorse::DbJob.count
- assert_equal 'started', Workhorse::DbJob.first.state
-
- work 0.1 if clean
-
- assert_equal 1, Workhorse::DbJob.count
-
- Workhorse::DbJob.first.tap do |job|
- if clean
- assert_equal 'failed', job.state
- assert_match(/started by PID #{@daemon.workers.first.pid}/, job.last_error)
- assert_match(/on host #{Socket.gethostname}/, job.last_error)
- else
- assert_equal 'started', job.state
+ with_daemon do
+ Workhorse.enqueue BasicJob.new(sleep_time: 5)
+ sleep 0.2
+ kill_deamon_workers
+
+ assert_equal 1, Workhorse::DbJob.count
+ assert_equal 'started', Workhorse::DbJob.first.state
+
+ work 0.1 if clean
+
+ assert_equal 1, Workhorse::DbJob.count
+
+ Workhorse::DbJob.first.tap do |job|
+ if clean
+ assert_equal 'failed', job.state
+ assert_match(/started by PID #{daemon.workers.first.pid}/, job.last_error)
+ assert_match(/on host #{Socket.gethostname}/, job.last_error)
+ else
+ assert_equal 'started', job.state
+ end
  end
  end
  ensure
@@ -248,23 +250,11 @@ class Workhorse::PollerTest < WorkhorseTest
  private
 
  def kill_deamon_workers
- @daemon.workers.each do |worker|
+ daemon.workers.each do |worker|
  Process.kill 'KILL', worker.pid
  end
  end
 
- def start_deamon
- @daemon = Workhorse::Daemon.new(pidfile: 'tmp/pids/test%s.pid') do |d|
- d.worker 'Test Worker' do
- Workhorse::Worker.start_and_wait(
- pool_size: 1,
- polling_interval: 0.1
- )
- end
- end
- @daemon.start
- end
-
  def setup
  Workhorse::DbJob.delete_all
  end
data/test/workhorse/worker_test.rb CHANGED
@@ -180,8 +180,60 @@ class Workhorse::WorkerTest < WorkhorseTest
  assert_equal 'waiting', jobs[1].state
  end
 
+ def test_controlled_shutdown
+ Workhorse.max_worker_memory_mb = 50
+ with_daemon do
+ pid = with_retries do
+ pid = daemon.workers.first.pid
+ assert_process(pid)
+ pid
+ end
+
+ 10.times do
+ Workhorse.enqueue BasicJob.new(sleep_time: 0.1)
+
+ with_retries do
+ assert_equal 'succeeded', Workhorse::DbJob.first.state
+ Workhorse::DbJob.delete_all
+ end
+ end
+
+ Workhorse.enqueue MemHungryJob.new
+
+ with_retries do
+ assert_equal 'succeeded', Workhorse::DbJob.first.state
+
+ assert File.exist?("tmp/pids/workhorse.#{pid}.shutdown")
+ assert_not_process pid
+ end
+
+ capture_stderr { daemon.watch }
+
+ with_retries do
+ assert_not File.exist?("tmp/pids/workhorse.#{pid}.shutdown")
+ end
+ end
+ ensure
+ Workhorse.max_worker_memory_mb = 0
+ end
+
  private
 
+ def assert_process(pid)
+ assert process?(pid), "Process #{pid} expected to be running"
+ end
+
+ def assert_not_process(pid)
+ assert_not process?(pid), "Process #{pid} expected to be stopped"
+ end
+
+ def process?(pid)
+ Process.kill(0, pid)
+ true
+ rescue Errno::EPERM, Errno::ESRCH
+ false
+ end
+
  def enqueue_in_multiple_queues
  Workhorse.enqueue BasicJob.new(some_param: nil)
  Workhorse.enqueue BasicJob.new(some_param: :q1), queue: :q1
data/workhorse.gemspec CHANGED
@@ -1,18 +1,18 @@
  # -*- encoding: utf-8 -*-
- # stub: workhorse 1.2.17.rc0 ruby lib
+ # stub: workhorse 1.2.17.rc2 ruby lib
 
  Gem::Specification.new do |s|
  s.name = "workhorse".freeze
- s.version = "1.2.17.rc0"
+ s.version = "1.2.17.rc2"
 
  s.required_rubygems_version = Gem::Requirement.new("> 1.3.1".freeze) if s.respond_to? :required_rubygems_version=
  s.require_paths = ["lib".freeze]
  s.authors = ["Sitrox".freeze]
- s.date = "2024-02-05"
- s.files = [".github/workflows/ruby.yml".freeze, ".gitignore".freeze, ".releaser_config".freeze, ".rubocop.yml".freeze, "CHANGELOG.md".freeze, "FAQ.md".freeze, "Gemfile".freeze, "LICENSE".freeze, "README.md".freeze, "RUBY_VERSION".freeze, "Rakefile".freeze, "VERSION".freeze, "bin/rubocop".freeze, "lib/active_job/queue_adapters/workhorse_adapter.rb".freeze, "lib/generators/workhorse/install_generator.rb".freeze, "lib/generators/workhorse/templates/bin/workhorse.rb".freeze, "lib/generators/workhorse/templates/config/initializers/workhorse.rb".freeze, "lib/generators/workhorse/templates/create_table_jobs.rb".freeze, "lib/workhorse.rb".freeze, "lib/workhorse/active_job_extension.rb".freeze, "lib/workhorse/daemon.rb".freeze, "lib/workhorse/daemon/shell_handler.rb".freeze, "lib/workhorse/db_job.rb".freeze, "lib/workhorse/enqueuer.rb".freeze, "lib/workhorse/jobs/cleanup_succeeded_jobs.rb".freeze, "lib/workhorse/jobs/detect_stale_jobs_job.rb".freeze, "lib/workhorse/jobs/run_active_job.rb".freeze, "lib/workhorse/jobs/run_rails_op.rb".freeze, "lib/workhorse/performer.rb".freeze, "lib/workhorse/poller.rb".freeze, "lib/workhorse/pool.rb".freeze, "lib/workhorse/scoped_env.rb".freeze, "lib/workhorse/worker.rb".freeze, "test/active_job/queue_adapters/workhorse_adapter_test.rb".freeze, "test/lib/db_schema.rb".freeze, "test/lib/jobs.rb".freeze, "test/lib/test_helper.rb".freeze, "test/workhorse/db_job_test.rb".freeze, "test/workhorse/enqueuer_test.rb".freeze, "test/workhorse/performer_test.rb".freeze, "test/workhorse/poller_test.rb".freeze, "test/workhorse/pool_test.rb".freeze, "test/workhorse/worker_test.rb".freeze, "workhorse.gemspec".freeze]
+ s.date = "2024-02-08"
+ s.files = [".github/workflows/ruby.yml".freeze, ".gitignore".freeze, ".releaser_config".freeze, ".rubocop.yml".freeze, "CHANGELOG.md".freeze, "FAQ.md".freeze, "Gemfile".freeze, "LICENSE".freeze, "README.md".freeze, "RUBY_VERSION".freeze, "Rakefile".freeze, "VERSION".freeze, "bin/rubocop".freeze, "lib/active_job/queue_adapters/workhorse_adapter.rb".freeze, "lib/generators/workhorse/install_generator.rb".freeze, "lib/generators/workhorse/templates/bin/workhorse.rb".freeze, "lib/generators/workhorse/templates/config/initializers/workhorse.rb".freeze, "lib/generators/workhorse/templates/create_table_jobs.rb".freeze, "lib/workhorse.rb".freeze, "lib/workhorse/active_job_extension.rb".freeze, "lib/workhorse/daemon.rb".freeze, "lib/workhorse/daemon/shell_handler.rb".freeze, "lib/workhorse/db_job.rb".freeze, "lib/workhorse/enqueuer.rb".freeze, "lib/workhorse/jobs/cleanup_succeeded_jobs.rb".freeze, "lib/workhorse/jobs/detect_stale_jobs_job.rb".freeze, "lib/workhorse/jobs/run_active_job.rb".freeze, "lib/workhorse/jobs/run_rails_op.rb".freeze, "lib/workhorse/performer.rb".freeze, "lib/workhorse/poller.rb".freeze, "lib/workhorse/pool.rb".freeze, "lib/workhorse/scoped_env.rb".freeze, "lib/workhorse/worker.rb".freeze, "test/active_job/queue_adapters/workhorse_adapter_test.rb".freeze, "test/lib/db_schema.rb".freeze, "test/lib/jobs.rb".freeze, "test/lib/test_helper.rb".freeze, "test/workhorse/daemon_test.rb".freeze, "test/workhorse/db_job_test.rb".freeze, "test/workhorse/enqueuer_test.rb".freeze, "test/workhorse/performer_test.rb".freeze, "test/workhorse/poller_test.rb".freeze, "test/workhorse/pool_test.rb".freeze, "test/workhorse/worker_test.rb".freeze, "workhorse.gemspec".freeze]
  s.rubygems_version = "3.4.6".freeze
  s.summary = "Multi-threaded job backend with database queuing for ruby.".freeze
- s.test_files = ["test/active_job/queue_adapters/workhorse_adapter_test.rb".freeze, "test/lib/db_schema.rb".freeze, "test/lib/jobs.rb".freeze, "test/lib/test_helper.rb".freeze, "test/workhorse/db_job_test.rb".freeze, "test/workhorse/enqueuer_test.rb".freeze, "test/workhorse/performer_test.rb".freeze, "test/workhorse/poller_test.rb".freeze, "test/workhorse/pool_test.rb".freeze, "test/workhorse/worker_test.rb".freeze]
+ s.test_files = ["test/active_job/queue_adapters/workhorse_adapter_test.rb".freeze, "test/lib/db_schema.rb".freeze, "test/lib/jobs.rb".freeze, "test/lib/test_helper.rb".freeze, "test/workhorse/daemon_test.rb".freeze, "test/workhorse/db_job_test.rb".freeze, "test/workhorse/enqueuer_test.rb".freeze, "test/workhorse/performer_test.rb".freeze, "test/workhorse/poller_test.rb".freeze, "test/workhorse/pool_test.rb".freeze, "test/workhorse/worker_test.rb".freeze]
 
  s.specification_version = 4
 
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: workhorse
  version: !ruby/object:Gem::Version
- version: 1.2.17.rc0
+ version: 1.2.17.rc2
  platform: ruby
  authors:
  - Sitrox
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2024-02-05 00:00:00.000000000 Z
+ date: 2024-02-08 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bundler
@@ -221,6 +221,7 @@ files:
  - test/lib/db_schema.rb
  - test/lib/jobs.rb
  - test/lib/test_helper.rb
+ - test/workhorse/daemon_test.rb
  - test/workhorse/db_job_test.rb
  - test/workhorse/enqueuer_test.rb
  - test/workhorse/performer_test.rb
@@ -255,6 +256,7 @@ test_files:
  - test/lib/db_schema.rb
  - test/lib/jobs.rb
  - test/lib/test_helper.rb
+ - test/workhorse/daemon_test.rb
  - test/workhorse/db_job_test.rb
  - test/workhorse/enqueuer_test.rb
  - test/workhorse/performer_test.rb