canvas-jobs 0.9.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (61)
  1. checksums.yaml +7 -0
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +40 -0
  3. data/db/migrate/20110208031356_add_delayed_jobs_tag.rb +14 -0
  4. data/db/migrate/20110426161613_add_delayed_jobs_max_attempts.rb +13 -0
  5. data/db/migrate/20110516225834_add_delayed_jobs_strand.rb +14 -0
  6. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +26 -0
  7. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +40 -0
  8. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +52 -0
  9. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +31 -0
  10. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +15 -0
  11. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +80 -0
  12. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +15 -0
  13. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +15 -0
  14. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +13 -0
  15. data/db/migrate/20140505215131_add_failed_jobs_original_job_id.rb +13 -0
  16. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +13 -0
  17. data/db/migrate/20140505223637_drop_failed_jobs_original_id.rb +13 -0
  18. data/db/migrate/20140512213941_add_source_to_jobs.rb +15 -0
  19. data/lib/canvas-jobs.rb +1 -0
  20. data/lib/delayed/backend/active_record.rb +297 -0
  21. data/lib/delayed/backend/base.rb +317 -0
  22. data/lib/delayed/backend/redis/bulk_update.lua +40 -0
  23. data/lib/delayed/backend/redis/destroy_job.lua +2 -0
  24. data/lib/delayed/backend/redis/enqueue.lua +29 -0
  25. data/lib/delayed/backend/redis/fail_job.lua +5 -0
  26. data/lib/delayed/backend/redis/find_available.lua +3 -0
  27. data/lib/delayed/backend/redis/functions.rb +57 -0
  28. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +17 -0
  29. data/lib/delayed/backend/redis/includes/jobs_common.lua +203 -0
  30. data/lib/delayed/backend/redis/job.rb +481 -0
  31. data/lib/delayed/backend/redis/set_running.lua +5 -0
  32. data/lib/delayed/backend/redis/tickle_strand.lua +2 -0
  33. data/lib/delayed/batch.rb +56 -0
  34. data/lib/delayed/engine.rb +4 -0
  35. data/lib/delayed/job_tracking.rb +31 -0
  36. data/lib/delayed/lifecycle.rb +83 -0
  37. data/lib/delayed/message_sending.rb +130 -0
  38. data/lib/delayed/performable_method.rb +42 -0
  39. data/lib/delayed/periodic.rb +81 -0
  40. data/lib/delayed/pool.rb +335 -0
  41. data/lib/delayed/settings.rb +32 -0
  42. data/lib/delayed/version.rb +3 -0
  43. data/lib/delayed/worker.rb +213 -0
  44. data/lib/delayed/yaml_extensions.rb +63 -0
  45. data/lib/delayed_job.rb +40 -0
  46. data/spec/active_record_job_spec.rb +61 -0
  47. data/spec/gemfiles/32.gemfile +6 -0
  48. data/spec/gemfiles/40.gemfile +6 -0
  49. data/spec/gemfiles/41.gemfile +6 -0
  50. data/spec/gemfiles/42.gemfile +6 -0
  51. data/spec/migrate/20140924140513_add_story_table.rb +7 -0
  52. data/spec/redis_job_spec.rb +77 -0
  53. data/spec/sample_jobs.rb +26 -0
  54. data/spec/shared/delayed_batch.rb +85 -0
  55. data/spec/shared/delayed_method.rb +419 -0
  56. data/spec/shared/performable_method.rb +52 -0
  57. data/spec/shared/shared_backend.rb +836 -0
  58. data/spec/shared/worker.rb +291 -0
  59. data/spec/shared_jobs_specs.rb +13 -0
  60. data/spec/spec_helper.rb +91 -0
  61. metadata +329 -0
require 'optparse'
require 'yaml'
require 'fileutils'

module Delayed
  # Pool is the command-line entry point for running delayed jobs. It parses a
  # <command> (start/stop/run/restart/status), reads the YAML config, and
  # manages a group of forked Worker processes plus a periodic-job auditor.
  class Pool
    # Hook invoked inside every freshly-forked child before reconnecting;
    # lets the host app reset state that must not be shared across fork.
    mattr_accessor :on_fork
    self.on_fork = ->{ }

    attr_reader :options, :workers

    # args: the CLI argument vector (defaults to ARGV) parsed by #run.
    def initialize(args = ARGV)
      @args = args
      @workers = {}
      @config = { :workers => [] }
      @options = {
        :config_file => expand_rails_path("config/delayed_jobs.yml"),
        :pid_folder => expand_rails_path("tmp/pids"),
        :tail_logs => true, # only in FG mode
      }
    end

    # Parse CLI options, load the config file, and dispatch on the command.
    # Exits the process for `start` (when already running) and `status`.
    def run
      if GC.respond_to?(:copy_on_write_friendly=)
        GC.copy_on_write_friendly = true
      end

      op = OptionParser.new do |opts|
        opts.banner = "Usage #{$0} <command> <options>"
        opts.separator %{\nWhere <command> is one of:
  start      start the jobs daemon
  stop       stop the jobs daemon
  run        start and run in the foreground
  restart    stop and then start the jobs daemon
  status     show daemon status
}

        opts.separator "\n<options>"
        opts.on("-c", "--config", "Use alternate config file (default #{options[:config_file]})") { |c| options[:config_file] = c }
        opts.on("-p", "--pid", "Use alternate folder for PID files (default #{options[:pid_folder]})") { |p| options[:pid_folder] = p }
        opts.on("--no-tail", "Don't tail the logs (only affects non-daemon mode)") { options[:tail_logs] = false }
        opts.on("--with-prejudice", "When stopping, interrupt jobs in progress, instead of letting them drain") { options[:kill] ||= true }
        opts.on("--with-extreme-prejudice", "When stopping, immediately kill jobs in progress, instead of letting them drain") { options[:kill] = 9 }
        opts.on_tail("-h", "--help", "Show this message") { puts opts; exit }
      end
      op.parse!(@args)

      read_config(options[:config_file])

      command = @args.shift
      case command
      when 'start'
        exit 1 if status(:alive) == :running
        daemonize
        start
      when 'stop'
        stop(options[:kill])
      when 'run'
        start
      when 'status'
        if status
          exit 0
        else
          exit 1
        end
      when 'restart'
        alive = status(false)
        if alive == :running || (options[:kill] && alive == :draining)
          stop(options[:kill])
          # wait for the old pool to fully exit (or just to stop :running,
          # when draining is acceptable) before starting a new one
          if options[:kill]
            sleep(0.5) while status(false)
          else
            sleep(0.5) while status(false) == :running
          end
        end
        daemonize
        start
      when nil
        puts op
      else
        raise("Unknown command: #{command.inspect}")
      end
    end

    protected

    # Process title for the master; suffix is configurable via Settings.
    def procname
      "delayed_jobs_pool#{Settings.pool_procname_suffix}"
    end

    # Boot Rails, unlock orphans, spawn the auditor and all workers, then
    # block in #join supervising children until interrupted.
    def start
      load_rails
      tail_rails_log unless @daemon

      say "Started job master", :info
      $0 = procname
      apply_config

      # fork to handle unlocking (to prevent polluting the parent with worker objects)
      unlock_pid = fork_with_reconnects do
        unlock_orphaned_jobs
      end
      Process.wait unlock_pid

      spawn_periodic_auditor
      spawn_all_workers
      say "Workers spawned"
      join
      say "Shutting down"
    rescue Interrupt => e
      say "Signal received, exiting", :info
    rescue Exception => e
      say "Job master died with error: #{e.inspect}\n#{e.backtrace.join("\n")}", :fatal
      raise
    end

    # Log through Rails when available, otherwise stdout. The "[pid]P" prefix
    # marks pool-master lines.
    def say(msg, level = :debug)
      if defined?(Rails.logger) && Rails.logger
        Rails.logger.send(level, "[#{Process.pid}]P #{msg}")
      else
        puts(msg)
      end
    end

    def load_rails
      require(expand_rails_path("config/environment.rb"))
      Dir.chdir(Rails.root)
    end

    # Release job locks held by dead processes. Skipped when worker names are
    # overridden (locks can't be matched by process name) or explicitly disabled.
    def unlock_orphaned_jobs(worker = nil, pid = nil)
      # don't bother trying to unlock jobs by process name if the name is overridden
      return if @config.key?(:name)
      return if @config[:disable_automatic_orphan_unlocking]
      return if @config[:workers].any? { |worker_config| worker_config.key?(:name) || worker_config.key?('name') }

      unlocked_jobs = Delayed::Job.unlock_orphaned_jobs(pid)
      say "Unlocked #{unlocked_jobs} orphaned jobs" if unlocked_jobs > 0
      ActiveRecord::Base.connection_handler.clear_all_connections! unless Rails.env.test?
    end

    def spawn_all_workers
      # drop db connections before forking so children don't inherit them
      ActiveRecord::Base.connection_handler.clear_all_connections!

      @config[:workers].each do |worker_config|
        worker_config = worker_config.with_indifferent_access
        (worker_config[:workers] || 1).times { spawn_worker(@config.merge(worker_config)) }
      end
    end

    # Fork one Worker child and record it in @workers keyed by pid.
    def spawn_worker(worker_config)
      if worker_config[:periodic]
        return # backwards compat
      else
        worker_config[:parent_pid] = Process.pid
        worker = Delayed::Worker.new(worker_config)
      end

      pid = fork_with_reconnects do
        worker.start
      end
      workers[pid] = worker
    end

    # child processes need to reconnect so they don't accidentally share redis or
    # db connections with the parent
    def fork_with_reconnects
      fork do
        Pool.on_fork.()
        Delayed::Job.reconnect!
        yield
      end
    end

    # Background thread that periodically forks an audit process to enqueue
    # periodic jobs.
    def spawn_periodic_auditor
      return if @config[:disable_periodic_jobs]

      @periodic_thread = Thread.new do
        # schedule the initial audit immediately on startup
        schedule_periodic_audit
        # initial sleep is randomized, for some staggering in the audit calls
        # since job processors are usually all restarted at the same time
        sleep(rand(15 * 60))
        loop do
          schedule_periodic_audit
          sleep(15 * 60)
        end
      end
    end

    def schedule_periodic_audit
      pid = fork_with_reconnects do
        # we want to avoid db connections in the main pool process
        $0 = "delayed_periodic_audit_scheduler"
        Delayed::Periodic.audit_queue
      end
      # marker value (not a Worker) so #join knows not to respawn it
      workers[pid] = :periodic_audit
    end

    # Supervision loop: reap exited children; respawn real workers (after
    # unlocking any jobs they held), ignore finished audit runs.
    def join
      loop do
        child = Process.wait
        if child
          worker = workers.delete(child)
          if worker.is_a?(Symbol)
            say "ran auditor: #{worker}"
          else
            say "child exited: #{child}, restarting", :info
            # fork to handle unlocking (to prevent polluting the parent with worker objects)
            unlock_pid = fork_with_reconnects do
              unlock_orphaned_jobs(worker, child)
            end
            Process.wait unlock_pid
            spawn_worker(worker.config)
          end
        end
      end
    end

    # In foreground mode, stream the Rails log to stdout from a helper thread.
    def tail_rails_log
      return if !@options[:tail_logs]
      return if !Rails.logger.respond_to?(:log_path)
      Rails.logger.auto_flushing = true if Rails.logger.respond_to?(:auto_flushing=)
      Thread.new do
        f = File.open(Rails.logger.log_path, 'r')
        f.seek(0, IO::SEEK_END)
        loop do
          content = f.read
          content.present? ? STDOUT.print(content) : sleep(0.5)
        end
      end
    end

    # Classic double-fork daemonization: detach from the terminal, write the
    # pid file, and redirect stdio to a fallback logfile.
    def daemonize
      FileUtils.mkdir_p(pid_folder)
      puts "Daemonizing..."

      exit if fork
      Process.setsid
      exit if fork
      Process.setpgrp

      @daemon = true
      File.open(pid_file, 'wb') { |f| f.write(Process.pid.to_s) }
      # if we blow up so badly that we can't syslog the error, try to send
      # it somewhere useful
      last_ditch_logfile = self.last_ditch_logfile || "log/delayed_job.log"
      if last_ditch_logfile[0] != '|'
        last_ditch_logfile = expand_rails_path(last_ditch_logfile)
      end
      STDIN.reopen("/dev/null")
      STDOUT.reopen(open(last_ditch_logfile, 'a'))
      STDERR.reopen(STDOUT)
      STDOUT.sync = STDERR.sync = true
    end

    def pid_folder
      options[:pid_folder]
    end

    def pid_file
      File.join(pid_folder, 'delayed_jobs_pool.pid')
    end

    # Remove the pid file, but only if this process wrote it.
    def remove_pid_file
      return unless @daemon
      pid = File.read(pid_file) if File.file?(pid_file)
      if pid.to_i == Process.pid
        FileUtils.rm(pid_file)
      end
    end

    def last_ditch_logfile
      @config['last_ditch_logfile']
    end

    # Signal the running pool: INT to drain, TERM/KILL (to the whole process
    # group, hence the negated pid) when kill is requested.
    def stop(kill = false)
      pid = status(false) && File.read(pid_file).to_i if File.file?(pid_file)
      if pid && pid > 0
        puts "Stopping pool #{pid}..."
        signal = 'INT'
        if kill
          pid = -pid # send to the whole group
          if kill == 9
            signal = 'KILL'
          else
            signal = 'TERM'
          end
        end
        begin
          Process.kill(signal, pid)
        rescue Errno::ESRCH
          # ignore if the pid no longer exists
        end
      else
        status
      end
    end

    # Returns :running when the master pid answers signal 0, :draining when
    # only the process group does, else nil/false. Prints unless print is
    # false (or :alive, which suppresses only the "not running" message).
    def status(print = true)
      pid = File.read(pid_file) if File.file?(pid_file)
      alive = pid && pid.to_i > 0 && (Process.kill(0, pid.to_i) rescue false) && :running
      alive ||= :draining if pid.to_i > 0 && Process.kill(0, -pid.to_i) rescue false
      if alive
        puts "Delayed jobs #{alive}, pool PID: #{pid}" if print
      else
        puts "No delayed jobs pool running" if print && print != :alive
      end
      alive
    end

    # Load the per-environment config hash; raise ArgumentError when the file
    # yields nothing usable.
    def read_config(config_filename)
      config = YAML.load_file(config_filename)
      env = defined?(RAILS_ENV) ? RAILS_ENV : ENV['RAILS_ENV'] || 'development'
      @config = config[env] || config['default']
      # Backwards compatibility from when the config was just an array of queues
      @config = { :workers => @config } if @config.is_a?(Array)
      unless @config && @config.is_a?(Hash)
        raise ArgumentError,
          "Invalid config file #{config_filename}"
      end
    end

    # Copy recognized settings from the config hash into Delayed::Settings.
    def apply_config
      @config = @config.with_indifferent_access
      Settings::SETTINGS.each do |setting|
        Settings.send("#{setting}=", @config[setting.to_s]) if @config.key?(setting.to_s)
      end
    end

    # Resolve a path relative to the Rails app root (derived from the
    # Gemfile's location, so it works before Rails is loaded).
    def expand_rails_path(path)
      File.expand_path("../#{path}", ENV['BUNDLE_GEMFILE'])
    end

  end
end
module Delayed
  # Global, process-wide tunables for the job system. Each setting may hold a
  # plain value or a callable; readers invoke callables so values can be
  # resolved lazily at read time.
  module Settings
    # Settings whose readers take no arguments.
    SETTINGS = [ :queue, :max_attempts, :sleep_delay, :sleep_delay_stagger, :fetch_batch_size, :select_random_from_batch, :worker_procname_prefix, :pool_procname_suffix, :default_job_options ]
    # Settings that are callables invoked with arguments by their consumers.
    SETTINGS_WITH_ARGS = [ :num_strands ]

    SETTINGS.each do |setting|
      mattr_writer(setting)
      self.send("#{setting}=", nil)
      # reader unwraps callables so a setting can be a lambda or a literal
      define_singleton_method(setting) do
        val = class_variable_get(:"@@#{setting}")
        val.respond_to?(:call) ? val.call() : val
      end
    end

    mattr_accessor(*SETTINGS_WITH_ARGS)

    # Override the generated writer to reject blank queue names, which would
    # otherwise make jobs unreachable.
    def self.queue=(queue_name)
      raise(ArgumentError, "queue_name must not be blank") if queue_name.blank?
      @@queue = queue_name
    end

    # Defaults.
    self.queue = "queue"
    self.max_attempts = 1
    self.sleep_delay = 2.0
    self.sleep_delay_stagger = 2.0
    self.fetch_batch_size = 5
    self.select_random_from_batch = false

    self.num_strands = ->(strand_name){ nil }
    self.default_job_options = ->{ Hash.new }
  end
end
module Delayed
  # Gem version string; frozen so shared references can't be mutated.
  VERSION = "0.9.0".freeze
end
module Delayed

  class TimeoutError < RuntimeError; end

  require 'tmpdir'

  # A single job-processing process: repeatedly pops the next available job,
  # runs it inside lifecycle callbacks, and exits when told to (or when its
  # parent pool process goes away, or resource limits are exceeded).
  class Worker
    attr_reader :config, :queue, :min_priority, :max_priority

    # Callback to fire when a delayed job fails max_attempts times. If this
    # callback is defined, then the value of destroy_failed_jobs is ignored, and
    # the job is destroyed if this block returns true.
    #
    # This allows for destroying "uninteresting" failures, while keeping around
    # interesting failures to be investigated later.
    #
    # The block is called with args(job, last_exception)
    def self.on_max_failures=(block)
      @@on_max_failures = block
    end
    cattr_reader :on_max_failures

    def self.lifecycle
      @lifecycle ||= Delayed::Lifecycle.new
    end

    # options: queue/priority filters plus worker_max_job_count and
    # worker_max_memory_usage limits (0 = unlimited) and parent_pid.
    def initialize(options = {})
      @exit = false
      @config = options
      @parent_pid = options[:parent_pid]
      @queue = options[:queue] || Settings.queue
      @min_priority = options[:min_priority]
      @max_priority = options[:max_priority]
      @max_job_count = options[:worker_max_job_count].to_i
      @max_memory_usage = options[:worker_max_memory_usage].to_i
      @job_count = 0
    end

    def name=(name)
      @name = name
    end

    # Default name is host:pid; used to tag job locks.
    def name
      @name ||= "#{Socket.gethostname rescue "X"}:#{self.id}"
    end

    def set_process_name(new_name)
      $0 = "delayed:#{new_name}"
    end

    def exit?
      @exit || parent_exited?
    end

    # Detects pool-master death: ppid changes when the parent is gone.
    def parent_exited?
      @parent_pid && @parent_pid != Process.ppid
    end

    # Main loop; INT requests a graceful exit after the current job. Locks
    # held under this worker's name are always cleared on the way out.
    def start
      say "Starting worker", :info

      trap('INT') { say 'Exiting'; @exit = true }

      loop do
        run
        break if exit?
      end

      say "Stopping worker", :info
    rescue => e
      Rails.logger.fatal("Child process died: #{e.inspect}") rescue nil
      ErrorReport.log_exception(:delayed_jobs, e) rescue nil
    ensure
      Delayed::Job.clear_locks!(name)
    end

    # One iteration: pop a job (via :pop callbacks) and perform it, or sleep
    # a staggered interval when the queue is empty. Enforces the per-worker
    # job-count and memory limits by flagging @exit.
    def run
      job =
        self.class.lifecycle.run_callbacks(:pop, self) do
          Delayed::Job.get_and_lock_next_available(
            name,
            queue,
            min_priority,
            max_priority)
        end

      if job
        configure_for_job(job) do
          @job_count += perform(job)

          if @max_job_count > 0 && @job_count >= @max_job_count
            say "Max job count of #{@max_job_count} exceeded, dying"
            @exit = true
          end

          if @max_memory_usage > 0
            memory = sample_memory
            if memory > @max_memory_usage
              say "Memory usage of #{memory} exceeds max of #{@max_memory_usage}, dying"
              @exit = true
            else
              say "Memory usage: #{memory}"
            end
          end
        end
      else
        set_process_name("wait:#{Settings.worker_procname_prefix}#{@queue}:#{min_priority || 0}:#{max_priority || 'max'}")
        sleep(Settings.sleep_delay + (rand * Settings.sleep_delay_stagger))
      end
    end

    # Execute one job inside :perform callbacks; returns the number of jobs
    # completed (batches count each member). Failures are rescued and handed
    # to handle_failed_job so the loop keeps going.
    def perform(job)
      count = 1
      self.class.lifecycle.run_callbacks(:perform, self, job) do
        set_process_name("run:#{Settings.worker_procname_prefix}#{job.id}:#{job.name}")
        say("Processing #{log_job(job, :long)}", :info)
        runtime = Benchmark.realtime do
          if job.batch?
            # each job in the batch will have perform called on it, so we don't
            # need a timeout around this
            count = perform_batch(job)
          else
            job.invoke_job
          end
          job.destroy
        end
        say("Completed #{log_job(job)} #{"%.0fms" % (runtime * 1000)}", :info)
      end
      count
    rescue Exception => e
      handle_failed_job(job, e)
      count
    end

    # Run each member of a serial batch as its own locked job; returns the
    # batch size (nil for non-serial modes, which are not handled here).
    def perform_batch(parent_job)
      batch = parent_job.payload_object
      if batch.mode == :serial
        batch.jobs.each do |job|
          job.source = parent_job.source
          job.create_and_lock!(name)
          configure_for_job(job) do
            perform(job)
          end
        end
        batch.items.size
      end
    end

    # Record the error on the job and let the job decide (reschedule/fail).
    def handle_failed_job(job, error)
      job.last_error = "#{error.message}\n#{error.backtrace.join("\n")}"
      say("Failed with #{error.class} [#{error.message}] (#{job.attempts} attempts)", :error)
      job.reschedule(error)
    end

    def id
      Process.pid
    end

    def say(msg, level = :debug)
      Rails.logger.send(level, msg)
    end

    def log_job(job, format = :short)
      case format
      when :long
        "#{job.full_name} #{ job.to_json(:include_root => false, :only => %w(tag strand priority attempts created_at max_attempts source)) }"
      else
        job.full_name
      end
    end

    # set up the session context information, so that it gets logged with the job log lines
    # also set up a unique tmpdir, which will get removed at the end of the job.
    def configure_for_job(job)
      previous_tmpdir = ENV['TMPDIR']
      Thread.current[:running_delayed_job] = job

      Dir.mktmpdir("job-#{job.id}-#{self.name.gsub(/[^\w\.]/, '.')}-") do |dir|
        ENV['TMPDIR'] = dir
        yield
      end
    ensure
      ENV['TMPDIR'] = previous_tmpdir
      Thread.current[:running_delayed_job] = nil
    end

    # The job currently being run on this thread, if any.
    def self.current_job
      Thread.current[:running_delayed_job]
    end

    # `sample` reports KB, not B
    if File.directory?("/proc")
      # linux w/ proc fs
      LINUX_PAGE_SIZE = (size = `getconf PAGESIZE`.to_i; size > 0 ? size : 4096)
      def sample_memory
        s = File.read("/proc/#{Process.pid}/statm").to_i rescue 0
        s * LINUX_PAGE_SIZE / 1024
      end
    else
      # generic unix solution
      def sample_memory
        if Rails.env.test?
          0
        else
          # hmm this is actually resident set size, doesn't include swapped-to-disk
          # memory.
          `ps -o rss= -p #{Process.pid}`.to_i
        end
      end
    end

  end
end