sidekiq 5.0.0

Files changed (116)
  1. checksums.yaml +7 -0
  2. data/.github/contributing.md +32 -0
  3. data/.github/issue_template.md +9 -0
  4. data/.gitignore +13 -0
  5. data/.travis.yml +18 -0
  6. data/3.0-Upgrade.md +70 -0
  7. data/4.0-Upgrade.md +53 -0
  8. data/5.0-Upgrade.md +56 -0
  9. data/COMM-LICENSE +95 -0
  10. data/Changes.md +1402 -0
  11. data/Ent-Changes.md +174 -0
  12. data/Gemfile +29 -0
  13. data/LICENSE +9 -0
  14. data/Pro-2.0-Upgrade.md +138 -0
  15. data/Pro-3.0-Upgrade.md +44 -0
  16. data/Pro-Changes.md +632 -0
  17. data/README.md +107 -0
  18. data/Rakefile +12 -0
  19. data/bin/sidekiq +18 -0
  20. data/bin/sidekiqctl +99 -0
  21. data/bin/sidekiqload +149 -0
  22. data/code_of_conduct.md +50 -0
  23. data/lib/generators/sidekiq/templates/worker.rb.erb +9 -0
  24. data/lib/generators/sidekiq/templates/worker_spec.rb.erb +6 -0
  25. data/lib/generators/sidekiq/templates/worker_test.rb.erb +8 -0
  26. data/lib/generators/sidekiq/worker_generator.rb +49 -0
  27. data/lib/sidekiq.rb +228 -0
  28. data/lib/sidekiq/api.rb +871 -0
  29. data/lib/sidekiq/cli.rb +413 -0
  30. data/lib/sidekiq/client.rb +238 -0
  31. data/lib/sidekiq/core_ext.rb +119 -0
  32. data/lib/sidekiq/delay.rb +21 -0
  33. data/lib/sidekiq/exception_handler.rb +31 -0
  34. data/lib/sidekiq/extensions/action_mailer.rb +57 -0
  35. data/lib/sidekiq/extensions/active_record.rb +40 -0
  36. data/lib/sidekiq/extensions/class_methods.rb +40 -0
  37. data/lib/sidekiq/extensions/generic_proxy.rb +31 -0
  38. data/lib/sidekiq/fetch.rb +81 -0
  39. data/lib/sidekiq/job_logger.rb +27 -0
  40. data/lib/sidekiq/job_retry.rb +235 -0
  41. data/lib/sidekiq/launcher.rb +167 -0
  42. data/lib/sidekiq/logging.rb +106 -0
  43. data/lib/sidekiq/manager.rb +138 -0
  44. data/lib/sidekiq/middleware/chain.rb +150 -0
  45. data/lib/sidekiq/middleware/i18n.rb +42 -0
  46. data/lib/sidekiq/middleware/server/active_record.rb +22 -0
  47. data/lib/sidekiq/paginator.rb +43 -0
  48. data/lib/sidekiq/processor.rb +238 -0
  49. data/lib/sidekiq/rails.rb +60 -0
  50. data/lib/sidekiq/redis_connection.rb +106 -0
  51. data/lib/sidekiq/scheduled.rb +147 -0
  52. data/lib/sidekiq/testing.rb +324 -0
  53. data/lib/sidekiq/testing/inline.rb +29 -0
  54. data/lib/sidekiq/util.rb +63 -0
  55. data/lib/sidekiq/version.rb +4 -0
  56. data/lib/sidekiq/web.rb +213 -0
  57. data/lib/sidekiq/web/action.rb +89 -0
  58. data/lib/sidekiq/web/application.rb +331 -0
  59. data/lib/sidekiq/web/helpers.rb +286 -0
  60. data/lib/sidekiq/web/router.rb +100 -0
  61. data/lib/sidekiq/worker.rb +144 -0
  62. data/sidekiq.gemspec +32 -0
  63. data/web/assets/images/favicon.ico +0 -0
  64. data/web/assets/images/logo.png +0 -0
  65. data/web/assets/images/status.png +0 -0
  66. data/web/assets/javascripts/application.js +92 -0
  67. data/web/assets/javascripts/dashboard.js +298 -0
  68. data/web/assets/stylesheets/application-rtl.css +246 -0
  69. data/web/assets/stylesheets/application.css +1111 -0
  70. data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
  71. data/web/assets/stylesheets/bootstrap.css +5 -0
  72. data/web/locales/ar.yml +80 -0
  73. data/web/locales/cs.yml +78 -0
  74. data/web/locales/da.yml +68 -0
  75. data/web/locales/de.yml +69 -0
  76. data/web/locales/el.yml +68 -0
  77. data/web/locales/en.yml +79 -0
  78. data/web/locales/es.yml +69 -0
  79. data/web/locales/fa.yml +80 -0
  80. data/web/locales/fr.yml +78 -0
  81. data/web/locales/he.yml +79 -0
  82. data/web/locales/hi.yml +75 -0
  83. data/web/locales/it.yml +69 -0
  84. data/web/locales/ja.yml +78 -0
  85. data/web/locales/ko.yml +68 -0
  86. data/web/locales/nb.yml +77 -0
  87. data/web/locales/nl.yml +68 -0
  88. data/web/locales/pl.yml +59 -0
  89. data/web/locales/pt-br.yml +68 -0
  90. data/web/locales/pt.yml +67 -0
  91. data/web/locales/ru.yml +78 -0
  92. data/web/locales/sv.yml +68 -0
  93. data/web/locales/ta.yml +75 -0
  94. data/web/locales/uk.yml +76 -0
  95. data/web/locales/ur.yml +80 -0
  96. data/web/locales/zh-cn.yml +68 -0
  97. data/web/locales/zh-tw.yml +68 -0
  98. data/web/views/_footer.erb +17 -0
  99. data/web/views/_job_info.erb +88 -0
  100. data/web/views/_nav.erb +66 -0
  101. data/web/views/_paging.erb +23 -0
  102. data/web/views/_poll_link.erb +7 -0
  103. data/web/views/_status.erb +4 -0
  104. data/web/views/_summary.erb +40 -0
  105. data/web/views/busy.erb +94 -0
  106. data/web/views/dashboard.erb +75 -0
  107. data/web/views/dead.erb +34 -0
  108. data/web/views/layout.erb +40 -0
  109. data/web/views/morgue.erb +75 -0
  110. data/web/views/queue.erb +45 -0
  111. data/web/views/queues.erb +28 -0
  112. data/web/views/retries.erb +76 -0
  113. data/web/views/retry.erb +34 -0
  114. data/web/views/scheduled.erb +54 -0
  115. data/web/views/scheduled_job_info.erb +8 -0
  116. metadata +366 -0
@@ -0,0 +1,235 @@
+require 'sidekiq/scheduled'
+require 'sidekiq/api'
+
+module Sidekiq
+  ##
+  # Automatically retry jobs that fail in Sidekiq.
+  # Sidekiq's retry support assumes a typical development lifecycle:
+  #
+  #   0. Push some code changes with a bug in it.
+  #   1. Bug causes job processing to fail, Sidekiq's middleware captures
+  #      the job and pushes it onto a retry queue.
+  #   2. Sidekiq retries jobs in the retry queue multiple times with
+  #      an exponential delay, the job continues to fail.
+  #   3. After a few days, a developer deploys a fix. The job is
+  #      reprocessed successfully.
+  #   4. Once retries are exhausted, Sidekiq will give up and move the
+  #      job to the Dead Job Queue (aka morgue) where it must be dealt with
+  #      manually in the Web UI.
+  #   5. After 6 months on the DJQ, Sidekiq will discard the job.
+  #
+  # A job looks like:
+  #
+  #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
+  #
+  # The 'retry' option also accepts a number (in place of 'true'):
+  #
+  #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
+  #
+  # The job will be retried this number of times before giving up. (If simply
+  # 'true', Sidekiq retries 25 times)
+  #
+  # We'll add a bit more data to the job to support retries:
+  #
+  # * 'queue' - the queue to use
+  # * 'retry_count' - number of times we've retried so far.
+  # * 'error_message' - the message from the exception
+  # * 'error_class' - the exception class
+  # * 'failed_at' - the first time it failed
+  # * 'retried_at' - the last time it was retried
+  # * 'backtrace' - the number of lines of error backtrace to store
+  #
+  # We don't store the backtrace by default as that can add a lot of overhead
+  # to the job and everyone is using an error service, right?
+  #
+  # The default number of retries is 25 which works out to about 3 weeks
+  # You can change the default maximum number of retries in your initializer:
+  #
+  #     Sidekiq.options[:max_retries] = 7
+  #
+  # or limit the number of retries for a particular worker with:
+  #
+  #     class MyWorker
+  #       include Sidekiq::Worker
+  #       sidekiq_options :retry => 10
+  #     end
+  #
+  class JobRetry
+    class Skip < ::RuntimeError; end
+
+    include Sidekiq::Util
+
+    DEFAULT_MAX_RETRY_ATTEMPTS = 25
+
+    def initialize(options = {})
+      @max_retries = Sidekiq.options.merge(options).fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
+    end
+
+    # The global retry handler requires only the barest of data.
+    # We want to be able to retry as much as possible so we don't
+    # require the worker to be instantiated.
+    def global(msg, queue)
+      yield
+    rescue Skip => ex
+      raise ex
+    rescue Sidekiq::Shutdown => ey
+      # ignore, will be pushed back onto queue during hard_shutdown
+      raise ey
+    rescue Exception => e
+      # ignore, will be pushed back onto queue during hard_shutdown
+      raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
+
+      raise e unless msg['retry']
+      attempt_retry(nil, msg, queue, e)
+      raise e
+    end
+
+
+    # The local retry support means that any errors that occur within
+    # this block can be associated with the given worker instance.
+    # This is required to support the `sidekiq_retries_exhausted` block.
+    #
+    # Note that any exception from the block is wrapped in the Skip
+    # exception so the global block does not reprocess the error. The
+    # Skip exception is unwrapped within Sidekiq::Processor#process before
+    # calling the handle_exception handlers.
+    def local(worker, msg, queue)
+      yield
+    rescue Skip => ex
+      raise ex
+    rescue Sidekiq::Shutdown => ey
+      # ignore, will be pushed back onto queue during hard_shutdown
+      raise ey
+    rescue Exception => e
+      # ignore, will be pushed back onto queue during hard_shutdown
+      raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
+
+      if msg['retry'] == nil
+        msg['retry'] = worker.class.get_sidekiq_options['retry']
+      end
+
+      raise e unless msg['retry']
+      attempt_retry(worker, msg, queue, e)
+      # We've handled this error associated with this job, don't
+      # need to handle it at the global level
+      raise Skip
+    end
+
+    private
+
+    # Note that +worker+ can be nil here if an error is raised before we can
+    # instantiate the worker instance. All access must be guarded and
+    # best effort.
+    def attempt_retry(worker, msg, queue, exception)
+      max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+
+      msg['queue'] = if msg['retry_queue']
+        msg['retry_queue']
+      else
+        queue
+      end
+
+      # App code can stuff all sorts of crazy binary data into the error message
+      # that won't convert to JSON.
+      m = exception.message.to_s[0, 10_000]
+      if m.respond_to?(:scrub!)
+        m.force_encoding("utf-8")
+        m.scrub!
+      end
+
+      msg['error_message'] = m
+      msg['error_class'] = exception.class.name
+      count = if msg['retry_count']
+        msg['retried_at'] = Time.now.to_f
+        msg['retry_count'] += 1
+      else
+        msg['failed_at'] = Time.now.to_f
+        msg['retry_count'] = 0
+      end
+
+      if msg['backtrace'] == true
+        msg['error_backtrace'] = exception.backtrace
+      elsif !msg['backtrace']
+        # do nothing
+      elsif msg['backtrace'].to_i != 0
+        msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+      end
+
+      if count < max_retry_attempts
+        delay = delay_for(worker, count, exception)
+        logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+        retry_at = Time.now.to_f + delay
+        payload = Sidekiq.dump_json(msg)
+        Sidekiq.redis do |conn|
+          conn.zadd('retry', retry_at.to_s, payload)
+        end
+      else
+        # Goodbye dear message, you (re)tried your best I'm sure.
+        retries_exhausted(worker, msg, exception)
+      end
+    end
+
+    def retries_exhausted(worker, msg, exception)
+      logger.debug { "Retries exhausted for job" }
+      begin
+        block = worker && worker.sidekiq_retries_exhausted_block || Sidekiq.default_retries_exhausted
+        block.call(msg, exception) if block
+      rescue => e
+        handle_exception(e, { context: "Error calling retries_exhausted for #{msg['class']}", job: msg })
+      end
+
+      send_to_morgue(msg) unless msg['dead'] == false
+    end
+
+    def send_to_morgue(msg)
+      Sidekiq.logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+      payload = Sidekiq.dump_json(msg)
+      now = Time.now.to_f
+      Sidekiq.redis do |conn|
+        conn.multi do
+          conn.zadd('dead', now, payload)
+          conn.zremrangebyscore('dead', '-inf', now - DeadSet.timeout)
+          conn.zremrangebyrank('dead', 0, -DeadSet.max_jobs)
+        end
+      end
+    end
+
+    def retry_attempts_from(msg_retry, default)
+      if msg_retry.is_a?(Integer)
+        msg_retry
+      else
+        default
+      end
+    end
+
+    def delay_for(worker, count, exception)
+      worker && worker.sidekiq_retry_in_block? && retry_in(worker, count, exception) || seconds_to_delay(count)
+    end
+
+    # delayed_job uses the same basic formula
+    def seconds_to_delay(count)
+      (count ** 4) + 15 + (rand(30)*(count+1))
+    end
+
+    def retry_in(worker, count, exception)
+      begin
+        worker.sidekiq_retry_in_block.call(count, exception).to_i
+      rescue Exception => e
+        handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
+        nil
+      end
+    end
+
+    def exception_caused_by_shutdown?(e, checked_causes = [])
+      return false unless e.cause
+
+      # Handle circular causes
+      checked_causes << e.object_id
+      return false if checked_causes.include?(e.cause.object_id)
+
+      e.cause.instance_of?(Sidekiq::Shutdown) ||
+        exception_caused_by_shutdown?(e.cause, checked_causes)
+    end
+
+  end
+end
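
The hunk above (data/lib/sidekiq/job_retry.rb, per the file list) implements the retry options documented in its header comment. As a hedged illustration only, not part of the diff, a worker exercising those options might look like the sketch below; `sidekiq_retry_in` and `sidekiq_retries_exhausted` are the worker-level hooks behind the `sidekiq_retry_in_block` and `sidekiq_retries_exhausted_block` accessors read above, and the worker and argument names are made up:

    # Hypothetical worker, shown only to illustrate the options JobRetry reads.
    class ImageCrunchWorker
      include Sidekiq::Worker
      # At most 10 retries; store the first 20 backtrace lines in 'error_backtrace'.
      sidekiq_options retry: 10, backtrace: 20

      # Overrides the default delay of (count ** 4) + 15 + rand(30) * (count + 1) seconds.
      sidekiq_retry_in { |count, _exception| 10 * (count + 1) }

      # Runs when retries are exhausted, just before send_to_morgue.
      sidekiq_retries_exhausted do |msg, exception|
        Sidekiq.logger.warn("Giving up on #{msg['class']} #{msg['jid']}: #{exception.message}")
      end

      def perform(image_id)
        # work that may raise
      end
    end

With the default seconds_to_delay formula, retry 0 waits roughly 15-45 seconds while retry 24 waits about 24**4 = 331,776 seconds (close to four days), which is how 25 attempts stretch to the "about 3 weeks" mentioned in the comment.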
@@ -0,0 +1,167 @@
+# encoding: utf-8
+# frozen_string_literal: true
+require 'sidekiq/manager'
+require 'sidekiq/fetch'
+require 'sidekiq/scheduled'
+
+module Sidekiq
+  # The Launcher is a very simple Actor whose job is to
+  # start, monitor and stop the core Actors in Sidekiq.
+  # If any of these actors die, the Sidekiq process exits
+  # immediately.
+  class Launcher
+    include Util
+
+    attr_accessor :manager, :poller, :fetcher
+
+    def initialize(options)
+      @manager = Sidekiq::Manager.new(options)
+      @poller = Sidekiq::Scheduled::Poller.new
+      @done = false
+      @options = options
+    end
+
+    def run
+      @thread = safe_thread("heartbeat", &method(:start_heartbeat))
+      @poller.start
+      @manager.start
+    end
+
+    # Stops this instance from processing any more jobs,
+    #
+    def quiet
+      @done = true
+      @manager.quiet
+      @poller.terminate
+    end
+
+    # Shuts down the process. This method does not
+    # return until all work is complete and cleaned up.
+    # It can take up to the timeout to complete.
+    def stop
+      deadline = Time.now + @options[:timeout]
+
+      @done = true
+      @manager.quiet
+      @poller.terminate
+
+      @manager.stop(deadline)
+
+      # Requeue everything in case there was a worker who grabbed work while stopped
+      # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
+      strategy = (@options[:fetch] || Sidekiq::BasicFetch)
+      strategy.bulk_requeue([], @options)
+
+      clear_heartbeat
+    end
+
+    def stopping?
+      @done
+    end
+
+    private unless $TESTING
+
+    def heartbeat
+      results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, to_data) }
+      results.compact!
+      $0 = results.join(' ')
+
+      ❤
+    end
+
+    def ❤
+      key = identity
+      fails = procd = 0
+      begin
+        Processor::FAILURE.update {|curr| fails = curr; 0 }
+        Processor::PROCESSED.update {|curr| procd = curr; 0 }
+
+        workers_key = "#{key}:workers".freeze
+        nowdate = Time.now.utc.strftime("%Y-%m-%d".freeze)
+        Sidekiq.redis do |conn|
+          conn.multi do
+            conn.incrby("stat:processed".freeze, procd)
+            conn.incrby("stat:processed:#{nowdate}", procd)
+            conn.incrby("stat:failed".freeze, fails)
+            conn.incrby("stat:failed:#{nowdate}", fails)
+            conn.del(workers_key)
+            Processor::WORKER_STATE.each_pair do |tid, hash|
+              conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
+            end
+            conn.expire(workers_key, 60)
+          end
+        end
+        fails = procd = 0
+
+        _, exists, _, _, msg = Sidekiq.redis do |conn|
+          conn.multi do
+            conn.sadd('processes', key)
+            conn.exists(key)
+            conn.hmset(key, 'info', to_json, 'busy', Processor::WORKER_STATE.size, 'beat', Time.now.to_f, 'quiet', @done)
+            conn.expire(key, 60)
+            conn.rpop("#{key}-signals")
+          end
+        end
+
+        # first heartbeat or recovering from an outage and need to reestablish our heartbeat
+        fire_event(:heartbeat) if !exists
+
+        return unless msg
+
+        ::Process.kill(msg, $$)
+      rescue => e
+        # ignore all redis/network issues
+        logger.error("heartbeat: #{e.message}")
+        # don't lose the counts if there was a network issue
+        Processor::PROCESSED.increment(procd)
+        Processor::FAILURE.increment(fails)
+      end
+    end
+
+    def start_heartbeat
+      while true
+        heartbeat
+        sleep 5
+      end
+      Sidekiq.logger.info("Heartbeat stopping...")
+    end
+
+    def to_data
+      @data ||= begin
+        {
+          'hostname' => hostname,
+          'started_at' => Time.now.to_f,
+          'pid' => $$,
+          'tag' => @options[:tag] || '',
+          'concurrency' => @options[:concurrency],
+          'queues' => @options[:queues].uniq,
+          'labels' => @options[:labels],
+          'identity' => identity,
+        }
+      end
+    end
+
+    def to_json
+      @json ||= begin
+        # this data changes infrequently so dump it to a string
+        # now so we don't need to dump it every heartbeat.
+        Sidekiq.dump_json(to_data)
+      end
+    end
+
+    def clear_heartbeat
+      # Remove record from Redis since we are shutting down.
+      # Note we don't stop the heartbeat thread; if the process
+      # doesn't actually exit, it'll reappear in the Web UI.
+      Sidekiq.redis do |conn|
+        conn.pipelined do
+          conn.srem('processes', identity)
+          conn.del("#{identity}:workers")
+        end
+      end
+    rescue
+      # best effort, ignore network errors
+    end
+
+  end
+end
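
For context (not part of the diff): the Launcher added above (data/lib/sidekiq/launcher.rb) is normally driven by Sidekiq::CLI. A minimal, hedged sketch of the lifecycle it expects, using only the methods defined in this hunk and an assumed options hash:

    # Illustrative only; the keys mirror those read by #initialize, #stop and #to_data above.
    options = { concurrency: 10, queues: ['default'], timeout: 8, tag: '', labels: [] }

    launcher = Sidekiq::Launcher.new(options)
    launcher.run      # starts the heartbeat thread, the scheduled poller and the manager

    launcher.quiet    # stop picking up new jobs (the CLI typically wires this to TSTP/USR1)

    # Wait up to options[:timeout] seconds for in-progress jobs, requeue the rest,
    # then remove this process from Redis via clear_heartbeat (typically wired to TERM).
    launcher.stop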
@@ -0,0 +1,106 @@
+# frozen_string_literal: true
+require 'time'
+require 'logger'
+require 'fcntl'
+
+module Sidekiq
+  module Logging
+
+    class Pretty < Logger::Formatter
+      SPACE = " "
+
+      # Provide a call() method that returns the formatted message.
+      def call(severity, time, program_name, message)
+        "#{time.utc.iso8601(3)} #{::Process.pid} TID-#{Thread.current.object_id.to_s(36)}#{context} #{severity}: #{message}\n"
+      end
+
+      def context
+        c = Thread.current[:sidekiq_context]
+        " #{c.join(SPACE)}" if c && c.any?
+      end
+    end
+
+    class WithoutTimestamp < Pretty
+      def call(severity, time, program_name, message)
+        "#{::Process.pid} TID-#{Thread.current.object_id.to_s(36)}#{context} #{severity}: #{message}\n"
+      end
+    end
+
+    def self.with_context(msg)
+      Thread.current[:sidekiq_context] ||= []
+      Thread.current[:sidekiq_context] << msg
+      yield
+    ensure
+      Thread.current[:sidekiq_context].pop
+    end
+
+    def self.initialize_logger(log_target = STDOUT)
+      oldlogger = defined?(@logger) ? @logger : nil
+      @logger = Logger.new(log_target)
+      @logger.level = Logger::INFO
+      @logger.formatter = ENV['DYNO'] ? WithoutTimestamp.new : Pretty.new
+      oldlogger.close if oldlogger && !$TESTING # don't want to close testing's STDOUT logging
+      @logger
+    end
+
+    def self.logger
+      defined?(@logger) ? @logger : initialize_logger
+    end
+
+    def self.logger=(log)
+      @logger = (log ? log : Logger.new(File::NULL))
+    end
+
+    # This reopens ALL logfiles in the process that have been rotated
+    # using logrotate(8) (without copytruncate) or similar tools.
+    # A +File+ object is considered for reopening if it is:
+    #   1) opened with the O_APPEND and O_WRONLY flags
+    #   2) the current open file handle does not match its original open path
+    #   3) unbuffered (as far as userspace buffering goes, not O_SYNC)
+    # Returns the number of files reopened
+    def self.reopen_logs
+      to_reopen = []
+      append_flags = File::WRONLY | File::APPEND
+
+      ObjectSpace.each_object(File) do |fp|
+        begin
+          if !fp.closed? && fp.stat.file? && fp.sync && (fp.fcntl(Fcntl::F_GETFL) & append_flags) == append_flags
+            to_reopen << fp
+          end
+        rescue IOError, Errno::EBADF
+        end
+      end
+
+      nr = 0
+      to_reopen.each do |fp|
+        orig_st = begin
+          fp.stat
+        rescue IOError, Errno::EBADF
+          next
+        end
+
+        begin
+          b = File.stat(fp.path)
+          next if orig_st.ino == b.ino && orig_st.dev == b.dev
+        rescue Errno::ENOENT
+        end
+
+        begin
+          File.open(fp.path, 'a') { |tmpfp| fp.reopen(tmpfp) }
+          fp.sync = true
+          nr += 1
+        rescue IOError, Errno::EBADF
+          # not much we can do...
+        end
+      end
+      nr
+    rescue RuntimeError => ex
+      # RuntimeError: ObjectSpace is disabled; each_object will only work with Class, pass -X+O to enable
+      puts "Unable to reopen logs: #{ex.message}"
+    end
+
+    def logger
+      Sidekiq::Logging.logger
+    end
+  end
+end
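
A short, hedged sketch (not part of the diff) of how the Sidekiq::Logging module added above (data/lib/sidekiq/logging.rb) is used; it assumes Sidekiq.logger delegates to Sidekiq::Logging.logger as defined in lib/sidekiq.rb from the file list:

    # Illustrative only.
    Sidekiq::Logging.logger.level = Logger::DEBUG   # logger is lazily built by initialize_logger
    Sidekiq::Logging.logger = Logger.new(STDOUT)    # or replace it; assigning nil routes output to File::NULL

    # Every line logged inside the block gets the context appended after the TID
    # by Pretty#context, e.g. "2017-04-25T12:00:00.000Z 1234 TID-abc HardWorker JID-xyz INFO: processing"
    Sidekiq::Logging.with_context("HardWorker JID-xyz") do
      Sidekiq.logger.info("processing")
    end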