sidekiq_queue_manager 1.0.2 → 1.1.1
This diff shows the changes between publicly released versions of the package as they appear in the supported public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +122 -20
- data/app/assets/javascripts/sidekiq_queue_manager/application.js +859 -12
- data/app/assets/stylesheets/sidekiq_queue_manager/application.css +467 -7
- data/app/controllers/sidekiq_queue_manager/application_controller.rb +14 -5
- data/app/controllers/sidekiq_queue_manager/dashboard_controller.rb +202 -0
- data/app/services/sidekiq_queue_manager/queue_service.rb +485 -0
- data/app/views/sidekiq_queue_manager/dashboard/index.html.erb +287 -19
- data/config/routes.rb +21 -0
- data/lib/sidekiq_queue_manager/version.rb +1 -1
- metadata +1 -1
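The bulk of this release is new scheduled/retry/dead job management in the queue service and the dashboard around it. Below is a minimal usage sketch, assuming the class in `app/services/sidekiq_queue_manager/queue_service.rb` is `SidekiqQueueManager::QueueService` and is used as a plain instance; the class name, instantiation, and JID values are inferred from the file paths and method signatures in this diff, not confirmed by the gem's documentation.

```ruby
# Hypothetical usage sketch; class name and instantiation are assumptions
# inferred from the file path above, not guaranteed by the gem's public API.
service = SidekiqQueueManager::QueueService.new

# List scheduled jobs, 25 per page, filtered by job class substring.
result = service.scheduled_jobs(page: 1, per_page: 25, filter: 'ReportJob')
# The exact response shape comes from the success_response helper (not shown
# in this diff); it carries the formatted jobs plus total/filtered counts and
# pagination data.
puts result

# Act on individual jobs by JID (values here are placeholders).
service.enqueue_scheduled_job('0123456789abcdef')  # run a scheduled job now
service.retry_job_now('0123456789abcdef')          # retry a failed job immediately
service.resurrect_dead_job('0123456789abcdef')     # move a dead job back to retries

# Bulk maintenance, optionally scoped by a class-name filter.
service.clear_retry_jobs(filter: 'LegacyJob')
service.clear_dead_jobs
```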
@@ -117,6 +117,393 @@ module SidekiqQueueManager
       handle_service_error(e, 'bulk resume operation')
     end
 
+    # ========================================
+    # Scheduled Jobs Management
+    # ========================================
+
+    # Get all scheduled jobs with pagination and filtering
+    # @param page [Integer] page number (1-based)
+    # @param per_page [Integer] jobs per page (max 100)
+    # @param filter [String] optional filter by job class
+    # @return [Hash] response with scheduled jobs data
+    def scheduled_jobs(page: 1, per_page: 25, filter: nil)
+      page = page.to_i.clamp(1, Float::INFINITY)
+      per_page = per_page.to_i.clamp(1, 100)
+
+      scheduled_set = Sidekiq::ScheduledSet.new
+      total_jobs = scheduled_set.size
+
+      # Apply filtering if specified
+      jobs = if filter.present?
+               scheduled_set.select { |job| job.klass.include?(filter) }
+             else
+               scheduled_set.to_a
+             end
+
+      # Sort by scheduled time (ascending)
+      jobs = jobs.sort_by(&:at)
+
+      # Apply pagination
+      offset = (page - 1) * per_page
+      paginated_jobs = jobs.slice(offset, per_page) || []
+
+      formatted_jobs = paginated_jobs.map.with_index(offset + 1) do |job, position|
+        format_scheduled_job_data(job, position)
+      end
+
+      success_response('Scheduled jobs retrieved successfully',
+                       jobs: formatted_jobs,
+                       total_count: total_jobs,
+                       filtered_count: jobs.size,
+                       pagination: build_pagination_data(page, per_page, jobs.size))
+    rescue StandardError => e
+      handle_service_error(e, 'get scheduled jobs')
+    end
+
+    # Delete a scheduled job
+    # @param job_id [String] the job ID (JID)
+    # @return [Hash] response with success status
+    def delete_scheduled_job(job_id)
+      return failure_response('Invalid job ID') if job_id.blank?
+
+      scheduled_set = Sidekiq::ScheduledSet.new
+      job = scheduled_set.find_job(job_id)
+
+      return failure_response('Scheduled job not found') unless job
+
+      job.delete
+      log_operation("Scheduled job #{job_id} (#{job.klass}) deleted")
+      success_response('Scheduled job deleted successfully')
+    rescue StandardError => e
+      handle_service_error(e, "delete scheduled job #{job_id}")
+    end
+
+    # Enqueue a scheduled job immediately
+    # @param job_id [String] the job ID (JID)
+    # @return [Hash] response with success status
+    def enqueue_scheduled_job(job_id)
+      return failure_response('Invalid job ID') if job_id.blank?
+
+      scheduled_set = Sidekiq::ScheduledSet.new
+      job = scheduled_set.find_job(job_id)
+
+      return failure_response('Scheduled job not found') unless job
+
+      job.add_to_queue
+      log_operation("Scheduled job #{job_id} (#{job.klass}) enqueued immediately")
+      success_response('Scheduled job enqueued successfully')
+    rescue StandardError => e
+      handle_service_error(e, "enqueue scheduled job #{job_id}")
+    end
+
+    # Clear all scheduled jobs (with optional filtering)
+    # @param filter [String] optional filter by job class
+    # @return [Hash] response with count of cleared jobs
+    def clear_scheduled_jobs(filter: nil)
+      scheduled_set = Sidekiq::ScheduledSet.new
+      initial_count = scheduled_set.size
+
+      if filter.present?
+        jobs_to_delete = scheduled_set.select { |job| job.klass.include?(filter) }
+        jobs_to_delete.each(&:delete)
+        cleared_count = jobs_to_delete.size
+        message = "Cleared #{cleared_count} scheduled jobs matching '#{filter}'"
+      else
+        scheduled_set.clear
+        cleared_count = initial_count
+        message = "Cleared all #{cleared_count} scheduled jobs"
+      end
+
+      log_operation(message)
+      success_response(message, jobs_cleared: cleared_count)
+    rescue StandardError => e
+      handle_service_error(e, 'clear scheduled jobs')
+    end
+
+    # ========================================
+    # Retry Jobs Management
+    # ========================================
+
+    # Get all retry jobs with pagination and filtering
+    # @param page [Integer] page number (1-based)
+    # @param per_page [Integer] jobs per page (max 100)
+    # @param filter [String] optional filter by job class
+    # @return [Hash] response with retry jobs data
+    def retry_jobs(page: 1, per_page: 25, filter: nil)
+      page = page.to_i.clamp(1, Float::INFINITY)
+      per_page = per_page.to_i.clamp(1, 100)
+
+      retry_set = Sidekiq::RetrySet.new
+      total_jobs = retry_set.size
+
+      # Apply filtering if specified
+      jobs = if filter.present?
+               retry_set.select { |job| job.klass.include?(filter) }
+             else
+               retry_set.to_a
+             end
+
+      # Sort by next retry time (ascending)
+      jobs = jobs.sort_by { |job| job['retry_at'] || job['failed_at'] || 0 }
+
+      # Apply pagination
+      offset = (page - 1) * per_page
+      paginated_jobs = jobs.slice(offset, per_page) || []
+
+      formatted_jobs = paginated_jobs.map.with_index(offset + 1) do |job, position|
+        format_retry_job_data(job, position)
+      end
+
+      success_response('Retry jobs retrieved successfully',
+                       jobs: formatted_jobs,
+                       total_count: total_jobs,
+                       filtered_count: jobs.size,
+                       pagination: build_pagination_data(page, per_page, jobs.size))
+    rescue StandardError => e
+      handle_service_error(e, 'get retry jobs')
+    end
+
+    # Retry a job immediately
+    # @param job_id [String] the job ID (JID)
+    # @return [Hash] response with success status
+    def retry_job_now(job_id)
+      return failure_response('Invalid job ID') if job_id.blank?
+
+      retry_set = Sidekiq::RetrySet.new
+      job = retry_set.find_job(job_id)
+
+      return failure_response('Retry job not found') unless job
+
+      job.retry
+      log_operation("Retry job #{job_id} (#{job.klass}) retried immediately")
+      success_response('Job retried successfully')
+    rescue StandardError => e
+      handle_service_error(e, "retry job #{job_id}")
+    end
+
+    # Delete a retry job
+    # @param job_id [String] the job ID (JID)
+    # @return [Hash] response with success status
+    def delete_retry_job(job_id)
+      return failure_response('Invalid job ID') if job_id.blank?
+
+      retry_set = Sidekiq::RetrySet.new
+      job = retry_set.find_job(job_id)
+
+      return failure_response('Retry job not found') unless job
+
+      job.delete
+      log_operation("Retry job #{job_id} (#{job.klass}) deleted")
+      success_response('Retry job deleted successfully')
+    rescue StandardError => e
+      handle_service_error(e, "delete retry job #{job_id}")
+    end
+
+    # Kill a retry job (move to dead set)
+    # @param job_id [String] the job ID (JID)
+    # @return [Hash] response with success status
+    def kill_retry_job(job_id)
+      return failure_response('Invalid job ID') if job_id.blank?
+
+      retry_set = Sidekiq::RetrySet.new
+      job = retry_set.find_job(job_id)
+
+      return failure_response('Retry job not found') unless job
+
+      job.kill
+      log_operation("Retry job #{job_id} (#{job.klass}) moved to dead queue")
+      success_response('Job moved to dead queue successfully')
+    rescue StandardError => e
+      handle_service_error(e, "kill retry job #{job_id}")
+    end
+
+    # Clear all retry jobs (with optional filtering)
+    # @param filter [String] optional filter by job class
+    # @return [Hash] response with count of cleared jobs
+    def clear_retry_jobs(filter: nil)
+      retry_set = Sidekiq::RetrySet.new
+      initial_count = retry_set.size
+
+      if filter.present?
+        jobs_to_delete = retry_set.select { |job| job.klass.include?(filter) }
+        jobs_to_delete.each(&:delete)
+        cleared_count = jobs_to_delete.size
+        message = "Cleared #{cleared_count} retry jobs matching '#{filter}'"
+      else
+        retry_set.clear
+        cleared_count = initial_count
+        message = "Cleared all #{cleared_count} retry jobs"
+      end
+
+      log_operation(message)
+      success_response(message, jobs_cleared: cleared_count)
+    rescue StandardError => e
+      handle_service_error(e, 'clear retry jobs')
+    end
+
+    # Retry all jobs in the retry set
+    # @param filter [String] optional filter by job class
+    # @return [Hash] response with count of retried jobs
+    def retry_all_jobs(filter: nil)
+      retry_set = Sidekiq::RetrySet.new
+
+      jobs_to_retry = if filter.present?
+                        retry_set.select { |job| job.klass.include?(filter) }
+                      else
+                        retry_set.to_a
+                      end
+
+      retried_count = 0
+      jobs_to_retry.each do |job|
+        job.retry
+        retried_count += 1
+      end
+
+      message = if filter.present?
+                  "Retried #{retried_count} jobs matching '#{filter}'"
+                else
+                  "Retried all #{retried_count} jobs"
+                end
+
+      log_operation(message)
+      success_response(message, jobs_retried: retried_count)
+    rescue StandardError => e
+      handle_service_error(e, 'retry all jobs')
+    end
+
+    # ========================================
+    # Dead Jobs Management
+    # ========================================
+
+    # Get all dead jobs with pagination and filtering
+    # @param page [Integer] page number (1-based)
+    # @param per_page [Integer] jobs per page (max 100)
+    # @param filter [String] optional filter by job class
+    # @return [Hash] response with dead jobs data
+    def dead_jobs(page: 1, per_page: 25, filter: nil)
+      page = page.to_i.clamp(1, Float::INFINITY)
+      per_page = per_page.to_i.clamp(1, 100)
+
+      dead_set = Sidekiq::DeadSet.new
+      total_jobs = dead_set.size
+
+      # Apply filtering if specified
+      jobs = if filter.present?
+               dead_set.select { |job| job.klass.include?(filter) }
+             else
+               dead_set.to_a
+             end
+
+      # Sort by death time (most recent first)
+      jobs = jobs.sort_by { |job| -(job['failed_at'] || 0) }
+
+      # Apply pagination
+      offset = (page - 1) * per_page
+      paginated_jobs = jobs.slice(offset, per_page) || []
+
+      formatted_jobs = paginated_jobs.map.with_index(offset + 1) do |job, position|
+        format_dead_job_data(job, position)
+      end
+
+      success_response('Dead jobs retrieved successfully',
+                       jobs: formatted_jobs,
+                       total_count: total_jobs,
+                       filtered_count: jobs.size,
+                       pagination: build_pagination_data(page, per_page, jobs.size))
+    rescue StandardError => e
+      handle_service_error(e, 'get dead jobs')
+    end
+
+    # Resurrect a dead job (move back to retry set)
+    # @param job_id [String] the job ID (JID)
+    # @return [Hash] response with success status
+    def resurrect_dead_job(job_id)
+      return failure_response('Invalid job ID') if job_id.blank?
+
+      dead_set = Sidekiq::DeadSet.new
+      job = dead_set.find_job(job_id)
+
+      return failure_response('Dead job not found') unless job
+
+      job.retry
+      log_operation("Dead job #{job_id} (#{job.klass}) resurrected to retry queue")
+      success_response('Dead job resurrected successfully')
+    rescue StandardError => e
+      handle_service_error(e, "resurrect dead job #{job_id}")
+    end
+
+    # Delete a dead job permanently
+    # @param job_id [String] the job ID (JID)
+    # @return [Hash] response with success status
+    def delete_dead_job(job_id)
+      return failure_response('Invalid job ID') if job_id.blank?
+
+      dead_set = Sidekiq::DeadSet.new
+      job = dead_set.find_job(job_id)
+
+      return failure_response('Dead job not found') unless job
+
+      job.delete
+      log_operation("Dead job #{job_id} (#{job.klass}) deleted permanently")
+      success_response('Dead job deleted permanently')
+    rescue StandardError => e
+      handle_service_error(e, "delete dead job #{job_id}")
+    end
+
+    # Clear all dead jobs (with optional filtering)
+    # @param filter [String] optional filter by job class
+    # @return [Hash] response with count of cleared jobs
+    def clear_dead_jobs(filter: nil)
+      dead_set = Sidekiq::DeadSet.new
+      initial_count = dead_set.size
+
+      if filter.present?
+        jobs_to_delete = dead_set.select { |job| job.klass.include?(filter) }
+        jobs_to_delete.each(&:delete)
+        cleared_count = jobs_to_delete.size
+        message = "Cleared #{cleared_count} dead jobs matching '#{filter}'"
+      else
+        dead_set.clear
+        cleared_count = initial_count
+        message = "Cleared all #{cleared_count} dead jobs"
+      end
+
+      log_operation(message)
+      success_response(message, jobs_cleared: cleared_count)
+    rescue StandardError => e
+      handle_service_error(e, 'clear dead jobs')
+    end
+
+    # Resurrect all dead jobs (move back to retry set)
+    # @param filter [String] optional filter by job class
+    # @return [Hash] response with count of resurrected jobs
+    def resurrect_all_dead_jobs(filter: nil)
+      dead_set = Sidekiq::DeadSet.new
+
+      jobs_to_resurrect = if filter.present?
+                            dead_set.select { |job| job.klass.include?(filter) }
+                          else
+                            dead_set.to_a
+                          end
+
+      resurrected_count = 0
+      jobs_to_resurrect.each do |job|
+        job.retry
+        resurrected_count += 1
+      end
+
+      message = if filter.present?
+                  "Resurrected #{resurrected_count} dead jobs matching '#{filter}'"
+                else
+                  "Resurrected all #{resurrected_count} dead jobs"
+                end
+
+      log_operation(message)
+      success_response(message, jobs_resurrected: resurrected_count)
+    rescue StandardError => e
+      handle_service_error(e, 'resurrect all dead jobs')
+    end
+
     # ========================================
     # Statistics and Monitoring
     # ========================================
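The three listing methods above share the same pagination arithmetic: `page` is clamped to at least 1, `per_page` to 1..100, and the slice offset is `(page - 1) * per_page`, with past-the-end pages yielding an empty list. A standalone illustration of that arithmetic (a sketch mirroring the diff, not gem code, no Sidekiq required):

```ruby
# Mirrors the clamping and offset logic of scheduled_jobs / retry_jobs /
# dead_jobs above; a standalone sketch, not part of the gem.
def page_slice(items, page:, per_page:)
  page     = page.to_i.clamp(1, Float::INFINITY)  # pages below 1 become page 1
  per_page = per_page.to_i.clamp(1, 100)          # page size capped at 100
  offset   = (page - 1) * per_page
  items.slice(offset, per_page) || []             # past-the-end pages return []
end

items = (1..230).to_a
page_slice(items, page: 0,  per_page: 25).first   # => 1  (page clamped up to 1)
page_slice(items, page: 3,  per_page: 500).size   # => 30 (per_page capped at 100, offset 200)
page_slice(items, page: 99, per_page: 25)         # => []
```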
@@ -364,6 +751,104 @@ module SidekiqQueueManager
       }
     end
 
+    def format_scheduled_job_data(job, position)
+      {
+        position: position,
+        jid: job.jid,
+        class: job.klass,
+        args: job.args,
+        queue: job.queue,
+        created_at: job.created_at&.strftime('%Y-%m-%d %H:%M:%S'),
+        scheduled_at: Time.zone.at(job.at).strftime('%Y-%m-%d %H:%M:%S'),
+        scheduled_at_epoch: job.at,
+        retry_count: job['retry_count'] || 0,
+        time_until_execution: calculate_time_until(job.at),
+        priority: job['priority']
+      }
+    end
+
+    def format_retry_job_data(job, position)
+      {
+        position: position,
+        jid: job.jid,
+        class: job.klass,
+        args: job.args,
+        queue: job.queue,
+        created_at: job.created_at&.strftime('%Y-%m-%d %H:%M:%S'),
+        failed_at: job['failed_at'] ? Time.zone.at(job['failed_at']).strftime('%Y-%m-%d %H:%M:%S') : nil,
+        retry_at: job['retry_at'] ? Time.zone.at(job['retry_at']).strftime('%Y-%m-%d %H:%M:%S') : nil,
+        retry_at_epoch: job['retry_at'],
+        retry_count: job['retry_count'] || 0,
+        retry_limit: job['retry'] || 25,
+        error_message: job['error_message'],
+        error_class: job['error_class'],
+        failed_at_relative: time_ago_in_words(job['failed_at']),
+        next_retry_relative: job['retry_at'] ? time_until_in_words(job['retry_at']) : nil
+      }
+    end
+
+    def format_dead_job_data(job, position)
+      {
+        position: position,
+        jid: job.jid,
+        class: job.klass,
+        args: job.args,
+        queue: job.queue,
+        created_at: job.created_at&.strftime('%Y-%m-%d %H:%M:%S'),
+        failed_at: job['failed_at'] ? Time.zone.at(job['failed_at']).strftime('%Y-%m-%d %H:%M:%S') : nil,
+        failed_at_epoch: job['failed_at'],
+        retry_count: job['retry_count'] || 0,
+        error_message: job['error_message'],
+        error_class: job['error_class'],
+        backtrace: job['error_backtrace']&.first(5), # First 5 lines of backtrace
+        failed_at_relative: time_ago_in_words(job['failed_at'])
+      }
+    end
+
+    # Helper method to calculate time until execution for scheduled jobs
+    def calculate_time_until(scheduled_at_epoch)
+      return 'Now' if scheduled_at_epoch <= Time.current.to_f
+
+      Time.current.to_f
+      time_until_in_words(scheduled_at_epoch)
+    end
+
+    # Convert epoch time to relative "time ago" string
+    def time_ago_in_words(epoch_time)
+      return 'Unknown' unless epoch_time
+
+      time_diff = Time.current.to_f - epoch_time
+      case time_diff
+      when 0..59
+        "#{time_diff.to_i}s ago"
+      when 60..3599
+        "#{(time_diff / 60).to_i}m ago"
+      when 3600..86399
+        "#{(time_diff / 3600).to_i}h ago"
+      else
+        "#{(time_diff / 86400).to_i}d ago"
+      end
+    end
+
+    # Convert epoch time to relative "time until" string
+    def time_until_in_words(epoch_time)
+      return 'Now' unless epoch_time
+
+      time_diff = epoch_time - Time.current.to_f
+      return 'Now' if time_diff <= 0
+
+      case time_diff
+      when 0..59
+        "in #{time_diff.to_i}s"
+      when 60..3599
+        "in #{(time_diff / 60).to_i}m"
+      when 3600..86399
+        "in #{(time_diff / 3600).to_i}h"
+      else
+        "in #{(time_diff / 86400).to_i}d"
+      end
+    end
+
     def build_bulk_operation_message(operation, success_count, skipped_count, failed_queues)
       message = "Bulk #{operation} completed. #{operation.capitalize}d: #{success_count}, Skipped: #{skipped_count}"
       message += ", Failed: #{failed_queues.join(', ')}" if failed_queues.any?
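The relative-time helpers in the second hunk bucket a difference in seconds into seconds, minutes, hours, or days. A quick standalone check of those thresholds (a sketch mirroring, not calling, the gem's private helpers):

```ruby
# Same second/minute/hour/day bucketing as time_ago_in_words and
# time_until_in_words above; a standalone sketch, not the gem's own methods.
def bucket(seconds)
  case seconds
  when 0..59        then "#{seconds.to_i}s"
  when 60..3599     then "#{(seconds / 60).to_i}m"
  when 3600..86_399 then "#{(seconds / 3600).to_i}h"
  else                   "#{(seconds / 86_400).to_i}d"
  end
end

bucket(45)      # => "45s"
bucket(7_200)   # => "2h"
bucket(90_000)  # => "1d"
```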