jiggler 0.1.0.rc2
- checksums.yaml +7 -0
- data/CHANGELOG.md +6 -0
- data/LICENSE +4 -0
- data/README.md +423 -0
- data/bin/jiggler +31 -0
- data/lib/jiggler/cleaner.rb +130 -0
- data/lib/jiggler/cli.rb +263 -0
- data/lib/jiggler/config.rb +165 -0
- data/lib/jiggler/core.rb +22 -0
- data/lib/jiggler/errors.rb +5 -0
- data/lib/jiggler/job.rb +116 -0
- data/lib/jiggler/launcher.rb +69 -0
- data/lib/jiggler/manager.rb +73 -0
- data/lib/jiggler/redis_store.rb +55 -0
- data/lib/jiggler/retrier.rb +122 -0
- data/lib/jiggler/scheduled/enqueuer.rb +78 -0
- data/lib/jiggler/scheduled/poller.rb +97 -0
- data/lib/jiggler/stats/collection.rb +26 -0
- data/lib/jiggler/stats/monitor.rb +103 -0
- data/lib/jiggler/summary.rb +101 -0
- data/lib/jiggler/support/helper.rb +35 -0
- data/lib/jiggler/version.rb +5 -0
- data/lib/jiggler/web/assets/stylesheets/application.css +64 -0
- data/lib/jiggler/web/views/application.erb +329 -0
- data/lib/jiggler/web.rb +80 -0
- data/lib/jiggler/worker.rb +179 -0
- data/lib/jiggler.rb +10 -0
- data/spec/examples.txt +79 -0
- data/spec/fixtures/config/jiggler.yml +4 -0
- data/spec/fixtures/jobs.rb +5 -0
- data/spec/fixtures/my_failed_job.rb +10 -0
- data/spec/fixtures/my_job.rb +9 -0
- data/spec/fixtures/my_job_with_args.rb +18 -0
- data/spec/jiggler/cleaner_spec.rb +171 -0
- data/spec/jiggler/cli_spec.rb +87 -0
- data/spec/jiggler/config_spec.rb +56 -0
- data/spec/jiggler/core_spec.rb +34 -0
- data/spec/jiggler/job_spec.rb +99 -0
- data/spec/jiggler/launcher_spec.rb +66 -0
- data/spec/jiggler/manager_spec.rb +52 -0
- data/spec/jiggler/redis_store_spec.rb +20 -0
- data/spec/jiggler/retrier_spec.rb +55 -0
- data/spec/jiggler/scheduled/enqueuer_spec.rb +81 -0
- data/spec/jiggler/scheduled/poller_spec.rb +40 -0
- data/spec/jiggler/stats/monitor_spec.rb +40 -0
- data/spec/jiggler/summary_spec.rb +168 -0
- data/spec/jiggler/web_spec.rb +37 -0
- data/spec/jiggler/worker_spec.rb +110 -0
- data/spec/spec_helper.rb +54 -0
- metadata +230 -0
data/lib/jiggler/redis_store.rb
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+require 'redis_client'
+
+module Jiggler
+  class RedisStore
+    def initialize(options = {})
+      @options = options
+    end
+
+    def pool
+      @options[:async] ? async_pool : sync_pool
+    end
+
+    def async_pool
+      @async_pool ||= begin
+        config = RedisClient.config(url: @options[:redis_url], timeout: nil)
+        Async::Pool::Controller.wrap(limit: @options[:concurrency]) do
+          config.new_client
+        end
+      end
+    end
+
+    def sync_pool
+      @sync_pool ||= begin
+        config = RedisClient.config(url: @options[:redis_url])
+        pool = config.new_pool(size: @options[:concurrency])
+        def pool.acquire(&block)
+          with(&block)
+        end
+        pool
+      end
+    end
+  end
+end
+
+module Jiggler
+  class RedisClient < ::RedisClient
+    def concurrency
+      1
+    end
+
+    def viable?
+      connected?
+    end
+
+    def closed?
+      @raw_connection.nil?
+    end
+
+    def reusable?
+      !@raw_connection.nil?
+    end
+  end
+end
data/lib/jiggler/retrier.rb
@@ -0,0 +1,122 @@
+# frozen_string_literal: true
+
+module Jiggler
+  class Retrier
+    include Support::Helper
+
+    attr_reader :config, :collection
+
+    def initialize(config, collection)
+      @config = config
+      @collection = collection
+      @tid = tid
+    end
+
+    def wrapped(instance, parsed_job, queue)
+      logger.info {
+        "Starting #{instance.class.name} queue=#{instance.class.queue} tid=#{@tid} jid=#{parsed_job['jid']}"
+      }
+      yield
+      logger.info {
+        "Finished #{instance.class.name} queue=#{instance.class.queue} tid=#{@tid} jid=#{parsed_job['jid']}"
+      }
+    rescue Async::Stop => stop
+      raise stop
+    rescue => err
+      raise Async::Stop if exception_caused_by_shutdown?(err)
+
+      process_retry(instance, parsed_job, queue, err)
+      collection.incr_failures
+
+      log_error(
+        err,
+        {
+          context: '\'Job raised exception\'',
+          error_class: err.class.name,
+          name: parsed_job['name'],
+          queue: parsed_job['queue'],
+          args: parsed_job['args'],
+          attempt: parsed_job['attempt'],
+          tid: @tid,
+          jid: parsed_job['jid']
+        }
+      )
+    end
+
+    private
+
+    def process_retry(jobinst, parsed_job, queue, exception)
+      job_class = jobinst.class
+      parsed_job['started_at'] ||= Time.now.to_f
+
+      message = exception_message(exception)
+      if message.respond_to?(:scrub!)
+        message.force_encoding('utf-8')
+        message.scrub!
+      end
+
+      parsed_job['error_message'] = message
+      parsed_job['error_class'] = exception.class.name
+
+      max_retry_attempts = parsed_job['retries'].to_i
+      count = parsed_job['attempt'].to_i + 1
+      return retries_exhausted(jobinst, parsed_job, exception) if count > max_retry_attempts
+
+      delay = count**4 + 15
+      retry_at = Time.now.to_f + delay
+      parsed_job['retry_at'] = retry_at
+      if count > 1
+        parsed_job['retried_at'] = Time.now.to_f
+      end
+      parsed_job['attempt'] = count
+      parsed_job['queue'] = job_class.retry_queue
+      payload = Oj.dump(parsed_job, mode: :compat)
+
+      config.with_async_redis do |conn|
+        conn.call('ZADD', config.retries_set, retry_at.to_s, payload)
+      end
+    end
+
+    def retries_exhausted(jobinst, parsed_job, exception)
+      logger.debug('Retrier') {
+        "Retries exhausted for #{parsed_job['name']} jid=#{parsed_job['jid']}"
+      }
+
+      send_to_morgue(parsed_job)
+    end
+
+    def send_to_morgue(parsed_job)
+      logger.warn('Retrier') {
+        "#{parsed_job['name']} has been sent to dead jid=#{parsed_job['jid']}"
+      }
+      payload = Oj.dump(parsed_job, mode: :compat)
+      now = Time.now.to_f
+
+      config.with_async_redis do |conn|
+        conn.multi do |xa|
+          xa.call('ZADD', config.dead_set, now.to_s, payload)
+          xa.call('ZREMRANGEBYSCORE', config.dead_set, '-inf', now - config[:dead_timeout])
+          xa.call('ZREMRANGEBYRANK', config.dead_set, 0, -config[:max_dead_jobs])
+        end
+      end
+    end
+
+    def exception_caused_by_shutdown?(e, checked_causes = [])
+      return false unless e.cause
+
+      # Handle circular causes
+      checked_causes << e.object_id
+      return false if checked_causes.include?(e.cause.object_id)
+
+      e.cause.instance_of?(Async::Stop) ||
+        exception_caused_by_shutdown?(e.cause, checked_causes)
+    end
+
+    def exception_message(exception)
+      # Message from app code
+      exception.message.to_s[0, 10_000]
+    rescue
+      'Exception message unavailable'
+    end
+  end
+end
data/lib/jiggler/scheduled/enqueuer.rb
@@ -0,0 +1,78 @@
+# frozen_string_literal: true
+
+module Jiggler
+  module Scheduled
+    class Enqueuer
+      include Support::Helper
+
+      LUA_ZPOPBYSCORE = <<~LUA
+        local key, now = KEYS[1], ARGV[1]
+        local jobs = redis.call('zrangebyscore', key, '-inf', now, 'limit', 0, 1)
+        if jobs[1] then
+          redis.call('zrem', key, jobs[1])
+          return jobs[1]
+        end
+      LUA
+
+      def initialize(config)
+        @config = config
+        @done = false
+        @lua_zpopbyscore_sha = nil
+        @tid = tid
+      end
+
+      def enqueue_jobs
+        @config.with_async_redis do |conn|
+          sorted_sets.each do |sorted_set|
+            # Get next item in the queue with score (time to execute) <= now
+            job_args = zpopbyscore(conn, key: sorted_set, argv: Time.now.to_f.to_s)
+            while !@done && job_args
+              push_job(conn, job_args)
+              job_args = zpopbyscore(conn, key: sorted_set, argv: Time.now.to_f.to_s)
+            end
+          end
+        rescue => err
+          log_error_short(err, { context: '\'Enqueuing jobs error\'', tid: @tid })
+        end
+      end
+
+      def terminate
+        @done = true
+      end
+
+      def push_job(conn, job_args)
+        name = Oj.load(job_args, mode: :compat)['queue'] || @config.default_queue
+        list_name = "#{@config.queue_prefix}#{name}"
+        # logger.debug('Poller Enqueuer') { "Pushing #{job_args} to #{list_name}" }
+        conn.call('LPUSH', list_name, job_args)
+      rescue => err
+        log_error_short(
+          err, {
+            context: '\'Pushing scheduled job error\'',
+            tid: @tid,
+            job_args: job_args,
+            queue: list_name
+          }
+        )
+      end
+
+      private
+
+      def sorted_sets
+        @sorted_sets ||= [@config.retries_set, @config.scheduled_set].freeze
+      end
+
+      def zpopbyscore(conn, key: nil, argv: nil)
+        if @lua_zpopbyscore_sha.nil?
+          @lua_zpopbyscore_sha = conn.call('SCRIPT', 'LOAD', LUA_ZPOPBYSCORE)
+        end
+        conn.call('EVALSHA', @lua_zpopbyscore_sha, 1, key, argv)
+      rescue RedisClient::CommandError => e
+        raise unless e.message.start_with?('NOSCRIPT')
+
+        @lua_zpopbyscore_sha = nil
+        retry
+      end
+    end
+  end
+end
data/lib/jiggler/scheduled/poller.rb
@@ -0,0 +1,97 @@
+# frozen_string_literal: true
+
+# The Poller checks Redis every N seconds for jobs in the retry or scheduled
+# set that have passed their timestamp and should be enqueued.
+module Jiggler
+  module Scheduled
+    class Poller
+      include Support::Helper
+
+      INITIAL_WAIT = 5
+
+      def initialize(config)
+        @config = config
+        @enqueuer = Jiggler::Scheduled::Enqueuer.new(config)
+        @done = false
+        @job = nil
+        @count_calls = 0
+        @condition = Async::Condition.new
+      end
+
+      def terminate
+        @done = true
+        @enqueuer.terminate
+
+        Async do
+          @condition.signal
+          @job&.wait
+        end
+      end
+
+      def start
+        @job = safe_async('Poller') do
+          @tid = tid
+          initial_wait
+          until @done
+            enqueue
+            wait unless @done
+          end
+        end
+      end
+
+      def enqueue
+        # logger.warn('Poller runs')
+        @enqueuer.enqueue_jobs
+      end
+
+      private
+
+      def wait
+        Async(transient: true) do
+          sleep(random_poll_interval)
+          @condition.signal
+        end
+        @condition.wait
+      end
+
+      def random_poll_interval
+        count = process_count
+        interval = @config[:poll_interval]
+
+        if count < 10
+          interval * rand + interval.to_f / 2
+        else
+          interval * rand
+        end
+      end
+
+      def fetch_count
+        @config.with_sync_redis do |conn|
+          conn.call('SCAN', '0', 'MATCH', @config.process_scan_key).last.size
+        rescue => err
+          log_error_short(err, { context: '\'Poller getting processes error\'', tid: @tid })
+          1
+        end
+      end
+
+      def process_count
+        count = fetch_count
+        count = 1 if count == 0
+        count
+      end
+
+      # wait a random amount of time so that in the case of multiple processes
+      # their pollers won't be synchronized
+      def initial_wait
+        total = INITIAL_WAIT + (12 * rand)
+
+        # in case of an early exit skip the initial wait
+        Async(transient: true) do
+          sleep(total)
+          @condition.signal
+        end
+        @condition.wait
+      end
+    end
+  end
+end
data/lib/jiggler/stats/collection.rb
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+module Jiggler
+  module Stats
+    class Collection
+      attr_reader :uuid, :data
+
+      def initialize(uuid)
+        @uuid = uuid
+        @data = {
+          processed: 0,
+          failures: 0,
+          current_jobs: {}
+        }
+      end
+
+      def incr_processed
+        @data[:processed] += 1
+      end
+
+      def incr_failures
+        @data[:failures] += 1
+      end
+    end
+  end
+end
data/lib/jiggler/stats/monitor.rb
@@ -0,0 +1,103 @@
+# frozen_string_literal: true
+
+module Jiggler
+  module Stats
+    class Monitor
+      include Support::Helper
+
+      attr_reader :config, :collection, :data_key, :exp
+
+      def initialize(config, collection)
+        @config = config
+        @collection = collection
+        @done = false
+        @condition = Async::Condition.new
+        # the key expiration should be greater than the stats interval
+        # to avoid cases where the monitor is blocked
+        # by long running workers and the key is not updated in time
+        @exp = @config[:stats_interval] + 180 # interval + 3 minutes
+        @rss_path = "/proc/#{Process.pid}/status"
+      end
+
+      def start
+        @job = safe_async('Monitor') do
+          @tid = tid
+          until @done
+            load_data_into_redis
+            wait unless @done
+          end
+        end
+      end
+
+      def terminate
+        @condition.signal
+        @done = true
+        cleanup
+      end
+
+      def process_data
+        Oj.dump({
+          heartbeat: Time.now.to_f,
+          rss: process_rss,
+          current_jobs: collection.data[:current_jobs],
+        }, mode: :compat)
+      end
+
+      def load_data_into_redis
+        # logger.warn('Monitor runs')
+        processed_jobs = collection.data[:processed]
+        failed_jobs = collection.data[:failures]
+        collection.data[:processed] -= processed_jobs
+        collection.data[:failures] -= failed_jobs
+
+        config.with_async_redis do |conn|
+          conn.pipelined do |pipeline|
+            pipeline.call('SET', collection.uuid, process_data, ex: exp)
+            pipeline.call('INCRBY', config.processed_counter, processed_jobs)
+            pipeline.call('INCRBY', config.failures_counter, failed_jobs)
+          end
+
+          cleanup_with(conn) if @done
+        rescue => err
+          log_error_short(err, { context: '\'Monitor loading stats error\'', tid: @tid })
+        end
+      end
+
+      def process_rss
+        case RUBY_PLATFORM
+        when /linux/
+          IO.readlines(@rss_path).each do |line|
+            next unless line.start_with?('VmRSS:')
+            break line.split[1].to_i
+          end
+        when /darwin|bsd/
+          `ps -o pid,rss -p #{Process.pid}`.lines.last.split.last.to_i
+        else
+          nil
+        end
+      end
+
+      def cleanup
+        config.with_async_redis do |conn|
+          cleanup_with(conn)
+        rescue => err
+          log_error_short(err, { context: '\'Cleanup error\'', tid: @tid })
+        end
+      end
+
+      def wait
+        Async(transient: true) do
+          sleep(config[:stats_interval])
+          @condition.signal
+        end
+        @condition.wait
+      end
+
+      private
+
+      def cleanup_with(conn)
+        conn.call('DEL', collection.uuid)
+      end
+    end
+  end
+end
data/lib/jiggler/summary.rb
@@ -0,0 +1,101 @@
+# frozen_string_literal: true
+
+module Jiggler
+  class Summary
+    KEYS = %w[
+      retry_jobs_count
+      dead_jobs_count
+      scheduled_jobs_count
+      failures_count
+      processed_count
+      processes
+      queues
+    ].freeze
+
+    attr_reader :config
+
+    def initialize(config)
+      @config = config
+    end
+
+    def all
+      summary = {}
+      collected_data = config.client_redis_pool.acquire do |conn|
+        data = conn.pipelined do |pipeline|
+          pipeline.call('ZCARD', config.retries_set)
+          pipeline.call('ZCARD', config.dead_set)
+          pipeline.call('ZCARD', config.scheduled_set)
+          pipeline.call('GET', config.failures_counter)
+          pipeline.call('GET', config.processed_counter)
+        end
+        [*data, fetch_and_format_processes(conn), fetch_and_format_queues(conn)]
+      end
+      KEYS.each_with_index do |key, index|
+        val = collected_data[index]
+        val = val.to_i if index <= 4 # counters
+        summary[key] = val
+      end
+      summary
+    end
+
+    def last_retry_jobs(num)
+      config.client_redis_pool.acquire do |conn|
+        conn.call('ZRANGE', config.retries_set, '+inf', '-inf', 'BYSCORE', 'REV', 'LIMIT', 0, num)
+      end.map { |job| Oj.load(job, mode: :compat) }
+    end
+
+    def last_scheduled_jobs(num)
+      config.client_redis_pool.acquire do |conn|
+        conn.call('ZRANGE', config.scheduled_set, '+inf', '-inf', 'BYSCORE', 'REV', 'LIMIT', 0, num, 'WITHSCORES')
+      end.map do |(job, score)|
+        Oj.load(job).merge('scheduled_at' => score)
+      end
+    end
+
+    def last_dead_jobs(num)
+      config.client_redis_pool.acquire do |conn|
+        conn.call('ZRANGE', config.dead_set, '+inf', '-inf', 'BYSCORE', 'REV', 'LIMIT', 0, num)
+      end.map { |job| Oj.load(job, mode: :compat) }
+    end
+
+    private
+
+    def fetch_processes(conn)
+      # in case the keys were deleted/modified this could return incorrect results
+      conn.call('SCAN', '0', 'MATCH', config.process_scan_key).last
+    end
+
+    def fetch_and_format_processes(conn)
+      fetch_processes(conn).reduce({}) do |acc, uuid|
+        process_data = Oj.load(conn.call('GET', uuid), mode: :compat) || {}
+        values = uuid.split(':')
+        acc[uuid] = process_data.merge({
+          'name' => values[0..2].join(':'),
+          'concurrency' => values[3],
+          'timeout' => values[4],
+          'queues' => values[5],
+          'poller_enabled' => values[6] == '1',
+          'started_at' => values[7],
+          'pid' => values[8]
+        })
+        acc[uuid]['hostname'] = values[9..-1].join(':')
+        acc
+      end
+    end
+
+    def fetch_and_format_queues(conn)
+      lists = conn.call('SCAN', '0', 'MATCH', config.queue_scan_key).last
+      lists_data = {}
+
+      collected_data = conn.pipelined do |pipeline|
+        lists.each do |list|
+          pipeline.call('LLEN', list)
+        end
+      end
+      lists.each_with_index do |list, index|
+        lists_data[list.split(':').last] = collected_data[index]
+      end
+      lists_data
+    end
+  end
+end
data/lib/jiggler/support/helper.rb
@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+module Jiggler
+  module Support
+    module Helper
+      def safe_async(name)
+        Async do
+          yield
+        rescue Exception => ex
+          log_error(ex, { context: name, tid: tid })
+        end
+      end
+
+      def log_error(ex, ctx = {})
+        err_context = ctx.compact.map { |k, v| "#{k}=#{v}" }.join(' ')
+        logger.error("error_message='#{ex.message}' #{err_context}")
+        logger.error(ex.backtrace.first(12).join("\n")) unless ex.backtrace.nil?
+      end
+
+      def log_error_short(err, ctx = {})
+        err_context = ctx.compact.map { |k, v| "#{k}=#{v}" }.join(' ')
+        logger.error("error_message='#{err.message}' #{err_context}")
+      end
+
+      def logger
+        @config.logger
+      end
+
+      def tid
+        return unless Async::Task.current?
+        (Async::Task.current.object_id ^ ::Process.pid).to_s(36)
+      end
+    end
+  end
+end
data/lib/jiggler/web/assets/stylesheets/application.css
@@ -0,0 +1,64 @@
+body {
+  background: rgb(45, 44, 49);
+  color: #d8d9da;
+  padding: 20px;
+  font-family: "Segoe UI","Roboto","Oxygen","Ubuntu","Cantarell","Fira Sans","Droid Sans","Helvetica Neue",sans-serif;
+}
+
+.main-table {
+  border-collapse: collapse;
+  margin: 1rem 0;
+  width: 100%;
+}
+
+.main-table th, td {
+  border: 1px solid #858585;
+  border-collapse: collapse;
+  padding: 0.5rem;
+}
+
+.main-table tr:nth-child(even) {
+  background-color: rgb(45, 44, 49);
+}
+
+.main-table thead {
+  background-color: rgb(54 52 64);
+}
+
+.main-table tbody tr:hover {
+  background-color: rgb(56, 55, 61);
+}
+
+.right {
+  text-align: right;
+}
+
+.stats-entry {
+  background-color: #363440;
+  border: 1px solid #858585;
+  display: inline-block;
+  padding: 10px;
+}
+
+.badge {
+  background-color: #858585;
+  color: #fff;
+  padding: 0.1rem 0.3rem;
+  text-align: center;
+  border-radius: 3px;
+  font-size: 0.7rem;
+  font-weight: 500;
+  vertical-align: top;
+}
+
+.badge-success {
+  background-color: #008B69;
+}
+
+.outdated {
+  color: #858585;
+}
+
+.outdated .badge-success {
+  background-color: #858585;
+}