tracestax 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +21 -0
- data/README.md +107 -0
- data/lib/tracestax/client.rb +440 -0
- data/lib/tracestax/configuration.rb +21 -0
- data/lib/tracestax/delayed_job.rb +66 -0
- data/lib/tracestax/good_job.rb +61 -0
- data/lib/tracestax/resque.rb +79 -0
- data/lib/tracestax/sidekiq/client.rb +92 -0
- data/lib/tracestax/sidekiq/configuration.rb +21 -0
- data/lib/tracestax/sidekiq/heartbeat_poller.rb +91 -0
- data/lib/tracestax/sidekiq/server_middleware.rb +88 -0
- data/lib/tracestax/sidekiq/version.rb +7 -0
- data/lib/tracestax/sidekiq.rb +46 -0
- data/lib/tracestax/solid_queue.rb +59 -0
- data/lib/tracestax/version.rb +5 -0
- data/lib/tracestax.rb +32 -0
- metadata +145 -0
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
# frozen_string_literal: true

require "socket"
require "securerandom"

module TraceStax
  module GoodJob
    # ActiveSupport::Notifications subscriber for Good Job.
    #
    # Usage:
    #   TraceStax::GoodJob.subscribe!
    def self.subscribe!
      ActiveSupport::Notifications.subscribe("perform.good_job") do |*args|
        event = ActiveSupport::Notifications::Event.new(*args)
        handle_perform(event)
      end
    end

    # Translates one "perform.good_job" notification into a TraceStax
    # task_event and sends it through the shared client. Any unexpected
    # error is caught and logged so instrumentation can never crash the
    # host app.
    def self.handle_perform(event)
      client = TraceStax::Client.instance
      payload = event.payload
      job = payload[:good_job] || payload[:job]

      task_name = job&.job_class || job&.class&.name || "unknown"
      # NOTE(review): good_job_concurrency_key is a dedup key, not a unique
      # execution id — confirm provider_job_id should not take precedence.
      job_id = job&.good_job_concurrency_key || job&.provider_job_id || SecureRandom.uuid
      queue = job&.queue_name || "default"
      attempt = (job&.executions || 0) + 1
      duration_ms = (event.duration || 0).round

      worker_info = {
        key: "#{Socket.gethostname}:#{Process.pid}",
        hostname: Socket.gethostname,
        pid: Process.pid,
        queues: [queue],
      }

      error = payload[:error] || payload[:exception_object]
      status = error ? "failed" : "succeeded"

      event_payload = {
        type: "task_event", framework: "good_job", language: "ruby",
        sdk_version: TraceStax::VERSION, status: status,
        worker: worker_info,
        task: { name: task_name, id: job_id.to_s, queue: queue, attempt: attempt },
        metrics: { duration_ms: duration_ms },
      }

      if error
        event_payload[:error] = {
          type: error.class.name,
          message: error.message,
          stack_trace: error.backtrace&.first(20)&.join("\n"),
        }
      end

      client.send_event(event_payload)
    rescue => e
      # Never crash the host app
      $stderr.puts "[tracestax] GoodJob tracking error: #{e.message}"
    end
  end
end
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
# frozen_string_literal: true

require "socket"
require "securerandom"

module TraceStax
  module Resque
    # Include this module in your Resque job classes, or use the global hook.
    #
    # Usage (per-job):
    #   class OrderProcessor
    #     extend TraceStax::Resque::Job
    #     def self.perform(order_id) ... end
    #   end
    #
    # Usage (global — all jobs):
    #   Resque.before_fork { TraceStax::Client.instance.start }
    #   Resque::Job.send(:extend, TraceStax::Resque::Job)
    module Job
      # Resque `around_perform` hook: emits a "started" event, runs the job,
      # then emits "succeeded" or "failed" (with error details) before
      # re-raising so Resque's own failure handling still runs.
      def around_perform_tracestax(*args)
        client = TraceStax::Client.instance
        # NOTE(review): args.first is the job's first *argument*, not a job
        # id — confirm this is the intended task identifier.
        job_id = args.first&.to_s || SecureRandom.uuid
        task_name = self.name
        queue = ::Resque.queue_from_class(self) || "default"

        start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
        client.send_event(tracestax_event("started", task_name, job_id, queue, {}))

        begin
          yield
          duration_ms = ((Process.clock_gettime(Process::CLOCK_MONOTONIC) - start_time) * 1000).round
          client.send_event(
            tracestax_event("succeeded", task_name, job_id, queue, { duration_ms: duration_ms })
          )
        rescue => e
          duration_ms = ((Process.clock_gettime(Process::CLOCK_MONOTONIC) - start_time) * 1000).round
          event = tracestax_event("failed", task_name, job_id, queue, { duration_ms: duration_ms })
          event[:error] = {
            type: e.class.name,
            message: e.message,
            stack_trace: e.backtrace&.first(20)&.join("\n"),
          }
          client.send_event(event)
          raise
        end
      end

      private

      # Builds the task_event envelope shared by the started/succeeded/failed
      # notifications above.
      def tracestax_event(status, task_name, job_id, queue, metrics)
        {
          type: "task_event",
          framework: "resque",
          language: "ruby",
          sdk_version: TraceStax::VERSION,
          status: status,
          worker: build_worker_info,
          task: { name: task_name, id: job_id, queue: queue.to_s, attempt: 1 },
          metrics: metrics,
        }
      end

      # Identity of the current worker process (host, pid, served queue).
      def build_worker_info
        {
          key: "#{Socket.gethostname}:#{Process.pid}",
          hostname: Socket.gethostname,
          pid: Process.pid,
          queues: [::Resque.queue_from_class(self) || "default"].map(&:to_s),
        }
      end
    end
  end
end
|
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
# frozen_string_literal: true

require "faraday"
require "json"
require "singleton"

module TraceStax
  module Sidekiq
    # Background event sender. Events are buffered in an in-memory queue and
    # flushed to the TraceStax ingest API from a dedicated thread, so job
    # threads never block on network I/O.
    class Client
      include Singleton

      def initialize
        @queue = Queue.new
        @mutex = Mutex.new
        @running = false
      end

      # Starts the background flush thread (idempotent) and registers an
      # at_exit hook so buffered events are flushed on process shutdown.
      def start
        @mutex.synchronize do
          return if @running

          @running = true
          @thread = Thread.new { flush_loop }
          @thread.abort_on_exception = false

          at_exit { shutdown }
        end
      end

      # Enqueues an event hash for asynchronous delivery. No-op when the
      # integration is disabled; prints the JSON payload instead of sending
      # when dry_run is on.
      def send_event(payload)
        return unless config.enabled

        if config.dry_run
          puts "[tracestax dry-run] #{payload.to_json}"
          return
        end

        @queue.push(payload)
      end

      # Drains up to max_batch_size events from the queue and POSTs each one
      # to its type-specific endpoint. Send errors are swallowed — monitoring
      # must never break the app.
      def flush
        batch = []
        while batch.size < config.max_batch_size
          begin
            # Non-blocking pop; ThreadError means the queue is drained.
            # Handling it *here* (not at method level) guarantees events
            # already collected into `batch` are still sent.
            batch.push(@queue.pop(true))
          rescue ThreadError
            break
          end
        end
        return if batch.empty?

        batch.each do |event|
          endpoint = case event[:type]
                     when "task_event" then "/v1/ingest"
                     when "snapshot" then "/v1/snapshot"
                     when "heartbeat" then "/v1/heartbeat"
                     else "/v1/ingest"
                     end

          connection.post(endpoint) do |req|
            req.headers["Authorization"] = "Bearer #{config.api_key}"
            req.headers["Content-Type"] = "application/json"
            req.body = event.to_json
          end
        rescue StandardError
          # Swallow send errors — monitoring should never break the app
        end
      end

      # Stops the flush loop and sends any remaining buffered events.
      def shutdown
        @mutex.synchronize { @running = false }
        flush
      end

      private

      def config
        TraceStax::Sidekiq.configuration
      end

      # Memoized HTTP connection; assumes config.endpoint does not change
      # after the client has started.
      def connection
        @connection ||= Faraday.new(url: config.endpoint) do |f|
          f.request :json
          f.adapter Faraday.default_adapter
        end
      end

      def flush_loop
        while @running
          sleep(config.flush_interval)
          flush
        end
      rescue StandardError
        # Swallow unexpected errors in the background thread
      end
    end
  end
end
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
# frozen_string_literal: true

module TraceStax
  module Sidekiq
    # Runtime settings for the TraceStax Sidekiq integration.
    #
    # Values are normally set via TraceStax::Sidekiq.configure; the
    # TRACESTAX_ENABLED and TRACESTAX_DRY_RUN environment variables act as
    # kill switches that need no code change.
    class Configuration
      attr_accessor :api_key, :endpoint, :flush_interval, :max_batch_size, :enabled, :dry_run

      def initialize
        @endpoint       = "https://ingest.tracestax.com"
        @flush_interval = 5.0
        @max_batch_size = 100
        @enabled        = ENV["TRACESTAX_ENABLED"] != "false"
        @dry_run        = ENV["TRACESTAX_DRY_RUN"] == "true"
      end

      # Raises Error unless an API key has been supplied.
      def validate!
        missing = api_key.nil? || api_key.empty?
        raise Error, "api_key is required" if missing
      end
    end
  end
end
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
# frozen_string_literal: true

require "socket"
require "time" # Time#iso8601 is defined by the stdlib "time" library

module TraceStax
  module Sidekiq
    # Background poller that periodically reports worker liveness
    # ("heartbeat" events) and per-queue depth ("snapshot" events).
    # Every rescue below is deliberate: monitoring must never break the app.
    class HeartbeatPoller
      INTERVAL = 60 # seconds

      def self.start
        new.start
      end

      # Spawns the polling thread; returns self so callers can keep a handle.
      def start
        @thread = Thread.new { poll_loop }
        @thread.abort_on_exception = false
        self
      end

      private

      def poll_loop
        loop do
          sleep(INTERVAL)
          send_heartbeat
          send_snapshots
        end
      rescue StandardError
        # Swallow unexpected errors — monitoring should never break the app
      end

      # Emits one heartbeat identifying this worker process (host, pid,
      # queues served, configured concurrency).
      def send_heartbeat
        payload = {
          framework: "sidekiq",
          language: "ruby",
          sdk_version: TraceStax::Sidekiq::VERSION,
          timestamp: Time.now.utc.iso8601,
          worker: {
            key: "#{Socket.gethostname}:#{Process.pid}",
            hostname: Socket.gethostname,
            pid: Process.pid,
            queues: sidekiq_queue_names,
            concurrency: sidekiq_concurrency
          }
        }

        TraceStax::Sidekiq.client.send_event(payload.merge(type: "heartbeat"))
      rescue StandardError
        # Swallow instrumentation errors — monitoring should never break the app
      end

      # Emits one snapshot per Sidekiq queue with its current depth.
      def send_snapshots
        ::Sidekiq::Queue.all.each do |q|
          payload = {
            framework: "sidekiq",
            language: "ruby",
            sdk_version: TraceStax::Sidekiq::VERSION,
            timestamp: Time.now.utc.iso8601,
            type: "snapshot",
            queue_name: q.name,
            depth: q.size,
            # NOTE(review): this is queue *latency* (seconds) reported under
            # the throughput_per_min key — confirm the server expects latency.
            throughput_per_min: q.latency.round(3)
          }
          TraceStax::Sidekiq.client.send_event(payload)
        end
      rescue StandardError
        # Swallow instrumentation errors — monitoring should never break the app
      end

      def sidekiq_concurrency
        ::Sidekiq.default_configuration[:concurrency]
      rescue StandardError
        nil
      end

      def sidekiq_queue_names
        ::Sidekiq::Queue.all.map(&:name)
      rescue StandardError
        []
      end
    end
  end
end
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
# frozen_string_literal: true

require "socket"

module TraceStax
  module Sidekiq
    # Sidekiq server middleware that reports one task_event per processed
    # job: "succeeded" with its duration, or "failed" with error details
    # before re-raising so Sidekiq's retry machinery still runs.
    class ServerMiddleware
      def call(worker, job, queue)
        start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)

        begin
          yield
        rescue => err
          duration_ms = elapsed_ms(start_time)
          send_task_event(worker, job, queue, "failed", duration_ms, err)
          raise
        end

        duration_ms = elapsed_ms(start_time)
        send_task_event(worker, job, queue, "succeeded", duration_ms, nil)
      end

      private

      # Milliseconds elapsed since the given monotonic-clock reading.
      def elapsed_ms(start_time)
        ((Process.clock_gettime(Process::CLOCK_MONOTONIC) - start_time) * 1000).round
      end

      # Assembles and enqueues the task_event payload. Errors raised while
      # reporting are swallowed — monitoring must never break job processing.
      def send_task_event(worker, job, queue, status, duration_ms, err)
        payload = {
          framework: "sidekiq",
          language: "ruby",
          sdk_version: TraceStax::Sidekiq::VERSION,
          type: "task_event",
          worker: {
            key: "#{Socket.gethostname}:#{Process.pid}",
            hostname: Socket.gethostname,
            pid: Process.pid,
            concurrency: sidekiq_concurrency,
            queues: sidekiq_queue_names
          },
          task: {
            name: job_class_name(worker, job),
            id: job["jid"].to_s,
            queue: queue.to_s,
            attempt: (job["retry_count"] || 0) + 1,
            chain_id: job["bid"]
          }.compact,
          status: status,
          metrics: { duration_ms: duration_ms }
        }

        if err
          payload[:error] = {
            type: err.class.name,
            message: err.message,
            stack_trace: err.backtrace&.first(20)&.join("\n")
          }
        end

        TraceStax::Sidekiq.client.send_event(payload)
      rescue StandardError
        # Swallow instrumentation errors — monitoring should never break the app
      end

      # Prefer the worker object's class name; fall back to the job hash's
      # "class" key when the worker is anonymous or a bare Object.
      # (The original `worker.respond_to?(:class)` guard was a tautology —
      # every object responds to :class — so it has been dropped.)
      def job_class_name(worker, job)
        name = worker.class.name
        if name && name != "Object"
          name
        else
          job["class"].to_s
        end
      end

      def sidekiq_concurrency
        ::Sidekiq.default_configuration[:concurrency]
      rescue StandardError
        nil
      end

      def sidekiq_queue_names
        ::Sidekiq::Queue.all.map(&:name)
      rescue StandardError
        []
      end
    end
  end
end
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
# frozen_string_literal: true

require_relative "sidekiq/version"
require_relative "sidekiq/configuration"
require_relative "sidekiq/client"
require_relative "sidekiq/server_middleware"
require_relative "sidekiq/heartbeat_poller"

module TraceStax
  module Sidekiq
    class Error < StandardError; end

    class << self
      attr_accessor :configuration

      # Configure the TraceStax Sidekiq integration.
      #
      #   TraceStax::Sidekiq.configure do |config|
      #     config.api_key = ENV["TRACESTAX_API_KEY"]
      #   end
      #
      # Validates the configuration and starts the background event client.
      def configure
        self.configuration ||= Configuration.new
        yield(configuration)
        configuration.validate!
        Client.instance.start
      end

      # The shared background event client.
      def client
        Client.instance
      end

      # Install TraceStax into the Sidekiq server middleware chain and start
      # the heartbeat poller. Safe to call more than once: the poller is
      # memoized, so repeated calls no longer spawn extra polling threads.
      def install!
        ::Sidekiq.configure_server do |config|
          config.server_middleware do |chain|
            chain.prepend TraceStax::Sidekiq::ServerMiddleware
          end
        end

        @heartbeat_poller ||= HeartbeatPoller.start
      end
    end
  end
end
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
# frozen_string_literal: true

require "socket"
require "securerandom"

module TraceStax
  # ActiveSupport::Notifications subscriber for Solid Queue. Pairs the
  # "perform_start" and "perform" events (via thread-locals) to compute a
  # duration, then reports one task_event per executed job.
  class SolidQueueSubscriber
    def self.attach
      return unless defined?(SolidQueue)

      # Record the monotonic start time and job payload per worker thread so
      # the completion handler below can compute a duration.
      ActiveSupport::Notifications.subscribe("perform_start.solid_queue") do |*args|
        event = ActiveSupport::Notifications::Event.new(*args)
        Thread.current[:tracestax_start_time] = Process.clock_gettime(Process::CLOCK_MONOTONIC)
        Thread.current[:tracestax_job_data] = event.payload
      end

      ActiveSupport::Notifications.subscribe("perform.solid_queue") do |*args|
        event = ActiveSupport::Notifications::Event.new(*args)
        start_time = Thread.current[:tracestax_start_time]
        job_data = Thread.current[:tracestax_job_data] || {}
        # Clear the thread-locals immediately so a later job on this thread
        # can never pick up a stale start time or job payload.
        Thread.current[:tracestax_start_time] = nil
        Thread.current[:tracestax_job_data] = nil
        duration_ms = start_time ? ((Process.clock_gettime(Process::CLOCK_MONOTONIC) - start_time) * 1000).round : nil

        payload = {
          framework: "solid_queue",
          language: "ruby",
          sdk_version: TraceStax::VERSION,
          type: "task_event",
          worker: {
            key: "#{Socket.gethostname}:#{Process.pid}",
            hostname: Socket.gethostname,
            pid: Process.pid,
            # NOTE(review): assumes SolidQueue.config behaves like a nested
            # hash with a :workers/:threads key — confirm against the
            # SolidQueue version in use.
            concurrency: SolidQueue.config&.dig(:workers, :threads) || 1,
            queues: Array(job_data[:queue_name] || "default")
          },
          task: {
            name: job_data[:job_class] || "unknown",
            id: job_data[:job_id]&.to_s || SecureRandom.uuid,
            queue: job_data[:queue_name] || "default",
            # NOTE(review): executions may be 0 before the first run, in
            # which case `|| 1` does not apply — confirm intended semantics.
            attempt: job_data[:executions] || 1
          },
          status: event.payload[:error] ? "failed" : "succeeded",
          metrics: {
            duration_ms: duration_ms
          }
        }

        if event.payload[:error]
          err = event.payload[:error]
          payload[:error] = {
            type: err.class.name,
            message: err.message,
            stack_trace: err.backtrace&.first(20)&.join("\n")
          }
        end

        TraceStax.client.send_event(payload)
      end
    end
  end
end
|
data/lib/tracestax.rb
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
# frozen_string_literal: true

require_relative "tracestax/client"
require_relative "tracestax/configuration"
require_relative "tracestax/version"

# Framework integrations are NOT auto-required — load only what you need:
#
#   require "tracestax/sidekiq"     # Sidekiq 6.x / 7.x
#   require "tracestax/solid_queue" # Solid Queue (Rails 8)
#   require "tracestax/resque"      # Resque 2.x
#   require "tracestax/delayed_job" # Delayed::Job 4.x
#   require "tracestax/good_job"    # Good Job 3.x+

module TraceStax
  # Base error class for the gem.
  class Error < StandardError; end

  class << self
    attr_accessor :configuration

    # Yields the (memoized) Configuration for mutation, validates it, then
    # boots the singleton client's background sender.
    def configure
      self.configuration ||= Configuration.new
      yield(configuration)
      configuration.validate!
      Client.instance.start
    end

    # The process-wide TraceStax client singleton.
    def client
      Client.instance
    end
  end
end
|