dead_bro 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +5 -0
- data/FEATURES.md +338 -0
- data/README.md +274 -0
- data/lib/dead_bro/cache_subscriber.rb +106 -0
- data/lib/dead_bro/circuit_breaker.rb +117 -0
- data/lib/dead_bro/client.rb +110 -0
- data/lib/dead_bro/configuration.rb +146 -0
- data/lib/dead_bro/error_middleware.rb +112 -0
- data/lib/dead_bro/http_instrumentation.rb +113 -0
- data/lib/dead_bro/job_sql_tracking_middleware.rb +26 -0
- data/lib/dead_bro/job_subscriber.rb +243 -0
- data/lib/dead_bro/lightweight_memory_tracker.rb +63 -0
- data/lib/dead_bro/logger.rb +127 -0
- data/lib/dead_bro/memory_helpers.rb +87 -0
- data/lib/dead_bro/memory_leak_detector.rb +196 -0
- data/lib/dead_bro/memory_tracking_subscriber.rb +361 -0
- data/lib/dead_bro/railtie.rb +90 -0
- data/lib/dead_bro/redis_subscriber.rb +282 -0
- data/lib/dead_bro/sql_subscriber.rb +467 -0
- data/lib/dead_bro/sql_tracking_middleware.rb +78 -0
- data/lib/dead_bro/subscriber.rb +357 -0
- data/lib/dead_bro/version.rb +5 -0
- data/lib/dead_bro/view_rendering_subscriber.rb +151 -0
- data/lib/dead_bro.rb +69 -0
- metadata +66 -0
@@ -0,0 +1,467 @@
+# frozen_string_literal: true
+
+begin
+  require "active_support/notifications"
+rescue LoadError
+  # ActiveSupport not available
+end
+
+module DeadBro
+  class SqlSubscriber
+    SQL_EVENT_NAME = "sql.active_record"
+    THREAD_LOCAL_KEY = :dead_bro_sql_queries
+    THREAD_LOCAL_ALLOC_START_KEY = :dead_bro_sql_alloc_start
+    THREAD_LOCAL_ALLOC_RESULTS_KEY = :dead_bro_sql_alloc_results
+    THREAD_LOCAL_BACKTRACE_KEY = :dead_bro_sql_backtraces
+    THREAD_LOCAL_EXPLAIN_PENDING_KEY = :dead_bro_explain_pending
+
+    def self.subscribe!
+      # Subscribe with a start/finish listener to measure allocations per query
+      if ActiveSupport::Notifications.notifier.respond_to?(:subscribe)
+        begin
+          ActiveSupport::Notifications.notifier.subscribe(SQL_EVENT_NAME, SqlAllocListener.new)
+        rescue
+        end
+      end
+
+      ActiveSupport::Notifications.subscribe(SQL_EVENT_NAME) do |name, started, finished, _unique_id, data|
+        next if data[:name] == "SCHEMA"
+        # Only track queries that are part of the current request
+        next unless Thread.current[THREAD_LOCAL_KEY]
+        unique_id = _unique_id
+        allocations = nil
+        captured_backtrace = nil
+        begin
+          alloc_results = Thread.current[THREAD_LOCAL_ALLOC_RESULTS_KEY]
+          allocations = alloc_results && alloc_results.delete(unique_id)
+
+          # Get the captured backtrace from when the query started
+          backtrace_map = Thread.current[THREAD_LOCAL_BACKTRACE_KEY]
+          captured_backtrace = backtrace_map && backtrace_map.delete(unique_id)
+        rescue
+        end
+
+        duration_ms = ((finished - started) * 1000.0).round(2)
+        original_sql = data[:sql]
+
+        query_info = {
+          sql: sanitize_sql(original_sql),
+          name: data[:name],
+          duration_ms: duration_ms,
+          cached: data[:cached] || false,
+          connection_id: data[:connection_id],
+          trace: safe_query_trace(data, captured_backtrace),
+          allocations: allocations
+        }
+
+        # Run EXPLAIN ANALYZE for slow queries in the background
+        if should_explain_query?(duration_ms, original_sql)
+          # Store reference to query_info so we can update it when EXPLAIN completes
+          query_info[:explain_plan] = nil # Placeholder
+          start_explain_analyze_background(original_sql, data[:connection_id], query_info)
+        end
+
+        # Add to thread-local storage
+        Thread.current[THREAD_LOCAL_KEY] << query_info
+      end
+    end
+
+    def self.start_request_tracking
+      Thread.current[THREAD_LOCAL_KEY] = []
+      Thread.current[THREAD_LOCAL_ALLOC_START_KEY] = {}
+      Thread.current[THREAD_LOCAL_ALLOC_RESULTS_KEY] = {}
+      Thread.current[THREAD_LOCAL_BACKTRACE_KEY] = {}
+      Thread.current[THREAD_LOCAL_EXPLAIN_PENDING_KEY] = []
+    end
+
+    def self.stop_request_tracking
+      # Wait for any pending EXPLAIN ANALYZE queries to complete (with timeout)
+      # This must happen BEFORE we get the queries array reference to ensure
+      # all explain_plan fields are populated
+      wait_for_pending_explains(5.0) # 5 second timeout
+
+      # Get queries after waiting for EXPLAIN to complete
+      queries = Thread.current[THREAD_LOCAL_KEY]
+
+      Thread.current[THREAD_LOCAL_KEY] = nil
+      Thread.current[THREAD_LOCAL_ALLOC_START_KEY] = nil
+      Thread.current[THREAD_LOCAL_ALLOC_RESULTS_KEY] = nil
+      Thread.current[THREAD_LOCAL_BACKTRACE_KEY] = nil
+      Thread.current[THREAD_LOCAL_EXPLAIN_PENDING_KEY] = nil
+      queries || []
+    end
+
+    def self.wait_for_pending_explains(timeout_seconds)
+      pending = Thread.current[THREAD_LOCAL_EXPLAIN_PENDING_KEY]
+      return unless pending && !pending.empty?
+
+      start_time = Time.now
+      pending.each do |thread|
+        remaining_time = timeout_seconds - (Time.now - start_time)
+        break if remaining_time <= 0
+
+        begin
+          thread.join(remaining_time)
+        rescue => e
+          DeadBro.logger.debug("Error waiting for EXPLAIN ANALYZE: #{e.message}")
+        end
+      end
+    end
+
+    def self.sanitize_sql(sql)
+      return sql unless sql.is_a?(String)
+
+      # Remove sensitive data patterns
+      sql = sql.gsub(/\b(password|token|secret|key|ssn|credit_card)\s*=\s*['"][^'"]*['"]/i, '\1 = ?')
+      sql = sql.gsub(/\b(password|token|secret|key|ssn|credit_card)\s*=\s*[^'",\s)]+/i, '\1 = ?')
+
+      # Remove specific values in WHERE clauses that might be sensitive
+      sql = sql.gsub(/WHERE\s+[^=]+=\s*['"][^'"]*['"]/i) do |match|
+        match.gsub(/=\s*['"][^'"]*['"]/, "= ?")
+      end
+
+      # Limit query length to prevent huge payloads
+      (sql.length > 1000) ? sql[0..1000] + "..." : sql
+    end
+
+    def self.should_explain_query?(duration_ms, sql)
+      return false unless DeadBro.configuration.explain_analyze_enabled
+      return false if duration_ms < DeadBro.configuration.slow_query_threshold_ms
+      return false unless sql.is_a?(String)
+      return false if sql.strip.empty?
+
+      # Skip EXPLAIN for certain query types that don't benefit from it
+      sql_upper = sql.upcase.strip
+      return false if sql_upper.start_with?("EXPLAIN")
+      return false if sql_upper.start_with?("BEGIN")
+      return false if sql_upper.start_with?("COMMIT")
+      return false if sql_upper.start_with?("ROLLBACK")
+      return false if sql_upper.start_with?("SAVEPOINT")
+      return false if sql_upper.start_with?("RELEASE")
+
+      true
+    end
+
+    def self.start_explain_analyze_background(sql, connection_id, query_info)
+      return unless defined?(ActiveRecord)
+      return unless ActiveRecord::Base.respond_to?(:connection)
+
+      # Capture the main thread reference to append logs to the correct thread
+      main_thread = Thread.current
+
+      # Run EXPLAIN in a background thread to avoid blocking the main request
+      explain_thread = Thread.new do
+        connection = nil
+        begin
+          # Use a separate connection to avoid interfering with the main query
+          if ActiveRecord::Base.connection_pool.respond_to?(:checkout)
+            connection = ActiveRecord::Base.connection_pool.checkout
+          else
+            connection = ActiveRecord::Base.connection
+          end
+
+          # Build EXPLAIN query based on database adapter
+          explain_sql = build_explain_query(sql, connection)
+
+          # Execute the EXPLAIN query
+          # For PostgreSQL, use select_all which returns ActiveRecord::Result
+          # For other databases, use execute
+          adapter_name = connection.adapter_name.downcase
+          if adapter_name == "postgresql" || adapter_name == "postgis"
+            # PostgreSQL: select_all returns ActiveRecord::Result with rows
+            result = connection.select_all(explain_sql)
+          else
+            # Other databases: use execute
+            result = connection.execute(explain_sql)
+          end
+
+          # Format the result based on database adapter
+          explain_plan = format_explain_result(result, connection)
+
+          # Update the query_info with the explain plan
+          # This updates the hash that's already in the queries array
+          if explain_plan && !explain_plan.to_s.strip.empty?
+            query_info[:explain_plan] = explain_plan
+            append_log_to_thread(main_thread, :debug, "Captured EXPLAIN ANALYZE for slow query (#{query_info[:duration_ms]}ms): #{explain_plan[0..1000]}...")
+          else
+            query_info[:explain_plan] = nil
+            append_log_to_thread(main_thread, :debug, "EXPLAIN ANALYZE returned empty result. Result type: #{result.class}, Result: #{result.inspect[0..200]}")
+          end
+        rescue => e
+          # Silently fail - don't let EXPLAIN break the application
+          append_log_to_thread(main_thread, :debug, "Failed to capture EXPLAIN ANALYZE: #{e.message}")
+          query_info[:explain_plan] = nil
+        ensure
+          # Return connection to pool if we checked it out
+          if connection && ActiveRecord::Base.connection_pool.respond_to?(:checkin)
+            ActiveRecord::Base.connection_pool.checkin(connection) rescue nil
+          end
+        end
+      end
+
+      # Track the thread so we can wait for it when stopping request tracking
+      pending = Thread.current[THREAD_LOCAL_EXPLAIN_PENDING_KEY] ||= []
+      pending << explain_thread
+    rescue => e
+      # Use DeadBro.logger here since we're still in the main thread
+      DeadBro.logger.debug("Failed to start EXPLAIN ANALYZE thread: #{e.message}")
+    end
+
+    # Append a log entry directly to a specific thread's log storage
+    # This is used when logging from background threads to ensure logs
+    # are collected with the main request thread's logs
+    def self.append_log_to_thread(thread, severity, message)
+      timestamp = Time.now.utc
+      log_entry = {
+        sev: severity.to_s,
+        msg: message.to_s,
+        time: timestamp.iso8601(3)
+      }
+
+      # Append to the specified thread's log storage
+      thread[:dead_bro_logs] ||= []
+      thread[:dead_bro_logs] << log_entry
+
+      # Also print the message immediately (using current thread's logger)
+      begin
+        if defined?(Rails) && Rails.respond_to?(:logger) && Rails.logger
+          formatted_message = "[DeadBro] #{timestamp.iso8601(3)} #{severity.to_s.upcase}: #{message}"
+          case severity
+          when :debug
+            Rails.logger.debug(formatted_message)
+          when :info
+            Rails.logger.info(formatted_message)
+          when :warn
+            Rails.logger.warn(formatted_message)
+          when :error
+            Rails.logger.error(formatted_message)
+          when :fatal
+            Rails.logger.fatal(formatted_message)
+          end
+        else
+          # Fallback to stdout
+          $stdout.puts("[DeadBro] #{timestamp.iso8601(3)} #{severity.to_s.upcase}: #{message}")
+        end
+      rescue
+        # Never let logging break the application
+        $stdout.puts("[DeadBro] #{severity.to_s.upcase}: #{message}")
+      end
+    end
+
+    def self.build_explain_query(sql, connection)
+      adapter_name = connection.adapter_name.downcase
+
+      case adapter_name
+      when "postgresql", "postgis"
+        # PostgreSQL supports ANALYZE and BUFFERS
+        "EXPLAIN (ANALYZE, BUFFERS) #{sql}"
+      when "mysql", "mysql2", "trilogy"
+        # MySQL uses different syntax - ANALYZE is a separate keyword
+        "EXPLAIN ANALYZE #{sql}"
+      when "sqlite3"
+        # SQLite supports EXPLAIN QUERY PLAN
+        "EXPLAIN QUERY PLAN #{sql}"
+      else
+        # Generic fallback - just EXPLAIN
+        "EXPLAIN #{sql}"
+      end
+    end
+
+    def self.format_explain_result(result, connection)
+      adapter_name = connection.adapter_name.downcase
+
+      case adapter_name
+      when "postgresql", "postgis"
+        # PostgreSQL returns ActiveRecord::Result from select_all
+        if result.respond_to?(:rows)
+          # ActiveRecord::Result object - rows is an array of arrays
+          # Each row is [query_plan_string]
+          plan_text = result.rows.map { |row| row.is_a?(Array) ? row.first.to_s : row.to_s }.join("\n")
+          return plan_text unless plan_text.strip.empty?
+        end
+
+        # Try alternative methods to extract the plan
+        if result.respond_to?(:each) && result.respond_to?(:columns)
+          # ActiveRecord::Result with columns
+          plan_column = result.columns.find { |col| col.downcase.include?("plan") || col.downcase.include?("query") } || result.columns.first
+          plan_text = result.map { |row|
+            if row.is_a?(Hash)
+              row[plan_column] || row[plan_column.to_sym] || row.values.first
+            else
+              row
+            end
+          }.join("\n")
+          return plan_text unless plan_text.strip.empty?
+        end
+
+        if result.is_a?(Array)
+          # Array of hashes or arrays
+          plan_text = result.map do |row|
+            if row.is_a?(Hash)
+              row["QUERY PLAN"] || row["query plan"] || row[:query_plan] || row.values.first.to_s
+            elsif row.is_a?(Array)
+              row.first.to_s
+            else
+              row.to_s
+            end
+          end.join("\n")
+          return plan_text unless plan_text.strip.empty?
+        end
+
+        # Fallback to string representation
+        result.to_s
+      when "mysql", "mysql2", "trilogy"
+        # MySQL returns rows
+        if result.is_a?(Array)
+          result.map { |row| row.is_a?(Hash) ? row.values.join(" | ") : row.to_s }.join("\n")
+        else
+          result.to_s
+        end
+      when "sqlite3"
+        # SQLite returns rows
+        if result.is_a?(Array)
+          result.map { |row| row.is_a?(Hash) ? row.values.join(" | ") : row.to_s }.join("\n")
+        else
+          result.to_s
+        end
+      else
+        # Generic fallback
+        result.to_s
+      end
+    rescue => e
+      # Fallback to string representation
+      result.to_s
+    end
+
+    def self.safe_query_trace(data, captured_backtrace = nil)
+      return [] unless data.is_a?(Hash)
+
+      # Build trace from available data fields
+      trace = []
+
+      # Use filename, line, and method if available
+      if data[:filename] && data[:line] && data[:method]
+        trace << "#{data[:filename]}:#{data[:line]}:in `#{data[:method]}'"
+      end
+
+      # Use the captured backtrace from when the query started (most accurate)
+      if captured_backtrace && captured_backtrace.is_a?(Array) && !captured_backtrace.empty?
+        # Filter to only include frames that contain "app/" (application code)
+        app_frames = captured_backtrace.select do |frame|
+          frame.include?("app/") && !frame.include?("/vendor/")
+        end
+
+        caller_trace = app_frames.map do |line|
+          # Remove any potential sensitive information from file paths
+          line.gsub(/\/[^\/]*(password|secret|key|token)[^\/]*\//i, "/[FILTERED]/")
+        end
+
+        trace.concat(caller_trace)
+      else
+        # Fallback: try to get backtrace from current context
+        begin
+          # Get all available frames - we'll filter to find application code
+          all_frames = Thread.current.backtrace || []
+
+          if all_frames.empty?
+            # Fallback to caller_locations if backtrace is empty
+            locations = caller_locations(1, 50)
+            all_frames = locations.map { |loc| "#{loc.path}:#{loc.lineno}:in `#{loc.label}'" } if locations
+          end
+
+          # Filter to only include frames that contain "app/" (application code)
+          app_frames = all_frames.select do |frame|
+            frame.include?("app/") && !frame.include?("/vendor/")
+          end
+
+          caller_trace = app_frames.map do |line|
+            line.gsub(/\/[^\/]*(password|secret|key|token)[^\/]*\//i, "/[FILTERED]/")
+          end
+
+          trace.concat(caller_trace)
+        rescue
+          # If backtrace fails, try caller as fallback
+          begin
+            caller_stack = caller(20, 50) # Get more frames to find app/ frames
+            app_frames = caller_stack.select { |frame| frame.include?("app/") && !frame.include?("/vendor/") }
+            caller_trace = app_frames.map do |line|
+              line.gsub(/\/[^\/]*(password|secret|key|token)[^\/]*\//i, "/[FILTERED]/")
+            end
+            trace.concat(caller_trace)
+          rescue
+            # If caller also fails, we still have the immediate location
+          end
+        end
+      end
+
+      # If we have a backtrace in the data, use it (but it's usually nil for SQL events)
+      if data[:backtrace] && data[:backtrace].is_a?(Array)
+        # Filter to only include frames that contain "app/"
+        app_backtrace = data[:backtrace].select do |line|
+          line.is_a?(String) && line.include?("app/") && !line.include?("/vendor/")
+        end
+
+        backtrace_trace = app_backtrace.map do |line|
+          case line
+          when String
+            line.gsub(/\/[^\/]*(password|secret|key|token)[^\/]*\//i, "/[FILTERED]/")
+          else
+            line.to_s
+          end
+        end
+        trace.concat(backtrace_trace)
+      end
+
+      # Remove duplicates and return all app/ frames (no limit)
+      trace.uniq.map do |line|
+        case line
+        when String
+          # Remove any potential sensitive information from file paths
+          line.gsub(/\/[^\/]*(password|secret|key|token)[^\/]*\//i, "/[FILTERED]/")
+        else
+          line.to_s
+        end
+      end
+    rescue
+      []
+    end
+  end
+end
+
+module DeadBro
+  # Listener that records GC allocation deltas per SQL event id
+  class SqlAllocListener
+    def start(name, id, payload)
+      map = (Thread.current[DeadBro::SqlSubscriber::THREAD_LOCAL_ALLOC_START_KEY] ||= {})
+      map[id] = GC.stat[:total_allocated_objects] if defined?(GC) && GC.respond_to?(:stat)
+
+      # Capture the backtrace at query start time (before notification system processes it)
+      # This gives us the actual call stack where the SQL was executed
+      backtrace_map = (Thread.current[DeadBro::SqlSubscriber::THREAD_LOCAL_BACKTRACE_KEY] ||= {})
+      captured_backtrace = Thread.current.backtrace
+      if captured_backtrace && captured_backtrace.is_a?(Array)
+        # Skip the first few frames (our listener code) to get to the actual query execution
+        backtrace_map[id] = captured_backtrace[5..-1] || captured_backtrace
+      end
+    rescue
+    end
+
+    def finish(name, id, payload)
+      start_map = Thread.current[DeadBro::SqlSubscriber::THREAD_LOCAL_ALLOC_START_KEY]
+      return unless start_map && start_map.key?(id)
+
+      start_count = start_map.delete(id)
+      end_count = begin
+        GC.stat[:total_allocated_objects]
+      rescue
+        nil
+      end
+      return unless start_count && end_count
+
+      delta = end_count - start_count
+      results = (Thread.current[DeadBro::SqlSubscriber::THREAD_LOCAL_ALLOC_RESULTS_KEY] ||= {})
+      results[id] = delta
+    rescue
+    end
+  end
+end
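The 467-line hunk above matches data/lib/dead_bro/sql_subscriber.rb in the file listing. It defines a per-request lifecycle: `subscribe!` attaches the `sql.active_record` listeners once, `start_request_tracking` seeds the thread-local stores, and `stop_request_tracking` waits for background EXPLAIN threads and returns the collected query hashes. A minimal usage sketch of that lifecycle, assuming the gem is loaded in a Rails app with ActiveRecord configured (the model and query below are hypothetical, not part of the gem):

```ruby
# Hypothetical sketch, not part of the published gem code.
# Assumes Rails/ActiveRecord are loaded and DeadBro.configuration is set up.
DeadBro::SqlSubscriber.subscribe!              # attach sql.active_record listeners once, e.g. in an initializer

DeadBro::SqlSubscriber.start_request_tracking  # seed the thread-local query/allocation/backtrace stores

User.where(active: true).to_a                  # hypothetical query; any SQL on this thread is now captured

queries = DeadBro::SqlSubscriber.stop_request_tracking
queries.each do |q|
  # each element is the query_info hash built inside subscribe!
  puts "#{q[:name]}: #{q[:duration_ms]}ms, cached=#{q[:cached]}, allocations=#{q[:allocations].inspect}"
  puts q[:explain_plan] if q[:explain_plan]    # filled in asynchronously for slow queries
end
```

In a running app the bundled SqlTrackingMiddleware (next hunk) calls `start_request_tracking` around each Rack request, so manual bracketing like this would only matter for scripts or background jobs.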
@@ -0,0 +1,78 @@
+# frozen_string_literal: true
+
+module DeadBro
+  class SqlTrackingMiddleware
+    def initialize(app)
+      @app = app
+    end
+
+    def call(env)
+      # Clear logs for this request
+      DeadBro.logger.clear
+
+      # Start SQL tracking for this request
+      if defined?(DeadBro::SqlSubscriber)
+        DeadBro::SqlSubscriber.start_request_tracking
+      end
+
+      # Start cache tracking for this request
+      if defined?(DeadBro::CacheSubscriber)
+        DeadBro::CacheSubscriber.start_request_tracking
+      end
+
+      # Start Redis tracking for this request
+      if defined?(DeadBro::RedisSubscriber)
+        DeadBro::RedisSubscriber.start_request_tracking
+      end
+
+      # Start view rendering tracking for this request
+      if defined?(DeadBro::ViewRenderingSubscriber)
+        DeadBro::ViewRenderingSubscriber.start_request_tracking
+      end
+
+      # Start lightweight memory tracking for this request
+      if defined?(DeadBro::LightweightMemoryTracker)
+        DeadBro::LightweightMemoryTracker.start_request_tracking
+      end
+
+      # Start detailed memory tracking when allocation tracking is enabled
+      if DeadBro.configuration.allocation_tracking_enabled && defined?(DeadBro::MemoryTrackingSubscriber)
+        DeadBro::MemoryTrackingSubscriber.start_request_tracking
+      end
+
+      # Start outgoing HTTP accumulation for this request
+      Thread.current[:dead_bro_http_events] = []
+
+      @app.call(env)
+    ensure
+      # Clean up thread-local storage
+      if defined?(DeadBro::SqlSubscriber)
+        Thread.current[:dead_bro_sql_queries]
+        Thread.current[:dead_bro_sql_queries] = nil
+      end
+
+      if defined?(DeadBro::CacheSubscriber)
+        Thread.current[:dead_bro_cache_events]
+        Thread.current[:dead_bro_cache_events] = nil
+      end
+
+      if defined?(DeadBro::RedisSubscriber)
+        Thread.current[:dead_bro_redis_events]
+        Thread.current[:dead_bro_redis_events] = nil
+      end
+
+      if defined?(DeadBro::ViewRenderingSubscriber)
+        Thread.current[:dead_bro_view_events]
+        Thread.current[:dead_bro_view_events] = nil
+      end
+
+      if defined?(DeadBro::LightweightMemoryTracker)
+        Thread.current[:dead_bro_lightweight_memory]
+        Thread.current[:dead_bro_lightweight_memory] = nil
+      end
+
+      # Clean up HTTP events
+      Thread.current[:dead_bro_http_events] = nil
+    end
+  end
+end
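The 78-line hunk matches data/lib/dead_bro/sql_tracking_middleware.rb: a plain Rack middleware whose `call` starts per-request tracking for each subscriber and whose `ensure` block clears the thread-locals after the response, even when the app raises, so per-request data cannot leak between requests on reused threads. The file listing also includes data/lib/dead_bro/railtie.rb, which presumably inserts this middleware automatically; if it ever had to be wired by hand, the conventional Rails setup would look roughly like this (application name hypothetical):

```ruby
# Hypothetical manual wiring sketch; the gem's railtie likely does this for you.
# config/application.rb
require "dead_bro"

module MyApp
  class Application < Rails::Application
    # Insert at the top of the stack so tracking brackets the whole request.
    config.middleware.insert_before 0, DeadBro::SqlTrackingMiddleware
  end
end
```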