dead_bro 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,196 @@
+ # frozen_string_literal: true
+
+ module DeadBro
+   class MemoryLeakDetector
+     # Track memory patterns over time to detect leaks
+     MEMORY_HISTORY_KEY = :dead_bro_memory_history
+     LEAK_DETECTION_WINDOW = 300 # 5 minutes
+     MEMORY_GROWTH_THRESHOLD = 50 # 50MB growth threshold
+     MIN_SAMPLES_FOR_LEAK_DETECTION = 10
+
+     def self.initialize_history
+       Thread.current[MEMORY_HISTORY_KEY] = {
+         samples: [],
+         last_cleanup: Time.now.utc.to_i,
+         leak_alerts: []
+       }
+     end
+
+     def self.record_memory_sample(sample_data)
+       history = Thread.current[MEMORY_HISTORY_KEY] || initialize_history
+
+       sample = {
+         timestamp: Time.now.utc.to_i,
+         memory_usage: sample_data[:memory_usage] || 0,
+         gc_count: sample_data[:gc_count] || 0,
+         heap_pages: sample_data[:heap_pages] || 0,
+         object_count: sample_data[:object_count] || 0,
+         request_id: sample_data[:request_id],
+         controller: sample_data[:controller],
+         action: sample_data[:action]
+       }
+
+       history[:samples] << sample
+
+       # Clean up old samples
+       cleanup_old_samples(history)
+
+       # Check for memory leaks
+       check_for_memory_leaks(history)
+
+       history
+     end
+
+     def self.cleanup_old_samples(history)
+       cutoff_time = Time.now.utc.to_i - LEAK_DETECTION_WINDOW
+       history[:samples] = history[:samples].select { |sample| sample[:timestamp] > cutoff_time }
+     end
+
+     def self.check_for_memory_leaks(history)
+       samples = history[:samples]
+       return if samples.length < MIN_SAMPLES_FOR_LEAK_DETECTION
+
+       # Calculate memory growth trend
+       memory_values = samples.map { |s| s[:memory_usage] }
+       timestamps = samples.map { |s| s[:timestamp] }
+
+       # Use linear regression to detect upward trend
+       trend = calculate_memory_trend(memory_values, timestamps)
+
+       # Check if memory is growing consistently
+       if trend[:slope] > 0.1 && trend[:r_squared] > 0.7 # Growing with good correlation
+         memory_growth = memory_values.last - memory_values.first
+
+         if memory_growth > MEMORY_GROWTH_THRESHOLD
+           leak_alert = {
+             detected_at: Time.now.utc.to_i,
+             memory_growth_mb: memory_growth.round(2),
+             growth_rate_mb_per_second: trend[:slope],
+             confidence: trend[:r_squared],
+             sample_count: samples.length,
+             time_window_seconds: timestamps.last - timestamps.first,
+             recent_controllers: samples.last(5).map { |s| "#{s[:controller]}##{s[:action]}" }.uniq
+           }
+
+           history[:leak_alerts] << leak_alert
+
+           # Only keep recent leak alerts
+           history[:leak_alerts] = history[:leak_alerts].last(10)
+         end
+       end
+     end
+
+     def self.calculate_memory_trend(memory_values, timestamps)
+       return {slope: 0, r_squared: 0} if memory_values.length < 2
+
+       n = memory_values.length
+       sum_x = timestamps.sum
+       sum_y = memory_values.sum
+       sum_xy = timestamps.zip(memory_values).sum { |x, y| x * y }
+       sum_x2 = timestamps.sum { |x| x * x }
+       # (sum of squared memory values is not needed here; R-squared is computed from residuals below)
+
+       # Calculate slope (m) and intercept (b) for y = mx + b
+       slope = (n * sum_xy - sum_x * sum_y).to_f / (n * sum_x2 - sum_x * sum_x)
+       intercept = (sum_y - slope * sum_x).to_f / n
+
+       # Calculate R-squared (coefficient of determination)
+       y_mean = sum_y.to_f / n
+       ss_tot = memory_values.sum { |y| (y - y_mean)**2 }
+       ss_res = memory_values.zip(timestamps).sum { |y, x| (y - (slope * x + intercept))**2 }
+       r_squared = (ss_tot > 0) ? 1 - (ss_res / ss_tot) : 0
+
+       {
+         slope: slope,
+         intercept: intercept,
+         r_squared: r_squared
+       }
+     end
+
+     def self.get_memory_analysis
+       history = Thread.current[MEMORY_HISTORY_KEY] || initialize_history
+       samples = history[:samples]
+
+       return {status: "insufficient_data", sample_count: samples.length} if samples.length < 5
+
+       memory_values = samples.map { |s| s[:memory_usage] }
+       gc_counts = samples.map { |s| s[:gc_count] }
+       object_counts = samples.map { |s| s[:object_count] }
+
+       # Calculate basic statistics
+       memory_stats = calculate_stats(memory_values)
+       gc_stats = calculate_stats(gc_counts)
+       object_stats = calculate_stats(object_counts)
+
+       # Detect patterns
+       memory_trend = calculate_memory_trend(memory_values, samples.map { |s| s[:timestamp] })
+
+       # Analyze recent activity
+       recent_samples = samples.last(10)
+       recent_controllers = recent_samples.map { |s| "#{s[:controller]}##{s[:action]}" }.tally
+
+       {
+         status: "analyzed",
+         sample_count: samples.length,
+         time_window_seconds: samples.last[:timestamp] - samples.first[:timestamp],
+         memory_stats: memory_stats,
+         gc_stats: gc_stats,
+         object_stats: object_stats,
+         memory_trend: memory_trend,
+         recent_controllers: recent_controllers,
+         leak_alerts: history[:leak_alerts].last(5),
+         memory_efficiency: calculate_memory_efficiency(samples)
+       }
+     end
+
+     def self.calculate_stats(values)
+       return {} if values.empty?
+
+       {
+         min: values.min,
+         max: values.max,
+         mean: (values.sum.to_f / values.length).round(2),
+         median: values.sort[values.length / 2],
+         std_dev: calculate_standard_deviation(values)
+       }
+     end
+
+     def self.calculate_standard_deviation(values)
+       return 0 if values.length < 2
+
+       mean = values.sum.to_f / values.length
+       variance = values.sum { |v| (v - mean)**2 } / (values.length - 1)
+       Math.sqrt(variance).round(2)
+     end
+
+     def self.calculate_memory_efficiency(samples)
+       return {} if samples.length < 2
+
+       # Calculate memory per object ratio
+       memory_per_object = samples.map do |sample|
+         (sample[:object_count] > 0) ? (sample[:memory_usage].to_f * 1024) / sample[:object_count] : 0 # MB -> KB per object, float division
+       end
+
+       # Calculate GC efficiency (objects collected per GC cycle)
+       gc_efficiency = []
+       (1...samples.length).each do |i|
+         gc_delta = samples[i][:gc_count] - samples[i - 1][:gc_count]
+         memory_delta = samples[i][:memory_usage] - samples[i - 1][:memory_usage]
+
+         if gc_delta > 0 && memory_delta < 0
+           gc_efficiency << (-memory_delta.to_f / gc_delta).round(2) # float division avoids truncating small deltas
+         end
+       end
+
+       {
+         average_memory_per_object_kb: (memory_per_object.sum / memory_per_object.length).round(2),
+         gc_efficiency_mb_per_cycle: gc_efficiency.any? ? (gc_efficiency.sum / gc_efficiency.length).round(2) : 0,
+         memory_volatility: calculate_standard_deviation(samples.map { |s| s[:memory_usage] })
+       }
+     end
+
+     def self.clear_history
+       Thread.current[MEMORY_HISTORY_KEY] = nil
+     end
+   end
+ end
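
For orientation, here is a minimal sketch of how the detector above might be driven. It only calls methods defined in this file; the sample values, controller name, and request ids are made up, and the thresholds mentioned in the comments are the constants at the top of the class.

# Illustrative only: feed synthetic, steadily growing samples to the detector.
# Note: history lives in Thread.current, so samples and analysis must come from the same thread.
DeadBro::MemoryLeakDetector.initialize_history

10.times do |i|
  DeadBro::MemoryLeakDetector.record_memory_sample({
    memory_usage: 100 + (i * 10),     # MB; +10 MB per sample
    gc_count: i,
    heap_pages: 1_000 + i,
    object_count: 50_000 + (i * 1_000),
    request_id: "req-#{i}",           # hypothetical request id
    controller: "OrdersController",   # hypothetical controller
    action: "index"
  })
  sleep 1 # distinct timestamps so the least-squares fit has a non-zero time span
end

analysis = DeadBro::MemoryLeakDetector.get_memory_analysis
analysis[:memory_trend] # => slope / intercept / r_squared of the fit
analysis[:leak_alerts]  # populated once growth exceeds MEMORY_GROWTH_THRESHOLD
                        # with slope > 0.1 MB/s and r_squared > 0.7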
@@ -0,0 +1,361 @@
+ # frozen_string_literal: true
+
+ require "active_support/notifications"
+
+ module DeadBro
+   class MemoryTrackingSubscriber
+     # Object allocation events
+     ALLOCATION_EVENT = "object_allocations.active_support"
+
+     THREAD_LOCAL_KEY = :dead_bro_memory_events
+     # Consider objects larger than this many bytes as "large"
+     LARGE_OBJECT_THRESHOLD = 1_000_000 # 1MB threshold for large objects
+
+     # Performance optimization settings
+     ALLOCATION_SAMPLING_RATE = 1 # Track all when enabled (adjust in production)
+     MAX_ALLOCATIONS_PER_REQUEST = 1000 # Limit allocations tracked per request
+     LARGE_OBJECT_SAMPLE_RATE = 0.01 # Sample 1% of live objects to estimate large ones
+     MAX_LARGE_OBJECTS = 50 # Cap number of large objects captured per request
+
+     def self.subscribe!(client: Client.new)
+       # Only enable allocation tracking if explicitly enabled (expensive!)
+       return unless DeadBro.configuration.allocation_tracking_enabled
+       if defined?(ActiveSupport::Notifications) && ActiveSupport::Notifications.notifier.respond_to?(:subscribe)
+         begin
+           # Subscribe to object allocation events with sampling
+           ActiveSupport::Notifications.subscribe(ALLOCATION_EVENT) do |name, started, finished, _unique_id, data|
+             # Sample allocations to reduce overhead
+             next unless rand < ALLOCATION_SAMPLING_RATE
+             track_allocation(data, started, finished)
+           end
+         rescue
+           # Allocation tracking might not be available in all Ruby versions
+         end
+       end
+     rescue
+       # Never raise from instrumentation install
+     end
+
+     def self.start_request_tracking
+       # Only track if memory tracking is enabled
+       return unless DeadBro.configuration.memory_tracking_enabled
+
+       Thread.current[THREAD_LOCAL_KEY] = {
+         allocations: [],
+         memory_snapshots: [],
+         large_objects: [],
+         gc_before: gc_stats,
+         memory_before: memory_usage_mb,
+         start_time: Time.now.utc.to_i,
+         object_counts_before: count_objects_snapshot
+       }
+     end
+
+     def self.stop_request_tracking
+       events = Thread.current[THREAD_LOCAL_KEY]
+       Thread.current[THREAD_LOCAL_KEY] = nil
+
+       if events
+         events[:gc_after] = gc_stats
+         events[:memory_after] = memory_usage_mb
+         events[:end_time] = Time.now.utc.to_i
+         events[:duration_seconds] = events[:end_time] - events[:start_time]
+         events[:object_counts_after] = count_objects_snapshot
+
+         # Fallback large object detection via ObjectSpace sampling
+         if (events[:large_objects].nil? || events[:large_objects].empty?) && object_space_available?
+           events[:large_objects] = sample_large_objects
+         end
+       end
+
+       events || {}
+     end
+
+     def self.track_allocation(data, started, finished)
+       return unless Thread.current[THREAD_LOCAL_KEY]
+
+       # Only track if we have meaningful allocation data
+       return unless data.is_a?(Hash) && data[:count] && data[:size]
+
+       # Limit allocations per request to prevent memory bloat
+       allocations = Thread.current[THREAD_LOCAL_KEY][:allocations]
+       return if allocations.length >= MAX_ALLOCATIONS_PER_REQUEST
+
+       # Simplified allocation tracking (avoid expensive operations)
+       allocation = {
+         class_name: data[:class_name] || "Unknown",
+         count: data[:count],
+         size: data[:size]
+         # Removed expensive fields: duration_ms, timestamp, memory_usage
+       }
+
+       # Track large object allocations (these are rare and important)
+       if data[:size] > LARGE_OBJECT_THRESHOLD
+         large_object = allocation.merge(
+           large_object: true,
+           size_mb: (data[:size] / 1_000_000.0).round(2)
+         )
+         Thread.current[THREAD_LOCAL_KEY][:large_objects] << large_object
+       end
+
+       Thread.current[THREAD_LOCAL_KEY][:allocations] << allocation
+     end
+
+     def self.take_memory_snapshot(label = nil)
+       return unless Thread.current[THREAD_LOCAL_KEY]
+
+       snapshot = {
+         label: label || "snapshot_#{Time.now.to_i}",
+         memory_usage: memory_usage_mb,
+         gc_stats: gc_stats,
+         timestamp: Time.now.utc.to_i,
+         object_count: object_count,
+         heap_pages: heap_pages
+       }
+
+       Thread.current[THREAD_LOCAL_KEY][:memory_snapshots] << snapshot
+     end
+
+     def self.analyze_memory_performance(memory_events)
+       return {} if memory_events.empty?
+
+       allocations = memory_events[:allocations] || []
+       large_objects = memory_events[:large_objects] || []
+       snapshots = memory_events[:memory_snapshots] || []
+
+       # Calculate memory growth
+       memory_growth = 0
+       if memory_events[:memory_before] && memory_events[:memory_after]
+         memory_growth = memory_events[:memory_after] - memory_events[:memory_before]
+       end
+
+       # Calculate allocation totals
+       total_allocations = allocations.sum { |a| a[:count] }
+       total_allocated_size = allocations.sum { |a| a[:size] }
+
+       # Group allocations by class
+       allocations_by_class = allocations.group_by { |a| a[:class_name] }
+         .transform_values { |allocs|
+           {
+             count: allocs.sum { |a| a[:count] },
+             size: allocs.sum { |a| a[:size] }
+           }
+         }
+
+       # Find top allocating classes
+       top_allocating_classes = allocations_by_class.sort_by { |_, data| -data[:size] }.first(10)
+
+       # Analyze large objects
+       large_object_analysis = analyze_large_objects(large_objects)
+
+       # Analyze memory snapshots for trends
+       memory_trends = analyze_memory_trends(snapshots)
+
+       # Calculate GC efficiency
+       gc_efficiency = calculate_gc_efficiency(memory_events[:gc_before], memory_events[:gc_after])
+
+       # Analyze object type deltas (by Ruby object type, not class)
+       object_type_deltas = {}
+       if memory_events[:object_counts_before].is_a?(Hash) && memory_events[:object_counts_after].is_a?(Hash)
+         before = memory_events[:object_counts_before]
+         after = memory_events[:object_counts_after]
+         keys = (before.keys + after.keys).uniq
+         keys.each do |k|
+           object_type_deltas[k] = (after[k] || 0) - (before[k] || 0)
+         end
+       end
+
+       {
+         memory_growth_mb: memory_growth.round(2),
+         total_allocations: total_allocations,
+         total_allocated_size: total_allocated_size,
+         total_allocated_size_mb: (total_allocated_size / 1_000_000.0).round(2),
+         allocations_per_second: (memory_events[:duration_seconds] > 0) ?
+           (total_allocations.to_f / memory_events[:duration_seconds]).round(2) : 0,
+         top_allocating_classes: top_allocating_classes.map { |class_name, data|
+           {
+             class_name: class_name,
+             count: data[:count],
+             size: data[:size],
+             size_mb: (data[:size] / 1_000_000.0).round(2)
+           }
+         },
+         large_objects: large_object_analysis,
+         memory_trends: memory_trends,
+         gc_efficiency: gc_efficiency,
+         memory_snapshots_count: snapshots.count,
+         object_type_deltas: top_object_type_deltas(object_type_deltas, limit: 10)
+       }
+     end
+
+     def self.analyze_large_objects(large_objects)
+       return {} if large_objects.empty?
+
+       {
+         count: large_objects.count,
+         total_size_mb: large_objects.sum { |obj| obj[:size_mb] }.round(2),
+         largest_object_mb: large_objects.max_by { |obj| obj[:size_mb] }[:size_mb],
+         by_class: large_objects.group_by { |obj| obj[:class_name] }
+           .transform_values(&:count)
+       }
+     end
+
+     def self.top_object_type_deltas(deltas, limit: 10)
+       return {} unless deltas.is_a?(Hash)
+       deltas.sort_by { |_, v| -v.abs }.first(limit).to_h
+     end
+
+     def self.object_space_available?
+       defined?(ObjectSpace) && ObjectSpace.respond_to?(:each_object) && ObjectSpace.respond_to?(:memsize_of)
+     end
+
+     def self.sample_large_objects
+       results = []
+       return results unless object_space_available?
+
+       begin
+         # Sample across common heap object types
+         ObjectSpace.each_object do |obj|
+           # Randomly sample to control overhead
+           next unless rand < LARGE_OBJECT_SAMPLE_RATE
+
+           size = begin
+             ObjectSpace.memsize_of(obj)
+           rescue
+             0
+           end
+           next unless size && size > LARGE_OBJECT_THRESHOLD
+
+           klass = begin
+             (obj.respond_to?(:class) && obj.class) ? obj.class.name : "Unknown"
+           rescue
+             "Unknown"
+           end
+           results << {class_name: klass, size: size, size_mb: (size / 1_000_000.0).round(2)}
+
+           break if results.length >= MAX_LARGE_OBJECTS
+         end
+       rescue
+         # Best-effort only
+       end
+
+       # Sort largest first and keep top N
+       results.sort_by { |h| -h[:size] }.first(MAX_LARGE_OBJECTS)
+     end
+
+     def self.count_objects_snapshot
+       if defined?(ObjectSpace) && ObjectSpace.respond_to?(:count_objects)
+         ObjectSpace.count_objects.dup
+       else
+         {}
+       end
+     rescue
+       {}
+     end
+
+     def self.analyze_memory_trends(snapshots)
+       return {} if snapshots.length < 2
+
+       # Calculate memory growth rate between snapshots
+       memory_values = snapshots.map { |s| s[:memory_usage] }
+       memory_growth_rates = []
+
+       (1...memory_values.length).each do |i|
+         growth = memory_values[i] - memory_values[i - 1]
+         time_diff = snapshots[i][:timestamp] - snapshots[i - 1][:timestamp]
+         rate = (time_diff > 0) ? growth.to_f / time_diff : 0 # float division so fractional MB/s rates are kept
+         memory_growth_rates << rate
+       end
+
+       {
+         average_growth_rate_mb_per_second: memory_growth_rates.sum / memory_growth_rates.length,
+         max_growth_rate_mb_per_second: memory_growth_rates.max,
+         memory_volatility: memory_growth_rates.map(&:abs).sum / memory_growth_rates.length,
+         peak_memory_mb: memory_values.max,
+         min_memory_mb: memory_values.min
+       }
+     end
+
+     def self.calculate_gc_efficiency(gc_before, gc_after)
+       return {} unless gc_before && gc_after
+
+       {
+         gc_count_increase: (gc_after[:count] || 0) - (gc_before[:count] || 0),
+         heap_pages_increase: (gc_after[:heap_allocated_pages] || 0) - (gc_before[:heap_allocated_pages] || 0),
+         objects_allocated: (gc_after[:total_allocated_objects] || 0) - (gc_before[:total_allocated_objects] || 0),
+         gc_frequency: (gc_after[:count] && gc_before[:count]) ?
+           (gc_after[:count] - gc_before[:count]).to_f / [gc_after[:count], 1].max : 0
+       }
+     end
+
+     def self.memory_usage_mb
+       # Use cached memory calculation to avoid expensive system calls
+       @memory_cache ||= {}
+       cache_key = Process.pid
+
+       # Cache memory usage for 1 second to avoid repeated system calls
+       if @memory_cache[cache_key] && (Time.now - @memory_cache[cache_key][:timestamp]) < 1
+         return @memory_cache[cache_key][:memory]
+       end
+
+       memory = if defined?(GC) && GC.respond_to?(:stat)
+         # Use GC stats as a proxy for memory usage (much faster than ps)
+         gc_stats = GC.stat
+         # Estimate memory usage from heap pages (rough approximation)
+         heap_pages = gc_stats[:heap_allocated_pages] || 0
+         (heap_pages * 4 * 1024) / (1024 * 1024) # 4KB per page, convert to MB
+       else
+         0
+       end
+
+       @memory_cache[cache_key] = {memory: memory, timestamp: Time.now}
+       memory
+     rescue
+       0
+     end
+
+     def self.gc_stats
+       if defined?(GC) && GC.respond_to?(:stat)
+         stats = GC.stat
+         {
+           count: stats[:count] || 0,
+           heap_allocated_pages: stats[:heap_allocated_pages] || 0,
+           heap_sorted_pages: stats[:heap_sorted_pages] || 0,
+           total_allocated_objects: stats[:total_allocated_objects] || 0,
+           heap_live_slots: stats[:heap_live_slots] || 0,
+           heap_eden_pages: stats[:heap_eden_pages] || 0,
+           heap_tomb_pages: stats[:heap_tomb_pages] || 0
+         }
+       else
+         {}
+       end
+     rescue
+       {}
+     end
+
+     def self.object_count
+       if defined?(GC) && GC.respond_to?(:stat)
+         GC.stat[:heap_live_slots] || 0
+       else
+         0
+       end
+     rescue
+       0
+     end
+
+     def self.heap_pages
+       if defined?(GC) && GC.respond_to?(:stat)
+         GC.stat[:heap_allocated_pages] || 0
+       else
+         0
+       end
+     rescue
+       0
+     end
+
+     # Helper method to take memory snapshots at specific points
+     def self.snapshot_at(label)
+       take_memory_snapshot(label)
+     end
+   end
+ end
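
A minimal sketch of how the request-level hooks above might be exercised directly. It assumes memory tracking is enabled in the gem's configuration (the code above checks DeadBro.configuration.memory_tracking_enabled); the workload is a stand-in.

# Illustrative only: wrap a unit of work with the thread-local tracking hooks above.
# start_request_tracking is a no-op unless DeadBro.configuration.memory_tracking_enabled
# is truthy, in which case nothing below records anything.
DeadBro::MemoryTrackingSubscriber.start_request_tracking

DeadBro::MemoryTrackingSubscriber.take_memory_snapshot("before_work")
100_000.times { |i| "payload-#{i}" } # stand-in for real request work
DeadBro::MemoryTrackingSubscriber.take_memory_snapshot("after_work")

events = DeadBro::MemoryTrackingSubscriber.stop_request_tracking
report = DeadBro::MemoryTrackingSubscriber.analyze_memory_performance(events)

report[:memory_growth_mb]   # GC-heap-based estimate, before vs. after
report[:memory_trends]      # growth rates between the two snapshots
report[:object_type_deltas] # top ObjectSpace.count_objects deltas (e.g. :T_STRING)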
@@ -0,0 +1,90 @@
+ # frozen_string_literal: true
+
+ begin
+   require "rails/railtie"
+ rescue LoadError
+   # Rails not available, skip railtie definition
+ end
+
+ # Only define Railtie if Rails is available
+ if defined?(Rails) && defined?(Rails::Railtie)
+   module DeadBro
+     class Railtie < ::Rails::Railtie
+
+       initializer "dead_bro.subscribe" do |app|
+         app.config.after_initialize do
+           # Use the shared Client instance for all subscribers
+           shared_client = DeadBro.client
+
+           DeadBro::Subscriber.subscribe!(client: shared_client)
+           # Install outgoing HTTP instrumentation
+           require "dead_bro/http_instrumentation"
+           DeadBro::HttpInstrumentation.install!(client: shared_client)
+
+           # Install SQL query tracking
+           require "dead_bro/sql_subscriber"
+           DeadBro::SqlSubscriber.subscribe!
+
+           # Install Rails cache tracking
+           require "dead_bro/cache_subscriber"
+           DeadBro::CacheSubscriber.subscribe!
+
+           # Install Redis tracking (if Redis-related events are present)
+           require "dead_bro/redis_subscriber"
+           DeadBro::RedisSubscriber.subscribe!
+
+           # Install view rendering tracking
+           require "dead_bro/view_rendering_subscriber"
+           DeadBro::ViewRenderingSubscriber.subscribe!(client: shared_client)
+
+           # Install lightweight memory tracking (default)
+           require "dead_bro/lightweight_memory_tracker"
+           require "dead_bro/memory_leak_detector"
+           DeadBro::MemoryLeakDetector.initialize_history
+
+           # Install detailed memory tracking only if enabled
+           if DeadBro.configuration.allocation_tracking_enabled
+             require "dead_bro/memory_tracking_subscriber"
+             DeadBro::MemoryTrackingSubscriber.subscribe!(client: shared_client)
+           end
+
+           # Install job tracking if ActiveJob is available
+           if defined?(ActiveJob)
+             require "dead_bro/job_subscriber"
+             require "dead_bro/job_sql_tracking_middleware"
+             DeadBro::JobSqlTrackingMiddleware.subscribe!
+             DeadBro::JobSubscriber.subscribe!(client: shared_client)
+           end
+         rescue
+           # Never raise in Railtie init
+         end
+       end
+
+       # Insert Rack middleware early enough to observe uncaught exceptions
+       initializer "dead_bro.middleware" do |app|
+         require "dead_bro/error_middleware"
+
+         # Use the shared Client instance for the middleware
+         shared_client = DeadBro.client
+
+         if defined?(::ActionDispatch::DebugExceptions)
+           app.config.middleware.insert_before(::ActionDispatch::DebugExceptions, ::DeadBro::ErrorMiddleware, shared_client)
+         elsif defined?(::ActionDispatch::ShowExceptions)
+           app.config.middleware.insert_before(::ActionDispatch::ShowExceptions, ::DeadBro::ErrorMiddleware, shared_client)
+         else
+           app.config.middleware.use(::DeadBro::ErrorMiddleware, shared_client)
+         end
+       rescue
+         # Never raise in Railtie init
+       end
+
+       # Insert SQL tracking middleware
+       initializer "dead_bro.sql_tracking_middleware" do |app|
+         require "dead_bro/sql_tracking_middleware"
+         app.config.middleware.use(::DeadBro::SqlTrackingMiddleware)
+       rescue
+         # Never raise in Railtie init
+       end
+     end
+   end
+ end
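
For processes that never load this Railtie (plain Ruby services, for example), the same wiring could in principle be done by hand. A rough sketch that mirrors the "dead_bro.subscribe" initializer above, using only requires and calls that appear in it, and assuming the gem's main entry point is already loaded:

# Illustrative only: manual equivalent of the "dead_bro.subscribe" initializer for
# non-Rails processes. Requires and call signatures are copied from the Railtie;
# error handling is left to the caller instead of being swallowed.
require "dead_bro/http_instrumentation"
require "dead_bro/sql_subscriber"
require "dead_bro/cache_subscriber"
require "dead_bro/memory_leak_detector"

shared_client = DeadBro.client # shared Client instance, as in the Railtie

DeadBro::Subscriber.subscribe!(client: shared_client)
DeadBro::HttpInstrumentation.install!(client: shared_client)
DeadBro::SqlSubscriber.subscribe!
DeadBro::CacheSubscriber.subscribe!
DeadBro::MemoryLeakDetector.initialize_history # per-thread history for the boot thread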