brainzlab-rails 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,374 @@
1
# frozen_string_literal: true

module BrainzLab
  module Rails
    module Collectors
      # Collects Active Job events for background job observability.
      # Routes to Nerve for specialized job monitoring + other products
      # (Pulse spans, Flux metrics, Recall logs, Reflex breadcrumbs/errors).
      class ActiveJob < Base
        # Maps each ActiveSupport::Notifications event name to its handler.
        EVENT_HANDLERS = {
          'enqueue.active_job' => :handle_enqueue,
          'enqueue_at.active_job' => :handle_enqueue_at,
          'enqueue_all.active_job' => :handle_enqueue_all,
          'enqueue_retry.active_job' => :handle_enqueue_retry,
          'perform_start.active_job' => :handle_perform_start,
          'perform.active_job' => :handle_perform,
          'retry_stopped.active_job' => :handle_retry_stopped,
          'discard.active_job' => :handle_discard
        }.freeze

        # Dispatches a normalized event hash to the matching handler.
        # Unknown event names are silently ignored.
        def process(event_data)
          handler = EVENT_HANDLERS[event_data[:name]]
          send(handler, event_data) if handler
        end

        private

        # A job was pushed onto a queue for immediate execution.
        def handle_enqueue(event_data)
          job = event_data[:payload][:job]
          klass = job.class.name
          return if @configuration.ignored_job?(klass)

          queue = job.queue_name

          # === NERVE: Job enqueued ===
          send_to_nerve(:enqueue, {
            job_id: job.job_id,
            job_class: klass,
            queue: queue,
            arguments: sanitize_job_arguments(job.arguments)
          })

          # === FLUX: Metrics ===
          send_to_flux(:increment, 'rails.jobs.enqueued', 1, {
            job_class: klass,
            queue: queue
          })

          # === REFLEX: Breadcrumb ===
          add_breadcrumb(
            "Job enqueued: #{klass}",
            category: 'job.enqueue',
            level: :info,
            data: { job_id: job.job_id, queue: queue }
          )

          # === RECALL: Log ===
          send_to_recall(:info, "Job enqueued", {
            job_id: job.job_id,
            job_class: klass,
            queue: queue
          })
        end

        # A job was scheduled for later execution (enqueue with a delay).
        def handle_enqueue_at(event_data)
          job = event_data[:payload][:job]
          klass = job.class.name
          return if @configuration.ignored_job?(klass)

          scheduled_at = job.scheduled_at

          # === NERVE: Scheduled job ===
          send_to_nerve(:enqueue_at, {
            job_id: job.job_id,
            job_class: klass,
            queue: job.queue_name,
            scheduled_at: scheduled_at
          })

          # === FLUX: Metrics ===
          send_to_flux(:increment, 'rails.jobs.scheduled', 1, {
            job_class: klass,
            queue: job.queue_name
          })

          # === REFLEX: Breadcrumb ===
          add_breadcrumb(
            "Job scheduled: #{klass} at #{scheduled_at}",
            category: 'job.schedule',
            level: :info,
            data: { job_id: job.job_id, scheduled_at: scheduled_at }
          )
        end

        # A batch of jobs was enqueued in one call (perform_all_later).
        # Note: this handler intentionally aggregates; it does not apply
        # the per-class ignored_job? filter.
        def handle_enqueue_all(event_data)
          jobs = event_data[:payload][:jobs] || []

          # === FLUX: one counter incremented by the batch size ===
          send_to_flux(:increment, 'rails.jobs.bulk_enqueued', jobs.size)

          # === RECALL: one log line for the whole batch ===
          send_to_recall(:info, "Bulk job enqueue", {
            count: jobs.size,
            job_classes: jobs.map { |j| j.class.name }.uniq
          })
        end

        # A failed job was re-enqueued by retry_on with a wait interval.
        def handle_enqueue_retry(event_data)
          payload = event_data[:payload]
          job = payload[:job]
          klass = job.class.name
          return if @configuration.ignored_job?(klass)

          error = payload[:error]
          wait = payload[:wait]

          # === NERVE: Job retry ===
          send_to_nerve(:retry, {
            job_id: job.job_id,
            job_class: klass,
            queue: job.queue_name,
            error: error&.message,
            wait_seconds: wait,
            executions: job.executions
          })

          # === FLUX: Retry metrics ===
          send_to_flux(:increment, 'rails.jobs.retries', 1, {
            job_class: klass,
            queue: job.queue_name
          })

          # === RECALL: Log retry ===
          send_to_recall(:warn, "Job retry scheduled", {
            job_id: job.job_id,
            job_class: klass,
            error: error&.message,
            wait_seconds: wait,
            executions: job.executions
          })

          # === REFLEX: Breadcrumb ===
          add_breadcrumb(
            "Job retry: #{klass} (attempt #{job.executions})",
            category: 'job.retry',
            level: :warning,
            data: {
              job_id: job.job_id,
              error: error&.message,
              wait_seconds: wait
            }
          )
        end

        # A worker picked the job up and is about to run #perform.
        def handle_perform_start(event_data)
          job = event_data[:payload][:job]
          klass = job.class.name
          return if @configuration.ignored_job?(klass)

          # === NERVE: Job started ===
          send_to_nerve(:start, {
            job_id: job.job_id,
            job_class: klass,
            queue: job.queue_name,
            executions: job.executions
          })

          # === REFLEX: Breadcrumb ===
          add_breadcrumb(
            "Job started: #{klass}",
            category: 'job.start',
            level: :info,
            data: { job_id: job.job_id, queue: job.queue_name }
          )
        end

        # #perform finished (successfully or with an exception).
        def handle_perform(event_data)
          payload = event_data[:payload]
          job = payload[:job]
          klass = job.class.name
          return if @configuration.ignored_job?(klass)

          duration_ms = event_data[:duration_ms]
          db_runtime = payload[:db_runtime]
          exception = payload[:exception_object]

          # Route any failure through Reflex/Flux first.
          handle_job_error(event_data, job, exception) if exception

          # === PULSE: Job execution span ===
          send_to_pulse(event_data, {
            name: "job.#{klass}",
            category: 'job.perform',
            attributes: {
              job_id: job.job_id,
              job_class: klass,
              queue: job.queue_name,
              executions: job.executions,
              db_runtime_ms: db_runtime&.round(2)
            }
          })

          # === NERVE: Job completed ===
          send_to_nerve(:complete, {
            job_id: job.job_id,
            job_class: klass,
            queue: job.queue_name,
            duration_ms: duration_ms,
            db_runtime_ms: db_runtime&.round(2),
            success: exception.nil?
          })

          # === FLUX: throughput and latency metrics ===
          tags = { job_class: klass, queue: job.queue_name }
          send_to_flux(:increment, 'rails.jobs.performed', 1, tags)
          send_to_flux(:timing, 'rails.jobs.duration_ms', duration_ms, tags)
          send_to_flux(:timing, 'rails.jobs.db_runtime_ms', db_runtime, tags) if db_runtime

          # === RECALL: Log completion ===
          send_to_recall(:info, "Job completed", {
            job_id: job.job_id,
            job_class: klass,
            queue: job.queue_name,
            duration_ms: duration_ms
          })

          # === REFLEX: Breadcrumb ===
          add_breadcrumb(
            "Job completed: #{klass}",
            category: 'job.complete',
            level: :info,
            data: { job_id: job.job_id, duration_ms: duration_ms }
          )
        end

        # Reports a job exception to Reflex and counts it in Flux.
        def handle_job_error(event_data, job, exception)
          klass = job.class.name

          # === REFLEX: Capture job error ===
          send_to_reflex(exception, {
            job_id: job.job_id,
            job_class: klass,
            queue: job.queue_name,
            executions: job.executions,
            arguments: sanitize_job_arguments(job.arguments)
          })

          # === FLUX: Error metrics ===
          send_to_flux(:increment, 'rails.jobs.errors', 1, {
            job_class: klass,
            queue: job.queue_name,
            exception_class: exception.class.name
          })
        end

        # Retries are exhausted; the job is dead.
        def handle_retry_stopped(event_data)
          payload = event_data[:payload]
          job = payload[:job]
          klass = job.class.name
          return if @configuration.ignored_job?(klass)

          error = payload[:error]

          # === NERVE: Job dead ===
          send_to_nerve(:dead, {
            job_id: job.job_id,
            job_class: klass,
            queue: job.queue_name,
            error: error&.message,
            executions: job.executions
          })

          # === REFLEX: Capture final failure ===
          if error
            send_to_reflex(error, {
              job_id: job.job_id,
              job_class: klass,
              queue: job.queue_name,
              executions: job.executions,
              final_failure: true
            })
          end

          # === FLUX: Dead job metrics ===
          send_to_flux(:increment, 'rails.jobs.dead', 1, {
            job_class: klass,
            queue: job.queue_name
          })

          # === RECALL: Log final failure ===
          send_to_recall(:error, "Job retries exhausted", {
            job_id: job.job_id,
            job_class: klass,
            error: error&.message,
            executions: job.executions
          })
        end

        # The job was discarded by a discard_on rule.
        def handle_discard(event_data)
          payload = event_data[:payload]
          job = payload[:job]
          klass = job.class.name
          return if @configuration.ignored_job?(klass)

          error = payload[:error]

          # === NERVE: Job discarded ===
          send_to_nerve(:discard, {
            job_id: job.job_id,
            job_class: klass,
            queue: job.queue_name,
            error: error&.message
          })

          # === FLUX: Discard metrics ===
          send_to_flux(:increment, 'rails.jobs.discarded', 1, {
            job_class: klass,
            queue: job.queue_name
          })

          # === RECALL: Log discard ===
          send_to_recall(:warn, "Job discarded", {
            job_id: job.job_id,
            job_class: klass,
            error: error&.message
          })
        end

        # Reduces job arguments to a loggable form: hashes are scrubbed via
        # sanitize_params, plain scalars pass through, anything else is
        # replaced by its class name. Best-effort: any failure yields [].
        def sanitize_job_arguments(arguments)
          return [] unless arguments.is_a?(Array)

          arguments.map do |arg|
            if arg.is_a?(Hash)
              sanitize_params(arg)
            elsif arg.is_a?(String) || arg.is_a?(Numeric) || arg.is_a?(Symbol) ||
                  arg.equal?(true) || arg.equal?(false) || arg.nil?
              arg
            else
              arg.class.name
            end
          end
        rescue StandardError
          []
        end
      end
    end
  end
end
@@ -0,0 +1,250 @@
1
# frozen_string_literal: true

module BrainzLab
  module Rails
    module Collectors
      # Collects Active Record database events
      # Provides deep database observability including N+1 detection,
      # slow-query analysis, transaction tracking, and strict-loading alerts.
      class ActiveRecord < Base
        def initialize(configuration)
          super
          @n_plus_one_detector = Analyzers::NPlusOneDetector.new
          @slow_query_analyzer = Analyzers::SlowQueryAnalyzer.new(configuration)
        end

        # Dispatches a normalized instrumentation event to its handler.
        # Unknown event names are ignored.
        def process(event_data)
          case event_data[:name]
          when 'sql.active_record'
            handle_sql(event_data)
          when 'instantiation.active_record'
            handle_instantiation(event_data)
          when 'start_transaction.active_record'
            handle_start_transaction(event_data)
          when 'transaction.active_record'
            handle_transaction(event_data)
          when 'strict_loading_violation.active_record'
            handle_strict_loading_violation(event_data)
          end
        end

        private

        # One SQL statement was executed (or served from the query cache).
        def handle_sql(event_data)
          payload = event_data[:payload]
          sql = payload[:sql]
          name = payload[:name]
          duration_ms = event_data[:duration_ms]

          # Skip ignored SQL patterns (schema queries, etc.)
          return if @configuration.ignored_sql?(sql)

          # Skip SCHEMA and unnamed internal queries for metrics
          return if name == 'SCHEMA' || name.nil?

          cached = payload[:cached] == true
          async = payload[:async] == true

          # === N+1 Detection ===
          if @configuration.n_plus_one_detection
            n_plus_one = @n_plus_one_detector.check(sql, name, event_data[:unique_id])
            handle_n_plus_one_detected(n_plus_one, event_data) if n_plus_one
          end

          # === Slow Query Analysis ===
          # FIX: also guard against a nil duration_ms so a malformed event
          # cannot raise NoMethodError inside the subscriber.
          threshold = @configuration.slow_query_threshold_ms
          if threshold && duration_ms && duration_ms > threshold
            @slow_query_analyzer.analyze(sql, duration_ms, payload)
          end

          # === PULSE: Database span ===
          send_to_pulse(event_data, {
            name: "sql.#{extract_operation(sql)}",
            category: 'db.sql',
            attributes: {
              name: name,
              sql: truncate_sql(sql),
              cached: cached,
              async: async,
              row_count: payload[:row_count],
              affected_rows: payload[:affected_rows]
            }
          })

          # === FLUX: Metrics ===
          operation = extract_operation(sql)
          tags = { operation: operation, cached: cached }

          send_to_flux(:increment, 'rails.db.queries', 1, tags)
          send_to_flux(:timing, 'rails.db.query_ms', duration_ms, tags)

          if cached
            send_to_flux(:increment, 'rails.db.cache_hits', 1)
          end

          if payload[:row_count]
            send_to_flux(:histogram, 'rails.db.rows_returned', payload[:row_count], tags)
          end

          # === REFLEX: Breadcrumb ===
          add_breadcrumb(
            "#{name}: #{truncate_sql(sql, 100)}",
            category: 'db.query',
            level: :debug,
            data: {
              duration_ms: duration_ms,
              cached: cached,
              operation: operation
            }
          )
        end

        # Fans out an N+1 detection result (hash with :query, :count,
        # :model, :location) to Recall, Reflex, and Flux.
        def handle_n_plus_one_detected(detection, event_data)
          # === RECALL: Log N+1 warning ===
          send_to_recall(:warn, "N+1 query detected", {
            query: detection[:query],
            count: detection[:count],
            model: detection[:model],
            location: detection[:location]
          })

          # === REFLEX: Add warning breadcrumb ===
          add_breadcrumb(
            "N+1 detected: #{detection[:model]} (#{detection[:count]} queries)",
            category: 'db.n_plus_one',
            level: :warning,
            data: detection
          )

          # === FLUX: Track N+1 occurrences ===
          send_to_flux(:increment, 'rails.db.n_plus_one', 1, {
            model: detection[:model]
          })
        end

        # Active Record materialized query rows into model instances.
        def handle_instantiation(event_data)
          payload = event_data[:payload]
          record_count = payload[:record_count]
          class_name = payload[:class_name]

          # === PULSE: Instantiation span ===
          send_to_pulse(event_data, {
            name: "instantiate.#{class_name}",
            category: 'db.instantiation',
            attributes: {
              class_name: class_name,
              record_count: record_count
            }
          })

          # === FLUX: Metrics ===
          send_to_flux(:histogram, 'rails.db.records_instantiated', record_count, {
            model: class_name
          })

          # Flag large instantiations (potential memory issue).
          # FIX: coerce with to_i so a missing record_count cannot raise
          # NoMethodError on nil comparison.
          if record_count.to_i > 1000
            send_to_recall(:warn, "Large record instantiation", {
              model: class_name,
              record_count: record_count
            })
          end
        end

        # A database transaction was opened.
        def handle_start_transaction(event_data)
          add_breadcrumb(
            "Transaction started",
            category: 'db.transaction',
            level: :debug,
            data: {}
          )

          send_to_flux(:increment, 'rails.db.transactions_started', 1)
        end

        # A database transaction finished; payload[:outcome] indicates
        # how it ended (e.g. :commit or :rollback).
        def handle_transaction(event_data)
          payload = event_data[:payload]
          outcome = payload[:outcome]
          duration_ms = event_data[:duration_ms]

          # === PULSE: Transaction span ===
          send_to_pulse(event_data, {
            name: "transaction.#{outcome}",
            category: 'db.transaction',
            attributes: {
              outcome: outcome
            }
          })

          # === FLUX: Metrics ===
          send_to_flux(:increment, "rails.db.transactions.#{outcome}", 1)
          send_to_flux(:timing, 'rails.db.transaction_ms', duration_ms, {
            outcome: outcome
          })

          # === REFLEX: Breadcrumb (rollbacks stand out as warnings) ===
          level = outcome == :rollback ? :warning : :debug
          add_breadcrumb(
            "Transaction #{outcome}",
            category: 'db.transaction',
            level: level,
            data: {
              outcome: outcome,
              duration_ms: duration_ms
            }
          )

          # Log rollbacks as warnings
          if outcome == :rollback
            send_to_recall(:warn, "Transaction rolled back", {
              duration_ms: duration_ms
            })
          end
        end

        # A strict_loading association was lazily loaded.
        def handle_strict_loading_violation(event_data)
          payload = event_data[:payload]
          owner = payload[:owner]
          reflection = payload[:reflection]

          send_to_recall(:warn, "Strict loading violation", {
            owner: owner.to_s,
            association: reflection.to_s
          })

          add_breadcrumb(
            "Strict loading: #{owner} -> #{reflection}",
            category: 'db.strict_loading',
            level: :warning,
            data: {
              owner: owner.to_s,
              association: reflection.to_s
            }
          )

          send_to_flux(:increment, 'rails.db.strict_loading_violations', 1)
        end

        # Classifies a SQL string by its leading verb, case-insensitively.
        # FIX: the previous version upcased/stripped the entire SQL string
        # (allocating a full copy of a possibly large query) and then ALSO
        # matched with /i regexes; lstrip + anchored /i patterns give the
        # same classification without the redundant full-string transform.
        def extract_operation(sql)
          case sql.to_s.lstrip
          when /\ASELECT/i then 'SELECT'
          when /\AINSERT/i then 'INSERT'
          when /\AUPDATE/i then 'UPDATE'
          when /\ADELETE/i then 'DELETE'
          when /\ABEGIN/i then 'BEGIN'
          when /\ACOMMIT/i then 'COMMIT'
          when /\AROLLBACK/i then 'ROLLBACK'
          when /\ASAVEPOINT/i then 'SAVEPOINT'
          else 'OTHER'
          end
        end

        # Returns sql capped at max_length characters (with a "..." suffix
        # when truncated); nil becomes an empty string.
        def truncate_sql(sql, max_length = 500)
          return '' if sql.nil?

          sql.length > max_length ? "#{sql[0, max_length]}..." : sql
        end
      end
    end
  end
end