pigeon-rb 0.1.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (40)
  1. checksums.yaml +7 -0
  2. data/README.md +343 -0
  3. data/lib/pigeon/active_job_integration.rb +32 -0
  4. data/lib/pigeon/api.rb +200 -0
  5. data/lib/pigeon/configuration.rb +161 -0
  6. data/lib/pigeon/core.rb +104 -0
  7. data/lib/pigeon/encryption.rb +213 -0
  8. data/lib/pigeon/generators/hanami/migration_generator.rb +89 -0
  9. data/lib/pigeon/generators/rails/install_generator.rb +32 -0
  10. data/lib/pigeon/generators/rails/migration_generator.rb +20 -0
  11. data/lib/pigeon/generators/rails/templates/create_outbox_messages.rb.erb +34 -0
  12. data/lib/pigeon/generators/rails/templates/initializer.rb.erb +88 -0
  13. data/lib/pigeon/hanami_integration.rb +78 -0
  14. data/lib/pigeon/health_check/kafka.rb +37 -0
  15. data/lib/pigeon/health_check/processor.rb +70 -0
  16. data/lib/pigeon/health_check/queue.rb +69 -0
  17. data/lib/pigeon/health_check.rb +63 -0
  18. data/lib/pigeon/logging/structured_logger.rb +181 -0
  19. data/lib/pigeon/metrics/collector.rb +200 -0
  20. data/lib/pigeon/mock_producer.rb +18 -0
  21. data/lib/pigeon/models/adapters/active_record_adapter.rb +133 -0
  22. data/lib/pigeon/models/adapters/rom_adapter.rb +150 -0
  23. data/lib/pigeon/models/outbox_message.rb +182 -0
  24. data/lib/pigeon/monitoring.rb +113 -0
  25. data/lib/pigeon/outbox.rb +61 -0
  26. data/lib/pigeon/processor/background_processor.rb +109 -0
  27. data/lib/pigeon/processor.rb +798 -0
  28. data/lib/pigeon/publisher.rb +524 -0
  29. data/lib/pigeon/railtie.rb +29 -0
  30. data/lib/pigeon/schema.rb +35 -0
  31. data/lib/pigeon/security.rb +30 -0
  32. data/lib/pigeon/serializer.rb +77 -0
  33. data/lib/pigeon/tasks/pigeon.rake +64 -0
  34. data/lib/pigeon/trace_api.rb +37 -0
  35. data/lib/pigeon/tracing/core.rb +119 -0
  36. data/lib/pigeon/tracing/messaging.rb +144 -0
  37. data/lib/pigeon/tracing.rb +107 -0
  38. data/lib/pigeon/version.rb +5 -0
  39. data/lib/pigeon.rb +52 -0
  40. metadata +127 -0
data/lib/pigeon/processor.rb
@@ -0,0 +1,798 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "json"
4
+ require "concurrent"
5
+ require_relative "processor/background_processor"
6
+ require_relative "metrics/collector"
7
+
8
+ module Pigeon
9
+ # Processor class for handling outbox messages
10
+ class Processor
11
+ include BackgroundProcessor
12
+
13
+ # Initialize a new processor
14
+ # @param auto_start [Boolean] Whether to automatically start processing pending messages on initialization
15
+ def initialize(auto_start: false)
16
+ @mutex = Concurrent::ReentrantReadWriteLock.new
17
+ @processing = Concurrent::AtomicBoolean.new(false)
18
+ @thread_pool = nil
19
+ @metrics = Pigeon.config.metrics_collector || Pigeon::Metrics::Collector.new
20
+ @start_time = Time.now
21
+
22
+ # Start processing if auto_start is true
23
+ start_processing if auto_start
24
+ end
25
+
26
+ # Start processing pending messages in the background
27
+ # @param batch_size [Integer] Number of messages to process in one batch
28
+ # @param interval [Integer] Interval in seconds between processing batches
29
+ # @param thread_count [Integer] Number of threads to use for processing
30
+ # @return [Boolean] Whether processing was started
31
+ def start_processing(batch_size: 100, interval: 5, thread_count: 2)
32
+ result = start_background_processing(batch_size: batch_size, interval: interval, thread_count: thread_count)
33
+
34
+ # Record processor start time
35
+ Pigeon.processor_start_time = Time.now if result
36
+
37
+ result
38
+ end
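
For context, a minimal sketch of how a host application might construct and start a processor with these options; the initializer placement and all values are illustrative, not part of this gem:

    # e.g. config/initializers/pigeon_processor.rb (placement is an assumption)
    require "pigeon"

    processor = Pigeon::Processor.new
    # Poll every 10 seconds, up to 200 messages per batch, on 4 worker threads.
    processor.start_processing(batch_size: 200, interval: 10, thread_count: 4)

    at_exit { processor.stop_processing }
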
39
+
40
+ # Stop processing pending messages
41
+ # @return [Boolean] Whether processing was stopped
42
+ def stop_processing
43
+ stop_background_processing
44
+ end
45
+
46
+ # Process pending outbox messages
47
+ # @param batch_size [Integer] Number of messages to process in one batch
48
+ # @return [Hash] Processing statistics
49
+ def process_pending(batch_size: 100)
50
+ Pigeon.last_processing_run = Time.now
51
+ batch_id = SecureRandom.uuid
52
+ logger = create_batch_logger(batch_id, batch_size)
53
+ log_batch_start(logger, batch_id, batch_size)
54
+ start_time = Time.now
55
+
56
+ stats = { processed: 0, succeeded: 0, failed: 0, retried: 0, dead_lettered: 0 }
57
+
58
+ Pigeon::Tracing.trace_batch_process(batch_size) do |span|
59
+ @mutex.with_read_lock do
60
+ process_pending_and_retry_messages(stats, batch_size, logger, span)
61
+ end
62
+
63
+ finalize_batch_processing(stats, start_time, logger, span)
64
+ end
65
+
66
+ Pigeon.last_successful_processing_run = Time.now if stats[:succeeded].positive?
67
+ stats
68
+ end
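
Where a background loop is not wanted (cron jobs, tests), the same batch pass can be driven synchronously; a sketch, assuming Pigeon has already been configured:

    processor = Pigeon::Processor.new
    stats = processor.process_pending(batch_size: 50)
    # stats is the hash built above, e.g.
    # { processed: 50, succeeded: 48, failed: 2, retried: 1, dead_lettered: 1 }
    warn "#{stats[:failed]} outbox messages failed" if stats[:failed].positive?
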
69
+
70
+ # Process a specific outbox message
71
+ # @param message_id [String, Integer] ID of the message to process
72
+ # @return [Boolean] Success status
73
+ def process_message(message_id)
74
+ Pigeon.config.logger.info("Processing message ID: #{message_id}")
75
+
76
+ # Find the message
77
+ message = Pigeon.find_outbox_message(message_id)
78
+ return false unless message
79
+
80
+ # Process the message
81
+ result = process_single_message(message)
82
+ result[:success]
83
+ end
84
+
85
+ # Send a failed message to a dead letter queue
86
+ # @param message_id [String, Integer] ID of the message to send to DLQ
87
+ # @param dlq_topic [String, nil] Optional dead letter queue topic (defaults to the original topic plus the configured DLQ suffix)
88
+ # @return [Boolean] Success status
89
+ def send_to_dead_letter_queue(message_id, dlq_topic: nil)
90
+ message = Pigeon.find_outbox_message(message_id)
91
+ return false unless message
92
+
93
+ send_message_to_dlq(message, dlq_topic)
94
+ end
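
A sketch of the two single-message entry points, e.g. from a console session; the message ID and topic names are hypothetical:

    processor = Pigeon::Processor.new

    # Re-drive one message by ID; returns true when it is published successfully.
    processor.process_message(42)

    # Route a poison message to an explicit DLQ topic, or omit dlq_topic to use
    # the original topic plus the configured suffix.
    processor.send_to_dead_letter_queue(42, dlq_topic: "orders.created.dlq")
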
95
+
96
+ # Clean up old processed messages
97
+ # @param older_than [ActiveSupport::Duration, Integer] Age threshold for cleanup
98
+ # @return [Integer] Number of records cleaned up
99
+ def cleanup_processed(older_than: 7)
100
+ # This is a placeholder implementation
101
+ # The actual implementation will be added in task 7.2
102
+ Pigeon.config.logger.info("Cleaning up processed messages older than #{older_than} days")
103
+
104
+ # In the actual implementation, we would:
105
+ # 1. Delete processed messages older than the specified threshold
106
+ # 2. Return the number of deleted records
107
+
108
+ # Return mock count
109
+ 0
110
+ end
111
+
112
+ private
113
+
114
+ # Create batch logger with context
115
+ # @param batch_id [String] Batch ID
116
+ # @param batch_size [Integer] Batch size
117
+ # @return [Logger] Logger with context
118
+ def create_batch_logger(batch_id, batch_size)
119
+ logger = Pigeon.config.logger
120
+ return logger unless logger.respond_to?(:with_context)
121
+
122
+ logger.with_context(batch_id: batch_id, batch_size: batch_size)
123
+ end
124
+
125
+ # Log batch start
126
+ # @param logger [Logger] Logger to use
127
+ # @param batch_id [String] Batch ID
128
+ # @param batch_size [Integer] Batch size
129
+ # @return [void]
130
+ def log_batch_start(logger, batch_id, batch_size)
131
+ if logger.respond_to?(:info) && logger.method(:info).arity > 1
132
+ logger.info("Starting to process pending messages", {
133
+ batch_id: batch_id,
134
+ batch_size: batch_size
135
+ })
136
+ else
137
+ logger.info("Processing pending messages, batch size: #{batch_size}")
138
+ end
139
+ end
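
The arity checks used here (and throughout this class) duck-type the logger: an object whose info takes a message plus a required context argument gets structured output, anything else gets plain strings. A minimal stand-in that would satisfy the check; Pigeon ships its own StructuredLogger, so this is only illustrative:

    require "json"

    class ContextLogger
      # Two required parameters, so method(:info).arity == 2, which is > 1,
      # and the structured branch above is taken.
      def info(message, context)
        puts({ level: "info", message: message }.merge(context).to_json)
      end

      # Returning self keeps the sketch short; a real logger would remember the
      # extra context and merge it into every subsequent entry.
      def with_context(_extra)
        self
      end
    end
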
140
+
141
+ # Process pending and retry messages
142
+ # @param stats [Hash] Statistics to update
143
+ # @param batch_size [Integer] Batch size
144
+ # @param logger [Logger] Logger to use
145
+ # @param span [OpenTelemetry::Trace::Span] Tracing span
146
+ # @return [void]
147
+ def process_pending_and_retry_messages(stats, batch_size, logger, span)
148
+ process_message_type("pending", stats, batch_size, logger, span)
149
+ process_message_type("retry", stats, batch_size, logger, span)
150
+ end
151
+
152
+ # Process messages of a specific type
153
+ # @param type [String] Message type ("pending" or "retry")
154
+ # @param stats [Hash] Statistics to update
155
+ # @param batch_size [Integer] Batch size
156
+ # @param logger [Logger] Logger to use
157
+ # @param span [OpenTelemetry::Trace::Span] Tracing span
158
+ # @return [void]
159
+ def process_message_type(type, stats, batch_size, logger, span)
160
+ messages = type == "pending" ? fetch_pending_messages(batch_size) : fetch_retry_ready_messages(batch_size)
161
+ stats[:processed] += messages.size
162
+
163
+ log_fetched_messages(logger, messages.size, type)
164
+ span&.add_attributes("messaging.#{type}_messages" => messages.size)
165
+ process_messages_batch(messages, stats)
166
+ end
167
+
168
+ # Log fetched messages
169
+ # @param logger [Logger] Logger to use
170
+ # @param count [Integer] Number of messages fetched
171
+ # @param status [String] Message status
172
+ # @return [void]
173
+ def log_fetched_messages(logger, count, status)
174
+ if logger.respond_to?(:debug) && logger.method(:debug).arity > 1
175
+ logger.debug("Fetched #{status} messages", { count: count, status: status })
176
+ else
177
+ logger.debug("Fetched #{count} #{status} messages")
178
+ end
179
+ end
180
+
181
+ # Finalize batch processing
182
+ # @param stats [Hash] Processing statistics
183
+ # @param start_time [Time] Processing start time
184
+ # @param logger [Logger] Logger to use
185
+ # @param span [OpenTelemetry::Trace::Span] Tracing span
186
+ # @return [void]
187
+ def finalize_batch_processing(stats, start_time, logger, span)
188
+ record_processing_metrics(stats)
189
+ processing_time = Time.now - start_time
190
+ @metrics.histogram(:processing_batch_duration_seconds, processing_time)
191
+ update_queue_depth_metric
192
+ log_processing_stats(stats, logger, processing_time)
193
+ add_span_attributes(span, stats, processing_time)
194
+ end
195
+
196
+ # Add attributes to span
197
+ # @param span [OpenTelemetry::Trace::Span] Tracing span
198
+ # @param stats [Hash] Processing statistics
199
+ # @param processing_time [Float] Processing time
200
+ # @return [void]
201
+ def add_span_attributes(span, stats, processing_time)
202
+ span&.add_attributes({
203
+ "messaging.processed" => stats[:processed],
204
+ "messaging.succeeded" => stats[:succeeded],
205
+ "messaging.failed" => stats[:failed],
206
+ "messaging.retried" => stats[:retried],
207
+ "messaging.dead_lettered" => stats[:dead_lettered],
208
+ "messaging.processing_time_ms" => (processing_time * 1000).round(2)
209
+ })
210
+ end
211
+
212
+ # Process messages in thread pool or synchronously
213
+ # @param messages [Array<Pigeon::Models::OutboxMessage>] Messages to process
214
+ # @param stats [Hash] Statistics hash to update
215
+ # @return [void]
216
+ def process_messages_batch(messages, stats)
217
+ messages.each do |message|
218
+ if @thread_pool && @processing.true?
219
+ # Process in thread pool if available
220
+ @thread_pool.post do
221
+ process_result = process_single_message(message)
222
+ update_stats(stats, process_result)
223
+ end
224
+ else
225
+ # Process synchronously
226
+ process_result = process_single_message(message)
227
+ update_stats(stats, process_result)
228
+ end
229
+ end
230
+ end
231
+
232
+ # Process a single message
233
+ # @param message [Pigeon::Models::OutboxMessage] Message to process
234
+ # @return [Hash] Processing result
235
+ def process_single_message(message)
236
+ result = { success: false, retried: false, dead_lettered: false }
237
+ start_time = Time.now
238
+ correlation_id = get_correlation_id(message)
239
+ logger = create_logger_with_context(message, correlation_id)
240
+
241
+ mark_message_as_processing(message)
242
+
243
+ Pigeon::Tracing.trace_process(message) do |span|
244
+ log_processing_attempt(logger, message)
245
+ add_retry_count_to_span(span, message)
246
+
247
+ publish_message_to_kafka(message)
248
+ message.mark_as_published
249
+
250
+ processing_time = Time.now - start_time
251
+ record_successful_processing_metrics(message, processing_time, span)
252
+ log_successful_processing(logger, message, processing_time)
253
+
254
+ result[:success] = true
255
+ rescue StandardError => e
256
+ handle_processing_error(message, message.id, e, result, logger)
257
+ record_failure_metrics(message, Time.now - start_time, span, e)
258
+ end
259
+
260
+ result
261
+ end
262
+
263
+ # Get correlation ID from message or generate a new one
264
+ # @param message [Pigeon::Models::OutboxMessage] Message to get correlation ID from
265
+ # @return [String] Correlation ID
266
+ def get_correlation_id(message)
267
+ correlation_id = SecureRandom.uuid
268
+ correlation_id = message.correlation_id || correlation_id if message.respond_to?(:correlation_id)
269
+ correlation_id
270
+ end
271
+
272
+ # Create a logger with correlation ID context if supported
273
+ # @param message [Pigeon::Models::OutboxMessage] Message to create logger for
274
+ # @param correlation_id [String] Correlation ID
275
+ # @return [Logger] Logger with context
276
+ def create_logger_with_context(message, correlation_id)
277
+ logger = Pigeon.config.logger
278
+ if logger.respond_to?(:with_context)
279
+ logger.with_context(
280
+ correlation_id: correlation_id,
281
+ message_id: message.id,
282
+ topic: message.topic
283
+ )
284
+ else
285
+ logger
286
+ end
287
+ end
288
+
289
+ # Log processing attempt
290
+ # @param logger [Logger] Logger to use
291
+ # @param message [Pigeon::Models::OutboxMessage] Message being processed
292
+ # @return [void]
293
+ def log_processing_attempt(logger, message)
294
+ if logger.respond_to?(:info) && logger.method(:info).arity > 1
295
+ logger.info("Processing message", {
296
+ status: "processing",
297
+ retry_count: message.retry_count
298
+ })
299
+ else
300
+ logger.info("Processing message #{message.id} (retry count: #{message.retry_count})")
301
+ end
302
+ end
303
+
304
+ # Add retry count to span
305
+ # @param span [OpenTelemetry::Trace::Span] Tracing span
306
+ # @param message [Pigeon::Models::OutboxMessage] Message being processed
307
+ # @return [void]
308
+ def add_retry_count_to_span(span, message)
309
+ span&.add_attributes("messaging.retry_count" => message.retry_count)
310
+ end
311
+
312
+ # Record successful processing metrics
313
+ # @param message [Pigeon::Models::OutboxMessage] Message that was processed
314
+ # @param processing_time [Float] Processing time in seconds
315
+ # @param span [OpenTelemetry::Trace::Span] Tracing span
316
+ # @return [void]
317
+ def record_successful_processing_metrics(message, processing_time, span)
318
+ @metrics.histogram(:message_processing_duration_seconds, processing_time, { topic: message.topic })
319
+
320
+ return unless message.created_at
321
+
322
+ message_age = Time.now - message.created_at
323
+ @metrics.histogram(:message_age_at_processing_seconds, message_age, { topic: message.topic })
324
+ span&.add_attributes("messaging.age_seconds" => message_age)
325
+ span&.add_attributes("messaging.processing_time_ms" => (processing_time * 1000).round(2))
326
+ end
327
+
328
+ # Log successful processing
329
+ # @param logger [Logger] Logger to use
330
+ # @param message [Pigeon::Models::OutboxMessage] Message that was processed
331
+ # @param processing_time [Float] Processing time in seconds
332
+ # @return [void]
333
+ def log_successful_processing(logger, message, processing_time)
334
+ processing_time_ms = (processing_time * 1000).round(2)
335
+ if logger.respond_to?(:info) && logger.method(:info).arity > 1
336
+ logger.info("Successfully processed message", {
337
+ status: "published",
338
+ processing_time_ms: processing_time_ms
339
+ })
340
+ else
341
+ logger.info("Successfully processed message #{message.id} to topic #{message.topic} in #{processing_time_ms}ms")
342
+ end
343
+ end
344
+
345
+ # Record failure metrics
346
+ # @param message [Pigeon::Models::OutboxMessage] Message that failed
347
+ # @param processing_time [Float] Processing time in seconds
348
+ # @param span [OpenTelemetry::Trace::Span] Tracing span
349
+ # @param error [StandardError] Error that occurred
350
+ # @return [void]
351
+ def record_failure_metrics(message, processing_time, span, error)
352
+ @metrics.histogram(:message_processing_failure_duration_seconds, processing_time, { topic: message.topic })
353
+ span&.record_exception(error)
354
+ span&.status = OpenTelemetry::Trace::Status.error(error.message) if span && defined?(OpenTelemetry::Trace::Status)
355
+ end
356
+
357
+ # Update statistics based on processing result
358
+ # @param stats [Hash] Statistics hash to update
359
+ # @param result [Hash] Processing result
360
+ # @return [void]
361
+ def update_stats(stats, result)
362
+ # Use a mutex to ensure thread safety when updating stats
363
+ @mutex.with_write_lock do
364
+ if result[:success]
365
+ stats[:succeeded] += 1
366
+ else
367
+ stats[:failed] += 1
368
+ stats[:retried] += 1 if result[:retried]
369
+ stats[:dead_lettered] += 1 if result[:dead_lettered]
370
+ end
371
+ end
372
+ end
373
+
374
+ # Log processing statistics
375
+ # @param stats [Hash] Processing statistics
376
+ # @param logger [Pigeon::Logging::StructuredLogger] Logger with context
377
+ # @param processing_time [Float] Processing time in seconds
378
+ # @return [void]
379
+ def log_processing_stats(stats, logger = nil, processing_time = nil)
380
+ # Use provided logger or default logger
381
+ logger ||= Pigeon.config.logger
382
+
383
+ if logger.respond_to?(:info) && logger.method(:info).arity > 1
384
+ # Structured logger
385
+ logger.info("Completed processing batch", {
386
+ processed: stats[:processed],
387
+ succeeded: stats[:succeeded],
388
+ failed: stats[:failed],
389
+ retried: stats[:retried],
390
+ dead_lettered: stats[:dead_lettered],
391
+ processing_time_ms: processing_time ? (processing_time * 1000).round(2) : nil
392
+ })
393
+ else
394
+ # Standard logger
395
+ time_info = processing_time ? " in #{(processing_time * 1000).round(2)}ms" : ""
396
+ logger.info(
397
+ "Processed #{stats[:processed]} messages#{time_info}: " \
398
+ "#{stats[:succeeded]} succeeded, " \
399
+ "#{stats[:failed]} failed, " \
400
+ "#{stats[:retried]} retried, " \
401
+ "#{stats[:dead_lettered]} sent to dead letter queue"
402
+ )
403
+ end
404
+ end
405
+
406
+ # Record processing metrics
407
+ # @param stats [Hash] Processing statistics
408
+ # @return [void]
409
+ def record_processing_metrics(stats)
410
+ @metrics.increment(:messages_processed_total, stats[:processed])
411
+ @metrics.increment(:messages_succeeded_total, stats[:succeeded])
412
+ @metrics.increment(:messages_failed_total, stats[:failed])
413
+ @metrics.increment(:messages_retried_total, stats[:retried])
414
+ @metrics.increment(:messages_dead_lettered_total, stats[:dead_lettered])
415
+ end
416
+
417
+ # Update queue depth metric
418
+ # @return [void]
419
+ def update_queue_depth_metric
420
+ # Count pending messages
421
+ pending_count = Pigeon.count_outbox_messages_by_status("pending")
422
+ @metrics.gauge(:outbox_queue_depth, pending_count)
423
+
424
+ # Count messages in retry state
425
+ retry_count = Pigeon.count_outbox_messages_by_status("retry")
426
+ @metrics.gauge(:outbox_retry_queue_depth, retry_count)
427
+
428
+ # Count failed messages
429
+ failed_count = Pigeon.count_outbox_messages_by_status("failed")
430
+ @metrics.gauge(:outbox_failed_messages, failed_count)
431
+ rescue NotImplementedError => e
432
+ # This is expected in test environments where the adapter doesn't implement count_by_status
433
+ if Pigeon.config.logger.respond_to?(:debug)
434
+ Pigeon.config.logger.debug("Skipping queue depth metrics: #{e.message}")
435
+ end
436
+ end
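
The metrics side relies on only three collector methods (increment, gauge, histogram), so a custom collector can be swapped in through the metrics_collector read in the initializer above. A sketch; the writable config attribute is an assumption about Pigeon::Configuration:

    class HashMetrics
      def initialize
        @counters   = Hash.new(0)
        @gauges     = {}
        @histograms = Hash.new { |hash, key| hash[key] = [] }
      end

      def increment(name, by = 1, labels = {})
        @counters[[name, labels]] += by
      end

      def gauge(name, value, labels = {})
        @gauges[[name, labels]] = value
      end

      def histogram(name, value, labels = {})
        @histograms[[name, labels]] << value
      end
    end

    # Assumed to be settable; see pigeon/configuration.rb for the real interface.
    Pigeon.config.metrics_collector = HashMetrics.new
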
437
+
438
+ # Fetch pending messages from the database
439
+ # @param limit [Integer] Maximum number of messages to fetch
440
+ # @return [Array<Pigeon::Models::OutboxMessage>] Array of pending messages
441
+ def fetch_pending_messages(limit)
442
+ Pigeon.find_outbox_messages_by_status("pending", limit)
443
+ end
444
+
445
+ # Fetch messages that are ready for retry
446
+ # @param limit [Integer] Maximum number of messages to fetch
447
+ # @return [Array<Pigeon::Models::OutboxMessage>] Array of messages ready for retry
448
+ def fetch_retry_ready_messages(limit)
449
+ Pigeon.find_outbox_messages_ready_for_retry(limit)
450
+ end
451
+
452
+ # Mark a message as processing
453
+ # @param message [Pigeon::Models::OutboxMessage] Message to mark as processing
454
+ # @return [Boolean] Whether the update was successful
455
+ def mark_message_as_processing(message)
456
+ message.status = "processing"
457
+ message.updated_at = Time.now
458
+ message.save
459
+ end
460
+
461
+ # Publish a message to Kafka
462
+ # @param message [Pigeon::Models::OutboxMessage] Message to publish
463
+ # @return [void]
464
+ def publish_message_to_kafka(message)
465
+ message_payload = prepare_message_payload(message)
466
+ message_options = build_message_options(message)
467
+ Pigeon.karafka_producer.produce_sync(message_payload, **message_options)
468
+ end
469
+
470
+ # Prepare the message payload for publishing
471
+ # @param message [Pigeon::Models::OutboxMessage] Message to prepare
472
+ # @return [String] Prepared payload
473
+ def prepare_message_payload(message)
474
+ message.payload.is_a?(String) ? message.payload : message.payload.to_json
475
+ end
476
+
477
+ # Build message options for Kafka
478
+ # @param message [Pigeon::Models::OutboxMessage] Message to build options for
479
+ # @return [Hash] Message options
480
+ def build_message_options(message)
481
+ options = { topic: message.topic }
482
+ options[:key] = message.key if message.key
483
+ options[:headers] = message.headers if message.headers && !message.headers.empty?
484
+ options[:partition] = message.partition if message.partition
485
+ options
486
+ end
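
Taken together with prepare_message_payload, the publish step in publish_message_to_kafka reduces to a call along these lines; every value below is hypothetical:

    # An outbox row with a Hash payload, topic "orders.created", key "order-123"
    # and one header would be produced roughly as:
    Pigeon.karafka_producer.produce_sync(
      '{"order_id":123,"status":"created"}',   # prepare_message_payload
      topic: "orders.created",                 # build_message_options
      key: "order-123",
      headers: { "x-request-id" => "abc" }
      # :partition is omitted because the row has none
    )
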
487
+
488
+ # Handle processing error
489
+ # @param message [Pigeon::Models::OutboxMessage] Message that failed
490
+ # @param message_id [String, Integer] ID of the message
491
+ # @param error [StandardError] Error that occurred
492
+ # @param result [Hash] Processing result to update
493
+ # @param logger [Pigeon::Logging::StructuredLogger] Logger with context
494
+ # @return [void]
495
+ def handle_processing_error(message, message_id, error, result = {}, logger = nil)
496
+ logger = create_error_logger(message, message_id, logger)
497
+ error_classification = Pigeon::Configuration.classify_error(error)
498
+ log_processing_error(logger, message_id, error, error_classification)
499
+
500
+ if message.max_retries_exceeded?
501
+ handle_max_retries_exceeded(message, message_id, error, result, logger)
502
+ else
503
+ handle_retry(message, message_id, result, logger)
504
+ end
505
+ end
506
+
507
+ # Create error logger with context
508
+ # @param message [Pigeon::Models::OutboxMessage] Message that failed
509
+ # @param message_id [String, Integer] ID of the message
510
+ # @param logger [Logger] Existing logger or nil
511
+ # @return [Logger] Logger with context
512
+ def create_error_logger(message, message_id, logger)
513
+ return logger || Pigeon.config.logger unless !logger && Pigeon.config.logger.respond_to?(:with_context)
514
+
515
+ context = { message_id: message_id, topic: message.topic }
516
+ if message.respond_to?(:correlation_id) && message.correlation_id
517
+ context[:correlation_id] =
518
+ message.correlation_id
519
+ end
520
+ Pigeon.config.logger.with_context(context)
521
+ end
522
+
523
+ # Log processing error
524
+ # @param logger [Logger] Logger to use
525
+ # @param message_id [String, Integer] ID of the message
526
+ # @param error [StandardError] Error that occurred
527
+ # @param error_classification [String] Error classification
528
+ # @return [void]
529
+ def log_processing_error(logger, message_id, error, error_classification)
530
+ if logger.respond_to?(:error) && logger.method(:error).arity > 1
531
+ logger.error("Failed to process message",
532
+ {
533
+ error_class: error.class.name,
534
+ error_message: error.message,
535
+ error_classification: error_classification
536
+ },
537
+ error)
538
+ else
539
+ logger.error("Failed to process message #{message_id}: #{error.message} (#{error_classification})")
540
+ end
541
+ end
542
+
543
+ # Handle a message that has exceeded maximum retries
544
+ # @param message [Pigeon::Models::OutboxMessage] Message that failed
545
+ # @param message_id [String, Integer] ID of the message
546
+ # @param error [StandardError] Error that occurred
547
+ # @param result [Hash] Processing result to update
548
+ # @param logger [Pigeon::Logging::StructuredLogger] Logger with context
549
+ # @return [void]
550
+ def handle_max_retries_exceeded(message, message_id, error, result, logger = nil)
551
+ logger = create_error_logger(message, message_id, logger)
552
+ message.mark_as_failed(error)
553
+ log_max_retries_exceeded(logger, message, message_id)
554
+ send_to_dlq_if_enabled(message, result, logger)
555
+ end
556
+
557
+ # Log max retries exceeded
558
+ # @param logger [Logger] Logger to use
559
+ # @param message [Pigeon::Models::OutboxMessage] Message that failed
560
+ # @param message_id [String, Integer] ID of the message
561
+ # @return [void]
562
+ def log_max_retries_exceeded(logger, message, message_id)
563
+ if logger.respond_to?(:error) && logger.method(:error).arity > 1
564
+ logger.error("Message has exceeded maximum retries", {
565
+ max_retries: message.max_retries,
566
+ retry_count: message.retry_count,
567
+ status: "failed"
568
+ })
569
+ else
570
+ logger.error("Message #{message_id} has exceeded maximum retries " \
571
+ "(#{message.retry_count}/#{message.max_retries}) and is marked as failed")
572
+ end
573
+ end
574
+
575
+ # Send to DLQ if enabled
576
+ # @param message [Pigeon::Models::OutboxMessage] Message to send
577
+ # @param result [Hash] Processing result to update
578
+ # @param logger [Logger] Logger to use
579
+ # @return [void]
580
+ def send_to_dlq_if_enabled(message, result, logger)
581
+ return unless Pigeon.config.respond_to?(:dead_letter_queue_enabled) && Pigeon.config.dead_letter_queue_enabled
582
+
583
+ result[:dead_lettered] = send_message_to_dlq(message, nil, logger)
584
+ end
585
+
586
+ # Handle a message that needs to be retried
587
+ # @param message [Pigeon::Models::OutboxMessage] Message to retry
588
+ # @param message_id [String, Integer] ID of the message
589
+ # @param result [Hash] Processing result to update
590
+ # @param logger [Pigeon::Logging::StructuredLogger] Logger with context
591
+ # @return [void]
592
+ def handle_retry(message, message_id, result, logger = nil)
593
+ logger = create_error_logger(message, message_id, logger)
594
+ message.increment_retry_count
595
+ next_retry = message.next_retry_at
596
+
597
+ record_retry_metrics(message, next_retry)
598
+ log_retry_scheduled(logger, message, message_id, next_retry)
599
+
600
+ result[:retried] = true
601
+ end
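
The retry delay itself is owned by the message model: increment_retry_count and next_retry_at live in models/outbox_message.rb, not in this class. As an illustration only, a backoff of the kind such models commonly implement:

    # Illustrative sketch, not the gem's actual schedule.
    def next_retry_at(retry_count, base: 30)
      Time.now + (base * (2**retry_count))   # 30s, 60s, 120s, ... between attempts
    end
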
602
+
603
+ # Record retry metrics
604
+ # @param message [Pigeon::Models::OutboxMessage] Message being retried
605
+ # @param next_retry [Time] Next retry time
606
+ # @return [void]
607
+ def record_retry_metrics(message, next_retry)
608
+ @metrics.histogram(:message_retry_count, message.retry_count, { topic: message.topic })
609
+
610
+ return unless next_retry
611
+
612
+ time_until_retry = next_retry - Time.now
613
+ @metrics.histogram(:message_next_retry_delay_seconds, time_until_retry, { topic: message.topic })
614
+ end
615
+
616
+ # Log retry scheduled
617
+ # @param logger [Logger] Logger to use
618
+ # @param message [Pigeon::Models::OutboxMessage] Message being retried
619
+ # @param message_id [String, Integer] ID of the message
620
+ # @param next_retry [Time] Next retry time
621
+ # @return [void]
622
+ def log_retry_scheduled(logger, message, message_id, next_retry)
623
+ time_until_retry = next_retry ? (next_retry - Time.now).round(2) : 0
624
+
625
+ if logger.respond_to?(:info) && logger.method(:info).arity > 1
626
+ logger.info("Message scheduled for retry", {
627
+ retry_count: message.retry_count,
628
+ next_retry_at: next_retry&.iso8601,
629
+ retry_delay_seconds: time_until_retry,
630
+ status: "pending"
631
+ })
632
+ else
633
+ logger.info("Message #{message_id} will be retried at #{next_retry}, retry count: #{message.retry_count}")
634
+ end
635
+ end
636
+
637
+ # Send a message to the dead letter queue
638
+ # @param message [Pigeon::Models::OutboxMessage] Message to send to DLQ
639
+ # @param dlq_topic [String, nil] Optional dead letter queue topic
640
+ # @param logger [Pigeon::Logging::StructuredLogger] Logger with context
641
+ # @return [Boolean] Whether the message was sent successfully
642
+ def send_message_to_dlq(message, dlq_topic = nil, logger = nil)
643
+ logger = create_dlq_logger(message, logger)
644
+ topic = dlq_topic || "#{message.topic}#{Pigeon.config.dead_letter_queue_suffix}"
645
+ options = build_dlq_message_options(message, topic)
646
+ payload = prepare_message_payload(message)
647
+
648
+ log_dlq_send_attempt(logger, message, topic)
649
+ send_to_dlq_topic(message, payload, options, logger)
650
+ end
651
+
652
+ # Create DLQ logger with context
653
+ # @param message [Pigeon::Models::OutboxMessage] Message being sent to DLQ
654
+ # @param logger [Logger] Existing logger or nil
655
+ # @return [Logger] Logger with context
656
+ def create_dlq_logger(message, logger)
657
+ return logger || Pigeon.config.logger unless !logger && Pigeon.config.logger.respond_to?(:with_context)
658
+
659
+ context = { message_id: message.id, topic: message.topic }
660
+ if message.respond_to?(:correlation_id) && message.correlation_id
661
+ context[:correlation_id] =
662
+ message.correlation_id
663
+ end
664
+ Pigeon.config.logger.with_context(context)
665
+ end
666
+
667
+ # Build DLQ message options
668
+ # @param message [Pigeon::Models::OutboxMessage] Original message
669
+ # @param topic [String] DLQ topic
670
+ # @return [Hash] Message options
671
+ def build_dlq_message_options(message, topic)
672
+ {
673
+ topic: topic,
674
+ key: message.key,
675
+ headers: build_dlq_headers(message),
676
+ partition: message.partition
677
+ }
678
+ end
679
+
680
+ # Log DLQ send attempt
681
+ # @param logger [Logger] Logger to use
682
+ # @param message [Pigeon::Models::OutboxMessage] Message being sent
683
+ # @param topic [String] DLQ topic
684
+ # @return [void]
685
+ def log_dlq_send_attempt(logger, message, topic)
686
+ if logger.respond_to?(:info) && logger.method(:info).arity > 1
687
+ logger.info("Sending message to dead letter queue", {
688
+ dlq_topic: topic,
689
+ original_topic: message.topic,
690
+ retry_count: message.retry_count,
691
+ error_message: message.error_message
692
+ })
693
+ else
694
+ logger.info("Sending message #{message.id} to dead letter queue topic: #{topic}")
695
+ end
696
+ end
697
+
698
+ # Build headers for dead letter queue message
699
+ # @param message [Pigeon::Models::OutboxMessage] Original message
700
+ # @return [Hash] Headers for DLQ message
701
+ def build_dlq_headers(message)
702
+ headers = message.headers.dup || {}
703
+ headers["x-original-topic"] = message.topic
704
+ headers["x-error-message"] = message.error_message if message.error_message
705
+ headers["x-retry-count"] = message.retry_count.to_s
706
+ headers["x-original-timestamp"] = message.created_at.to_s
707
+ headers
708
+ end
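
So a message that exhausted its retries against topic "orders.created" would arrive on the DLQ with headers along these lines, merged into whatever headers it already carried (values are illustrative):

    {
      "x-original-topic"     => "orders.created",
      "x-error-message"      => "delivery timed out",
      "x-retry-count"        => "5",
      "x-original-timestamp" => "2024-01-15 10:30:00 +0000"
    }
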
709
+
710
+ # Send message to DLQ topic
711
+ # @param message [Pigeon::Models::OutboxMessage] Original message
712
+ # @param payload [String] Message payload
713
+ # @param options [Hash] Message options
714
+ # @param logger [Pigeon::Logging::StructuredLogger] Logger with context
715
+ # @return [Boolean] Whether the message was sent successfully
716
+ def send_to_dlq_topic(message, payload, options, logger = nil)
717
+ logger = create_dlq_topic_logger(message, options, logger)
718
+ start_time = Time.now
719
+
720
+ begin
721
+ Pigeon.karafka_producer.produce_sync(payload, **options)
722
+ handle_dlq_success(message, options, Time.now - start_time, logger)
723
+ true
724
+ rescue StandardError => e
725
+ handle_dlq_failure(message, options, e, logger)
726
+ false
727
+ end
728
+ end
729
+
730
+ # Create DLQ topic logger with context
731
+ # @param message [Pigeon::Models::OutboxMessage] Message being sent
732
+ # @param options [Hash] Message options
733
+ # @param logger [Logger] Existing logger or nil
734
+ # @return [Logger] Logger with context
735
+ def create_dlq_topic_logger(message, options, logger)
736
+ return logger || Pigeon.config.logger unless !logger && Pigeon.config.logger.respond_to?(:with_context)
737
+
738
+ context = {
739
+ message_id: message.id,
740
+ topic: message.topic,
741
+ dlq_topic: options[:topic]
742
+ }
743
+ if message.respond_to?(:correlation_id) && message.correlation_id
744
+ context[:correlation_id] =
745
+ message.correlation_id
746
+ end
747
+ Pigeon.config.logger.with_context(context)
748
+ end
749
+
750
+ # Handle successful DLQ send
751
+ # @param message [Pigeon::Models::OutboxMessage] Original message
752
+ # @param options [Hash] Message options
753
+ # @param dlq_time [Float] Time taken to send
754
+ # @param logger [Logger] Logger to use
755
+ # @return [void]
756
+ def handle_dlq_success(message, options, dlq_time, logger)
757
+ @metrics.histogram(:dlq_publish_duration_seconds, dlq_time, {
758
+ topic: message.topic,
759
+ dlq_topic: options[:topic]
760
+ })
761
+
762
+ if logger.respond_to?(:info) && logger.method(:info).arity > 1
763
+ logger.info("Successfully sent message to dead letter queue", {
764
+ dlq_topic: options[:topic],
765
+ processing_time_ms: (dlq_time * 1000).round(2)
766
+ })
767
+ else
768
+ logger.info("Successfully sent message #{message.id} to dead letter queue topic: #{options[:topic]}")
769
+ end
770
+ end
771
+
772
+ # Handle failed DLQ send
773
+ # @param message [Pigeon::Models::OutboxMessage] Original message
774
+ # @param options [Hash] Message options
775
+ # @param error [StandardError] Error that occurred
776
+ # @param logger [Logger] Logger to use
777
+ # @return [void]
778
+ def handle_dlq_failure(message, options, error, logger)
779
+ if logger.respond_to?(:error) && logger.method(:error).arity > 1
780
+ logger.error("Failed to send message to dead letter queue",
781
+ {
782
+ dlq_topic: options[:topic],
783
+ error_class: error.class.name,
784
+ error_message: error.message
785
+ },
786
+ error)
787
+ else
788
+ logger.error("Failed to send message #{message.id} to dead letter queue: #{error.message}")
789
+ end
790
+
791
+ @metrics.increment(:dlq_publish_failures_total, 1, {
792
+ topic: message.topic,
793
+ dlq_topic: options[:topic],
794
+ error: error.class.name
795
+ })
796
+ end
797
+ end
798
+ end