fractor 0.1.6 → 0.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (172)
  1. checksums.yaml +4 -4
  2. data/.rubocop_todo.yml +227 -102
  3. data/README.adoc +113 -1940
  4. data/docs/.lycheeignore +16 -0
  5. data/docs/Gemfile +24 -0
  6. data/docs/README.md +157 -0
  7. data/docs/_config.yml +151 -0
  8. data/docs/_features/error-handling.adoc +1192 -0
  9. data/docs/_features/index.adoc +80 -0
  10. data/docs/_features/monitoring.adoc +589 -0
  11. data/docs/_features/signal-handling.adoc +202 -0
  12. data/docs/_features/workflows.adoc +1235 -0
  13. data/docs/_guides/continuous-mode.adoc +736 -0
  14. data/docs/_guides/cookbook.adoc +1133 -0
  15. data/docs/_guides/index.adoc +55 -0
  16. data/docs/_guides/pipeline-mode.adoc +730 -0
  17. data/docs/_guides/troubleshooting.adoc +358 -0
  18. data/docs/_pages/architecture.adoc +1390 -0
  19. data/docs/_pages/core-concepts.adoc +1392 -0
  20. data/docs/_pages/design-principles.adoc +862 -0
  21. data/docs/_pages/getting-started.adoc +290 -0
  22. data/docs/_pages/installation.adoc +143 -0
  23. data/docs/_reference/api.adoc +1080 -0
  24. data/docs/_reference/error-reporting.adoc +670 -0
  25. data/docs/_reference/examples.adoc +181 -0
  26. data/docs/_reference/index.adoc +96 -0
  27. data/docs/_reference/troubleshooting.adoc +862 -0
  28. data/docs/_tutorials/complex-workflows.adoc +1022 -0
  29. data/docs/_tutorials/data-processing-pipeline.adoc +740 -0
  30. data/docs/_tutorials/first-application.adoc +384 -0
  31. data/docs/_tutorials/index.adoc +48 -0
  32. data/docs/_tutorials/long-running-services.adoc +931 -0
  33. data/docs/assets/images/favicon-16.png +0 -0
  34. data/docs/assets/images/favicon-32.png +0 -0
  35. data/docs/assets/images/favicon-48.png +0 -0
  36. data/docs/assets/images/favicon.ico +0 -0
  37. data/docs/assets/images/favicon.png +0 -0
  38. data/docs/assets/images/favicon.svg +45 -0
  39. data/docs/assets/images/fractor-icon.svg +49 -0
  40. data/docs/assets/images/fractor-logo.svg +61 -0
  41. data/docs/index.adoc +131 -0
  42. data/docs/lychee.toml +39 -0
  43. data/examples/api_aggregator/README.adoc +627 -0
  44. data/examples/api_aggregator/api_aggregator.rb +376 -0
  45. data/examples/auto_detection/README.adoc +407 -29
  46. data/examples/continuous_chat_common/message_protocol.rb +1 -1
  47. data/examples/error_reporting.rb +207 -0
  48. data/examples/file_processor/README.adoc +170 -0
  49. data/examples/file_processor/file_processor.rb +615 -0
  50. data/examples/file_processor/sample_files/invalid.csv +1 -0
  51. data/examples/file_processor/sample_files/orders.xml +24 -0
  52. data/examples/file_processor/sample_files/products.json +23 -0
  53. data/examples/file_processor/sample_files/users.csv +6 -0
  54. data/examples/hierarchical_hasher/README.adoc +629 -41
  55. data/examples/image_processor/README.adoc +610 -0
  56. data/examples/image_processor/image_processor.rb +349 -0
  57. data/examples/image_processor/processed_images/sample_10_processed.jpg.json +12 -0
  58. data/examples/image_processor/processed_images/sample_1_processed.jpg.json +12 -0
  59. data/examples/image_processor/processed_images/sample_2_processed.jpg.json +12 -0
  60. data/examples/image_processor/processed_images/sample_3_processed.jpg.json +12 -0
  61. data/examples/image_processor/processed_images/sample_4_processed.jpg.json +12 -0
  62. data/examples/image_processor/processed_images/sample_5_processed.jpg.json +12 -0
  63. data/examples/image_processor/processed_images/sample_6_processed.jpg.json +12 -0
  64. data/examples/image_processor/processed_images/sample_7_processed.jpg.json +12 -0
  65. data/examples/image_processor/processed_images/sample_8_processed.jpg.json +12 -0
  66. data/examples/image_processor/processed_images/sample_9_processed.jpg.json +12 -0
  67. data/examples/image_processor/test_images/sample_1.png +1 -0
  68. data/examples/image_processor/test_images/sample_10.png +1 -0
  69. data/examples/image_processor/test_images/sample_2.png +1 -0
  70. data/examples/image_processor/test_images/sample_3.png +1 -0
  71. data/examples/image_processor/test_images/sample_4.png +1 -0
  72. data/examples/image_processor/test_images/sample_5.png +1 -0
  73. data/examples/image_processor/test_images/sample_6.png +1 -0
  74. data/examples/image_processor/test_images/sample_7.png +1 -0
  75. data/examples/image_processor/test_images/sample_8.png +1 -0
  76. data/examples/image_processor/test_images/sample_9.png +1 -0
  77. data/examples/log_analyzer/README.adoc +662 -0
  78. data/examples/log_analyzer/log_analyzer.rb +579 -0
  79. data/examples/log_analyzer/sample_logs/apache.log +20 -0
  80. data/examples/log_analyzer/sample_logs/json.log +15 -0
  81. data/examples/log_analyzer/sample_logs/nginx.log +15 -0
  82. data/examples/log_analyzer/sample_logs/rails.log +29 -0
  83. data/examples/multi_work_type/README.adoc +576 -26
  84. data/examples/performance_monitoring.rb +120 -0
  85. data/examples/pipeline_processing/README.adoc +740 -26
  86. data/examples/pipeline_processing/pipeline_processing.rb +2 -2
  87. data/examples/priority_work_example.rb +155 -0
  88. data/examples/producer_subscriber/README.adoc +889 -46
  89. data/examples/scatter_gather/README.adoc +829 -27
  90. data/examples/simple/README.adoc +347 -0
  91. data/examples/specialized_workers/README.adoc +622 -26
  92. data/examples/specialized_workers/specialized_workers.rb +44 -8
  93. data/examples/stream_processor/README.adoc +206 -0
  94. data/examples/stream_processor/stream_processor.rb +284 -0
  95. data/examples/web_scraper/README.adoc +625 -0
  96. data/examples/web_scraper/web_scraper.rb +285 -0
  97. data/examples/workflow/README.adoc +406 -0
  98. data/examples/workflow/circuit_breaker/README.adoc +360 -0
  99. data/examples/workflow/circuit_breaker/circuit_breaker_workflow.rb +225 -0
  100. data/examples/workflow/conditional/README.adoc +483 -0
  101. data/examples/workflow/conditional/conditional_workflow.rb +215 -0
  102. data/examples/workflow/dead_letter_queue/README.adoc +374 -0
  103. data/examples/workflow/dead_letter_queue/dead_letter_queue_workflow.rb +217 -0
  104. data/examples/workflow/fan_out/README.adoc +381 -0
  105. data/examples/workflow/fan_out/fan_out_workflow.rb +202 -0
  106. data/examples/workflow/retry/README.adoc +248 -0
  107. data/examples/workflow/retry/retry_workflow.rb +195 -0
  108. data/examples/workflow/simple_linear/README.adoc +267 -0
  109. data/examples/workflow/simple_linear/simple_linear_workflow.rb +175 -0
  110. data/examples/workflow/simplified/README.adoc +329 -0
  111. data/examples/workflow/simplified/simplified_workflow.rb +222 -0
  112. data/exe/fractor +10 -0
  113. data/lib/fractor/cli.rb +288 -0
  114. data/lib/fractor/configuration.rb +307 -0
  115. data/lib/fractor/continuous_server.rb +60 -65
  116. data/lib/fractor/error_formatter.rb +72 -0
  117. data/lib/fractor/error_report_generator.rb +152 -0
  118. data/lib/fractor/error_reporter.rb +244 -0
  119. data/lib/fractor/error_statistics.rb +147 -0
  120. data/lib/fractor/execution_tracer.rb +162 -0
  121. data/lib/fractor/logger.rb +230 -0
  122. data/lib/fractor/main_loop_handler.rb +406 -0
  123. data/lib/fractor/main_loop_handler3.rb +135 -0
  124. data/lib/fractor/main_loop_handler4.rb +299 -0
  125. data/lib/fractor/performance_metrics_collector.rb +181 -0
  126. data/lib/fractor/performance_monitor.rb +215 -0
  127. data/lib/fractor/performance_report_generator.rb +202 -0
  128. data/lib/fractor/priority_work.rb +93 -0
  129. data/lib/fractor/priority_work_queue.rb +189 -0
  130. data/lib/fractor/result_aggregator.rb +32 -0
  131. data/lib/fractor/shutdown_handler.rb +168 -0
  132. data/lib/fractor/signal_handler.rb +80 -0
  133. data/lib/fractor/supervisor.rb +382 -269
  134. data/lib/fractor/supervisor_logger.rb +88 -0
  135. data/lib/fractor/version.rb +1 -1
  136. data/lib/fractor/work.rb +12 -0
  137. data/lib/fractor/work_distribution_manager.rb +151 -0
  138. data/lib/fractor/work_queue.rb +20 -0
  139. data/lib/fractor/work_result.rb +181 -9
  140. data/lib/fractor/worker.rb +73 -0
  141. data/lib/fractor/workflow/builder.rb +210 -0
  142. data/lib/fractor/workflow/chain_builder.rb +169 -0
  143. data/lib/fractor/workflow/circuit_breaker.rb +183 -0
  144. data/lib/fractor/workflow/circuit_breaker_orchestrator.rb +208 -0
  145. data/lib/fractor/workflow/circuit_breaker_registry.rb +112 -0
  146. data/lib/fractor/workflow/dead_letter_queue.rb +334 -0
  147. data/lib/fractor/workflow/execution_hooks.rb +39 -0
  148. data/lib/fractor/workflow/execution_strategy.rb +225 -0
  149. data/lib/fractor/workflow/execution_trace.rb +134 -0
  150. data/lib/fractor/workflow/helpers.rb +191 -0
  151. data/lib/fractor/workflow/job.rb +290 -0
  152. data/lib/fractor/workflow/job_dependency_validator.rb +120 -0
  153. data/lib/fractor/workflow/logger.rb +110 -0
  154. data/lib/fractor/workflow/pre_execution_context.rb +193 -0
  155. data/lib/fractor/workflow/retry_config.rb +156 -0
  156. data/lib/fractor/workflow/retry_orchestrator.rb +184 -0
  157. data/lib/fractor/workflow/retry_strategy.rb +93 -0
  158. data/lib/fractor/workflow/structured_logger.rb +30 -0
  159. data/lib/fractor/workflow/type_compatibility_validator.rb +222 -0
  160. data/lib/fractor/workflow/visualizer.rb +211 -0
  161. data/lib/fractor/workflow/workflow_context.rb +132 -0
  162. data/lib/fractor/workflow/workflow_executor.rb +669 -0
  163. data/lib/fractor/workflow/workflow_result.rb +55 -0
  164. data/lib/fractor/workflow/workflow_validator.rb +295 -0
  165. data/lib/fractor/workflow.rb +333 -0
  166. data/lib/fractor/wrapped_ractor.rb +66 -101
  167. data/lib/fractor/wrapped_ractor3.rb +161 -0
  168. data/lib/fractor/wrapped_ractor4.rb +242 -0
  169. data/lib/fractor.rb +92 -4
  170. metadata +179 -6
  171. data/tests/sample.rb.bak +0 -309
  172. data/tests/sample_working.rb.bak +0 -209
data/lib/fractor/continuous_server.rb

@@ -6,22 +6,23 @@ module Fractor
   # High-level wrapper for running Fractor in continuous mode.
   # Handles threading, signal handling, and results processing automatically.
   class ContinuousServer
-    attr_reader :supervisor, :work_queue
+    attr_reader :supervisor, :work_queue, :logger

     # Initialize a continuous server
     # @param worker_pools [Array<Hash>] Worker pool configurations
     # @param work_queue [WorkQueue, nil] Optional work queue to auto-register
     # @param log_file [String, nil] Optional log file path
-    def initialize(worker_pools:, work_queue: nil, log_file: nil)
+    # @param logger [Logger, nil] Optional logger instance for isolation (defaults to Fractor.logger)
+    def initialize(worker_pools:, work_queue: nil, log_file: nil, logger: nil)
       @worker_pools = worker_pools
       @work_queue = work_queue
       @log_file_path = log_file
       @log_file = nil
+      @logger = logger # Store instance-specific logger for isolation
       @result_callbacks = []
       @error_callbacks = []
       @supervisor = nil
       @supervisor_thread = nil
-      @results_thread = nil
       @running = false
     end

@@ -37,24 +38,32 @@ module Fractor
       @error_callbacks << block
     end

+    # Start the server (alias for run).
+    # Provides a consistent API with stop method.
+    #
+    # @see #run
+    def start
+      run
+    end
+
     # Start the server and block until shutdown
     # This method handles:
     # - Opening log file if specified
     # - Creating and starting supervisor
-    # - Starting results processing thread
-    # - Setting up signal handlers
+    # - Registering result callbacks with ResultAggregator
     # - Blocking until shutdown signal received
     def run
       setup_log_file
       setup_supervisor
+      register_result_callbacks
       start_supervisor_thread
-      start_results_thread

       log_message("Continuous server started")
       log_message("Press Ctrl+C to stop")

       begin
-        # Block until shutdown
+        # Event-driven: simply join the supervisor thread
+        # It will exit when @running = false and shutdown is complete
         @supervisor_thread&.join
       rescue Interrupt
         log_message("Interrupt received, shutting down...")
@@ -72,12 +81,11 @@ module Fractor

       @supervisor&.stop

-      # Wait for threads to finish
-      [@supervisor_thread, @results_thread].compact.each do |thread|
-        thread.join(2) if thread.alive?
-      end
-
-      log_message("Continuous server stopped")
+      # Ensure log file is closed
+      # This is important when stop() is called from outside the run() thread
+      # The run() method's ensure block will also call cleanup, but we ensure
+      # it here as well for immediate cleanup
+      cleanup
     end

     private
@@ -93,6 +101,7 @@ module Fractor
       @supervisor = Supervisor.new(
         worker_pools: @worker_pools,
         continuous_mode: true,
+        logger: @logger, # Pass instance-specific logger for isolation
       )

       # Auto-register work queue if provided
@@ -104,62 +113,43 @@ module Fractor
       end
     end

-    def start_supervisor_thread
-      @running = true
-      @supervisor_thread = Thread.new do
-        @supervisor.run
-      rescue StandardError => e
-        log_message("Supervisor error: #{e.message}")
-        log_message(e.backtrace.join("\n")) if ENV["FRACTOR_DEBUG"]
-      end
-
-      # Give supervisor time to start up
-      sleep(0.1)
-    end
-
-    def start_results_thread
-      @results_thread = Thread.new do
-        log_message("Results processing thread started")
-        process_results_loop
-      rescue StandardError => e
-        log_message("Results thread error: #{e.message}")
-        log_message(e.backtrace.join("\n")) if ENV["FRACTOR_DEBUG"]
-      end
-    end
-
-    def process_results_loop
-      while @running
-        sleep(0.05)
-
-        process_successful_results
-        process_error_results
+    def register_result_callbacks
+      # Register callbacks directly with ResultAggregator for event-driven processing
+      # This eliminates the need for a separate results polling thread
+      unless @result_callbacks.empty?
+        @supervisor.results.on_new_result do |result|
+          if result.success?
+            @result_callbacks.each do |callback|
+              callback.call(result)
+            rescue StandardError => e
+              log_message("Error in result callback: #{e.message}")
+            end
+          end
+        end
       end
-      log_message("Results processing thread stopped")
-    end
-
-    def process_successful_results
-      loop do
-        result = @supervisor.results.results.shift
-        break unless result

-        @result_callbacks.each do |callback|
-          callback.call(result)
-        rescue StandardError => e
-          log_message("Error in result callback: #{e.message}")
+      unless @error_callbacks.empty?
+        @supervisor.results.on_new_result do |result|
+          unless result.success?
+            @error_callbacks.each do |callback|
+              callback.call(result)
+            rescue StandardError => e
+              log_message("Error in error callback: #{e.message}")
+            end
+          end
         end
       end
     end

-    def process_error_results
-      loop do
-        error_result = @supervisor.results.errors.shift
-        break unless error_result
-
-        @error_callbacks.each do |callback|
-          callback.call(error_result)
-        rescue StandardError => e
-          log_message("Error in error callback: #{e.message}")
-        end
+    def start_supervisor_thread
+      @running = true
+      @supervisor_thread = Thread.new do
+        @supervisor.run
+      rescue StandardError => e
+        log_message("Supervisor error: #{e.message}")
+        # Use instance logger or fall back to global
+        instance_logger = @logger || Fractor.logger
+        instance_logger.debug(e.backtrace.join("\n")) if instance_logger&.debug?
       end
     end

@@ -178,8 +168,13 @@ module Fractor
       log_entry = "[#{timestamp}] #{message}"

       if @log_file && !@log_file.closed?
-        @log_file.puts(log_entry)
-        @log_file.flush
+        begin
+          @log_file.puts(log_entry)
+          @log_file.flush
+        rescue IOError
+          # File was closed in another thread, stop trying to write to it
+          @log_file = nil
+        end
       end

       puts log_entry
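
Taken together, these hunks replace the old results-polling thread with callbacks registered directly on the supervisor's ResultAggregator, add a per-instance logger, and introduce start as an alias for run. The sketch below is a hypothetical usage example, not taken from the gem's documentation: the worker-pool hash keys and the on_result/on_error registration method names are assumptions (only the callback bodies appear in the hunks above), while the constructor keywords, start, and stop are as shown in the diff.

    require "fractor"
    require "logger"

    # MyWorker: a Fractor::Worker subclass defined elsewhere (not shown here).
    server = Fractor::ContinuousServer.new(
      worker_pools: [{ worker_class: MyWorker, num_workers: 2 }], # hash keys assumed
      logger: Logger.new($stdout)  # new in 0.1.7: instance-specific logger for isolation
    )

    server.on_result { |result| puts "done: #{result.inspect}" }  # method name assumed
    server.on_error  { |result| warn "failed: #{result.error}" }  # method name assumed

    runner = Thread.new { server.start }  # start is the new alias; run blocks until shutdown
    # ... enqueue work on the registered work queue ...
    server.stop
    runner.join
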
data/lib/fractor/error_formatter.rb

@@ -0,0 +1,72 @@
+# frozen_string_literal: true
+
+module Fractor
+  # Formats error messages with rich context for debugging.
+  # Extracted from Supervisor to follow Single Responsibility Principle.
+  #
+  # @example Basic usage
+  #   formatter = ErrorFormatter.new
+  #   error_message = formatter.format(wrapped_ractor, error_result)
+  class ErrorFormatter
+    # Format error context with rich information for debugging.
+    #
+    # @param wrapped_ractor [WrappedRactor] The worker that encountered the error
+    # @param error_result [WorkResult] The error result
+    # @return [String] Formatted error message with context
+    def format(wrapped_ractor, error_result)
+      timestamp = Time.now.strftime("%Y-%m-%d %H:%M:%S")
+      worker_class = wrapped_ractor.worker_class
+      worker_name = wrapped_ractor.name
+
+      # Build contextual error message
+      lines = [
+        "=" * 80,
+        "[#{timestamp}] ERROR PROCESSING WORK",
+        "=" * 80,
+        "Worker: #{worker_name} (#{worker_class})",
+        "Work Item: #{error_result.work&.inspect || 'unknown'}",
+        "Error: #{error_result.error}",
+      ]
+
+      # Add error category and severity if available
+      if error_result.respond_to?(:error_category) && error_result.error_category
+        lines << "Category: #{error_result.error_category}"
+      end
+      if error_result.respond_to?(:error_severity) && error_result.error_severity
+        lines << "Severity: #{error_result.error_severity}"
+      end
+
+      # Add suggestions based on error type
+      suggestion = suggest_fix_for(error_result)
+      lines << "Suggestion: #{suggestion}" if suggestion
+
+      lines << "=" * 80
+      lines.join("\n")
+    end
+
+    private
+
+    # Provide contextual suggestions for common errors.
+    #
+    # @param error_result [WorkResult] The error result
+    # @return [String, nil] Suggestion string or nil
+    def suggest_fix_for(error_result)
+      error_msg = error_result.error.to_s
+
+      case error_msg
+      when /negative number/i
+        "Check if input validation is needed. Consider using AbsWorker for positive-only values."
+      when /timeout/i
+        "Consider increasing timeout duration or breaking work into smaller chunks."
+      when /memory/i
+        "Try processing smaller batches or increasing available memory."
+      when /connection/i
+        "Verify network connectivity and service availability."
+      when /undefined method/i
+        "Ensure the Worker class implements all required methods for the Work type."
+      when /nil/i
+        "Check if work items are being initialized with valid input data."
+      end
+    end
+  end
+end
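
Since ErrorFormatter only reads a handful of attributes from the objects it is given, it can be exercised in isolation. The snippet below is a sketch using hypothetical Struct stand-ins (FakeRactor, FakeResult) rather than the real WrappedRactor and WorkResult, and it assumes require "fractor" loads the class; only the attributes that format actually reads are provided.

    require "fractor"

    # Hypothetical test doubles exposing just what ErrorFormatter#format reads.
    FakeRactor = Struct.new(:name, :worker_class)
    FakeResult = Struct.new(:work, :error, :error_category, :error_severity)

    formatter = Fractor::ErrorFormatter.new
    puts formatter.format(
      FakeRactor.new("worker-1", "FetchWorker"),
      FakeResult.new({ url: "https://example.com" }, "Connection refused", :network, :warning)
    )
    # "Connection refused" matches the /connection/i branch of suggest_fix_for, so the
    # report ends with "Suggestion: Verify network connectivity and service availability."
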
data/lib/fractor/error_report_generator.rb

@@ -0,0 +1,152 @@
+# frozen_string_literal: true
+
+require "json"
+
+module Fractor
+  # Generates error reports in multiple formats (text, JSON, Prometheus).
+  # Extracted from ErrorReporter for better separation of concerns.
+  class ErrorReportGenerator
+    # Generate a human-readable text report
+    #
+    # @param report_data [Hash] The report data from ErrorReporter
+    # @return [String] Formatted text report
+    def self.text_report(report_data)
+      lines = []
+      lines << "=" * 80
+      lines << "ERROR REPORT"
+      lines << "=" * 80
+      lines << ""
+
+      # Summary
+      lines << "SUMMARY"
+      lines << "-" * 80
+      summary = report_data[:summary]
+      lines << "Uptime: #{summary[:uptime]}s"
+      lines << "Total Errors: #{summary[:total_errors]}"
+      lines << "Total Successes: #{summary[:total_successes]}"
+      lines << "Error Rate: #{summary[:error_rate]}%"
+      lines << ""
+
+      # Errors by Severity
+      lines << "Errors by Severity:"
+      summary[:errors_by_severity].each do |severity, count|
+        lines << " #{severity.to_s.ljust(10)}: #{count}"
+      end
+      lines << ""
+
+      # Top Categories
+      lines << "TOP ERROR CATEGORIES"
+      lines << "-" * 80
+      report_data[:top_categories].each do |category, count|
+        lines << "#{category.to_s.ljust(20)}: #{count} errors"
+      end
+      lines << ""
+
+      # Top Jobs
+      unless report_data[:top_jobs].empty?
+        lines << "TOP ERROR JOBS"
+        lines << "-" * 80
+        report_data[:top_jobs].each do |job, count|
+          lines << "#{job.to_s.ljust(20)}: #{count} errors"
+        end
+        lines << ""
+      end
+
+      # Critical Errors
+      unless report_data[:critical_errors].empty?
+        lines << "CRITICAL ERRORS"
+        lines << "-" * 80
+        report_data[:critical_errors].each do |error_info|
+          lines << "Category: #{error_info[:category]}"
+          lines << "Count: #{error_info[:count]}"
+          lines << "Recent errors:"
+          error_info[:recent].each do |err|
+            lines << " - [#{err[:timestamp]}] #{err[:error_class]}: #{err[:error_message]}"
+          end
+          lines << ""
+        end
+      end
+
+      # Trending Errors
+      unless report_data[:trending_errors].empty?
+        lines << "TRENDING ERRORS (Increasing)"
+        lines << "-" * 80
+        report_data[:trending_errors].each do |trend|
+          stats = trend[:stats]
+          lines << "Category: #{stats[:category]}"
+          lines << "Total Count: #{stats[:total_count]}"
+          lines << "Error Rate: #{stats[:error_rate]}/s"
+          lines << "Trend: #{stats[:trending]}"
+          lines << ""
+        end
+      end
+
+      lines << "=" * 80
+      lines.join("\n")
+    end
+
+    # Export errors to Prometheus format
+    #
+    # @param reporter [ErrorReporter] The error reporter instance
+    # @return [String] Prometheus metrics
+    def self.to_prometheus(reporter)
+      lines = []
+
+      # Total errors
+      lines << "# HELP fractor_errors_total Total number of errors"
+      lines << "# TYPE fractor_errors_total counter"
+      lines << "fractor_errors_total #{reporter.total_errors}"
+      lines << ""
+
+      # Total successes
+      lines << "# HELP fractor_successes_total Total number of successes"
+      lines << "# TYPE fractor_successes_total counter"
+      lines << "fractor_successes_total #{reporter.total_successes}"
+      lines << ""
+
+      # Error rate
+      lines << "# HELP fractor_error_rate Error rate percentage"
+      lines << "# TYPE fractor_error_rate gauge"
+      lines << "fractor_error_rate #{reporter.overall_error_rate}"
+      lines << ""
+
+      # Errors by severity
+      lines << "# HELP fractor_errors_by_severity Errors by severity level"
+      lines << "# TYPE fractor_errors_by_severity gauge"
+      reporter.errors_by_severity.each do |severity, count|
+        lines << "fractor_errors_by_severity{severity=\"#{severity}\"} #{count}"
+      end
+      lines << ""
+
+      # Errors by category
+      lines << "# HELP fractor_errors_by_category Errors by category"
+      lines << "# TYPE fractor_errors_by_category gauge"
+      reporter.instance_variable_get(:@by_category)&.each do |category, stats|
+        lines << "fractor_errors_by_category{category=\"#{category}\"} #{stats.total_count}"
+      end
+      lines << ""
+
+      # Errors by job
+      by_job = reporter.instance_variable_get(:@by_job)
+      unless by_job&.empty?
+        lines << "# HELP fractor_errors_by_job Errors by job name"
+        lines << "# TYPE fractor_errors_by_job gauge"
+        by_job.each do |job, stats|
+          lines << "fractor_errors_by_job{job=\"#{job}\"} #{stats.total_count}"
+        end
+        lines << ""
+      end
+
+      lines.join("\n")
+    end
+
+    # Export errors to JSON format
+    #
+    # @param report_data [Hash] The report data from ErrorReporter
+    # @param args [Array] Additional arguments for JSON generation
+    # @return [String] JSON representation
+    def self.to_json(report_data, *args)
+      report_data.to_json(*args)
+    end
+  end
+end
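
ErrorReportGenerator is normally driven by ErrorReporter, whose formatted_report and to_prometheus simply delegate here (see the next file), but text_report only needs a hash shaped like the output of ErrorReporter#report. Below is a minimal sketch with hand-built, purely illustrative values, assuming require "fractor" loads the class.

    require "fractor"

    # Shape mirrors ErrorReporter#report (next file); all values are made up.
    report_data = {
      summary: {
        uptime: 12.5,
        total_errors: 3,
        total_successes: 97,
        error_rate: 3.0,
        errors_by_severity: { error: 2, critical: 1 },
      },
      top_categories: { network: 2, validation: 1 },
      top_jobs: { "fetch_page" => 2 },
      critical_errors: [],
      trending_errors: [],
    }

    puts Fractor::ErrorReportGenerator.text_report(report_data)
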
data/lib/fractor/error_reporter.rb

@@ -0,0 +1,244 @@
+# frozen_string_literal: true
+
+require_relative "error_statistics"
+require_relative "error_report_generator"
+
+module Fractor
+  # Error reporting and analytics system.
+  # Aggregates errors, tracks statistics, and provides actionable insights.
+  class ErrorReporter
+    attr_reader :start_time, :total_errors, :total_successes
+
+    def initialize
+      @start_time = Time.now
+      @total_errors = 0
+      @total_successes = 0
+      @by_category = {}
+      @by_job = {}
+      @error_handlers = []
+      @mutex = Mutex.new
+    end
+
+    # Record a work result
+    #
+    # @param work_result [WorkResult] The work result to record
+    # @param job_name [String, nil] Optional job name
+    # @return [void]
+    def record(work_result, job_name: nil)
+      @mutex.synchronize do
+        if work_result.success?
+          @total_successes += 1
+        else
+          @total_errors += 1
+          record_error(work_result, job_name)
+        end
+      end
+    end
+
+    # Register an error handler callback
+    #
+    # @yield [work_result, job_name] Block to call when error occurs
+    # @return [void]
+    def on_error(&block)
+      @error_handlers << block
+    end
+
+    # Get statistics for a category
+    #
+    # @param category [String, Symbol] The error category
+    # @return [Hash, nil] Statistics for the category
+    def category_stats(category)
+      @mutex.synchronize do
+        @by_category[category]&.to_h
+      end
+    end
+
+    # Get statistics for a job
+    #
+    # @param job_name [String] The job name
+    # @return [Hash, nil] Statistics for the job
+    def job_stats(job_name)
+      @mutex.synchronize do
+        @by_job[job_name]&.to_h
+      end
+    end
+
+    # Get overall error rate
+    #
+    # @return [Float] Error rate percentage
+    def overall_error_rate
+      total = @total_errors + @total_successes
+      return 0.0 if total.zero?
+
+      (@total_errors.to_f / total * 100).round(2)
+    end
+
+    # Get errors by severity
+    #
+    # @return [Hash] Error counts grouped by severity
+    def errors_by_severity
+      result = Hash.new(0)
+      @mutex.synchronize do
+        @by_category.each_value do |stats|
+          stats.by_severity.each do |severity, count|
+            result[severity] += count
+          end
+        end
+      end
+      result
+    end
+
+    # Get top error categories
+    #
+    # @param limit [Integer] Maximum number of categories to return
+    # @return [Hash] Top error categories with counts
+    def top_categories(limit: 5)
+      @mutex.synchronize do
+        @by_category
+          .map { |category, stats| [category, stats.total_count] }
+          .sort_by { |_category, count| -count }
+          .first(limit)
+          .to_h
+      end
+    end
+
+    # Get top error jobs
+    #
+    # @param limit [Integer] Maximum number of jobs to return
+    # @return [Hash] Top error jobs with counts
+    def top_jobs(limit: 5)
+      @mutex.synchronize do
+        @by_job
+          .map { |job, stats| [job, stats.total_count] }
+          .sort_by { |_job, count| -count }
+          .first(limit)
+          .to_h
+      end
+    end
+
+    # Get critical errors
+    #
+    # @return [Array<Hash>] Critical errors with recent occurrences
+    def critical_errors
+      errors = []
+      @mutex.synchronize do
+        @by_category.each do |category, stats|
+          critical_count = stats.by_severity[WorkResult::SEVERITY_CRITICAL] || 0
+          if critical_count.positive?
+            errors << {
+              category: category,
+              count: critical_count,
+              recent: stats.recent_errors.select do |e|
+                e[:error_severity] == WorkResult::SEVERITY_CRITICAL
+              end.last(5),
+            }
+          end
+        end
+      end
+      errors
+    end
+
+    # Get trending errors (increasing error rates)
+    #
+    # @return [Array<Hash>] Trending error categories
+    def trending_errors
+      trends = []
+      @mutex.synchronize do
+        @by_category.each do |category, stats|
+          if stats.increasing?
+            trends << { category: category,
+                        stats: stats.to_h }
+          end
+        end
+      end
+      trends
+    end
+
+    # Generate comprehensive report
+    #
+    # @return [Hash] Report data with all statistics
+    def report
+      {
+        summary: {
+          uptime: (Time.now - @start_time).round(2),
+          total_errors: @total_errors,
+          total_successes: @total_successes,
+          error_rate: overall_error_rate,
+          errors_by_severity: errors_by_severity,
+        },
+        top_categories: top_categories,
+        top_jobs: top_jobs,
+        critical_errors: critical_errors,
+        trending_errors: trending_errors,
+        category_breakdown: category_breakdown,
+      }
+    end
+
+    # Generate formatted text report
+    #
+    # @return [String] Formatted text report
+    def formatted_report
+      ErrorReportGenerator.text_report(report)
+    end
+
+    # Export to Prometheus format
+    #
+    # @return [String] Prometheus metrics
+    def to_prometheus
+      ErrorReportGenerator.to_prometheus(self)
+    end
+
+    # Export to JSON format
+    #
+    # @param args [Array] Additional arguments for JSON generation
+    # @return [String] JSON representation
+    def to_json(*args)
+      ErrorReportGenerator.to_json(report, *args)
+    end
+
+    # Reset all statistics
+    #
+    # @return [void]
+    def reset
+      @mutex.synchronize do
+        @start_time = Time.now
+        @total_errors = 0
+        @total_successes = 0
+        @by_category.clear
+        @by_job.clear
+      end
+    end
+
+    private
+
+    def record_error(work_result, job_name)
+      # Record by category
+      category = work_result.error_category
+      @by_category[category] ||= ErrorStatistics.new(category)
+      @by_category[category].record(work_result)
+
+      # Record by job if provided
+      if job_name
+        @by_job[job_name] ||= ErrorStatistics.new(job_name)
+        @by_job[job_name].record(work_result)
+      end
+
+      # Invoke error handlers
+      @error_handlers.each do |handler|
+        handler.call(work_result, job_name)
+      rescue StandardError => e
+        warn "Error in error handler: #{e.message}"
+      end
+    end
+
+    def category_breakdown
+      breakdown = {}
+      @mutex.synchronize do
+        @by_category.each do |category, stats|
+          breakdown[category] = stats.to_h
+        end
+      end
+      breakdown
+    end
+  end
+end
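
A usage sketch for the reporter as a whole. OkResult is a hypothetical stand-in that only answers success?; recording failures additionally relies on WorkResult's error_category/error_severity and on ErrorStatistics, which live in files not shown in this diff. Assumes require "fractor" loads the class.

    require "fractor"

    OkResult = Struct.new(:value) do
      def success? = true   # hypothetical stand-in; real results are Fractor::WorkResult
    end

    reporter = Fractor::ErrorReporter.new
    reporter.on_error { |result, job| warn "#{job}: #{result.error}" } # runs on failed results

    100.times { |i| reporter.record(OkResult.new(i), job_name: "double") }

    puts reporter.overall_error_rate # => 0.0
    puts reporter.formatted_report   # text report rendered by ErrorReportGenerator
    puts reporter.to_prometheus      # counters/gauges suitable for scraping
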