ace-test-runner 0.18.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. checksums.yaml +7 -0
  2. data/.ace-defaults/test/runner.yml +35 -0
  3. data/.ace-defaults/test/suite.yml +31 -0
  4. data/.ace-defaults/test-runner/config.yml +61 -0
  5. data/CHANGELOG.md +626 -0
  6. data/LICENSE +21 -0
  7. data/README.md +42 -0
  8. data/Rakefile +14 -0
  9. data/exe/ace-test +26 -0
  10. data/exe/ace-test-suite +149 -0
  11. data/lib/ace/test_runner/atoms/command_builder.rb +165 -0
  12. data/lib/ace/test_runner/atoms/lazy_loader.rb +62 -0
  13. data/lib/ace/test_runner/atoms/line_number_resolver.rb +86 -0
  14. data/lib/ace/test_runner/atoms/report_directory_resolver.rb +48 -0
  15. data/lib/ace/test_runner/atoms/report_path_resolver.rb +67 -0
  16. data/lib/ace/test_runner/atoms/result_parser.rb +254 -0
  17. data/lib/ace/test_runner/atoms/test_detector.rb +114 -0
  18. data/lib/ace/test_runner/atoms/test_folder_detector.rb +53 -0
  19. data/lib/ace/test_runner/atoms/test_type_detector.rb +83 -0
  20. data/lib/ace/test_runner/atoms/timestamp_generator.rb +103 -0
  21. data/lib/ace/test_runner/cli/commands/test.rb +326 -0
  22. data/lib/ace/test_runner/cli.rb +16 -0
  23. data/lib/ace/test_runner/formatters/base_formatter.rb +102 -0
  24. data/lib/ace/test_runner/formatters/json_formatter.rb +90 -0
  25. data/lib/ace/test_runner/formatters/markdown_formatter.rb +91 -0
  26. data/lib/ace/test_runner/formatters/progress_file_formatter.rb +164 -0
  27. data/lib/ace/test_runner/formatters/progress_formatter.rb +328 -0
  28. data/lib/ace/test_runner/models/test_configuration.rb +165 -0
  29. data/lib/ace/test_runner/models/test_failure.rb +95 -0
  30. data/lib/ace/test_runner/models/test_group.rb +105 -0
  31. data/lib/ace/test_runner/models/test_report.rb +145 -0
  32. data/lib/ace/test_runner/models/test_result.rb +86 -0
  33. data/lib/ace/test_runner/molecules/cli_argument_parser.rb +263 -0
  34. data/lib/ace/test_runner/molecules/config_loader.rb +162 -0
  35. data/lib/ace/test_runner/molecules/deprecation_fixer.rb +204 -0
  36. data/lib/ace/test_runner/molecules/failed_package_reporter.rb +100 -0
  37. data/lib/ace/test_runner/molecules/failure_analyzer.rb +249 -0
  38. data/lib/ace/test_runner/molecules/in_process_runner.rb +249 -0
  39. data/lib/ace/test_runner/molecules/package_resolver.rb +106 -0
  40. data/lib/ace/test_runner/molecules/pattern_resolver.rb +146 -0
  41. data/lib/ace/test_runner/molecules/rake_integration.rb +218 -0
  42. data/lib/ace/test_runner/molecules/report_storage.rb +303 -0
  43. data/lib/ace/test_runner/molecules/smart_test_executor.rb +107 -0
  44. data/lib/ace/test_runner/molecules/test_executor.rb +162 -0
  45. data/lib/ace/test_runner/organisms/agent_reporter.rb +384 -0
  46. data/lib/ace/test_runner/organisms/report_generator.rb +151 -0
  47. data/lib/ace/test_runner/organisms/sequential_group_executor.rb +185 -0
  48. data/lib/ace/test_runner/organisms/test_orchestrator.rb +648 -0
  49. data/lib/ace/test_runner/rake_task.rb +90 -0
  50. data/lib/ace/test_runner/suite/display_helpers.rb +117 -0
  51. data/lib/ace/test_runner/suite/display_manager.rb +204 -0
  52. data/lib/ace/test_runner/suite/duration_estimator.rb +50 -0
  53. data/lib/ace/test_runner/suite/orchestrator.rb +120 -0
  54. data/lib/ace/test_runner/suite/process_monitor.rb +268 -0
  55. data/lib/ace/test_runner/suite/result_aggregator.rb +176 -0
  56. data/lib/ace/test_runner/suite/simple_display_manager.rb +122 -0
  57. data/lib/ace/test_runner/suite.rb +22 -0
  58. data/lib/ace/test_runner/version.rb +7 -0
  59. data/lib/ace/test_runner.rb +69 -0
  60. metadata +246 -0
@@ -0,0 +1,107 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative "test_executor"
4
+ require_relative "in_process_runner"
5
+ require_relative "../atoms/test_type_detector"
6
+
7
module Ace
  module TestRunner
    module Molecules
      # Intelligently chooses between subprocess and in-process execution based on test type
      class SmartTestExecutor
        # force_mode may be :subprocess, :direct, or nil for auto-detection.
        def initialize(command_builder: nil, timeout: nil, force_mode: nil)
          @force_mode = force_mode
          @subprocess_executor = TestExecutor.new(command_builder: command_builder, timeout: timeout)
          @in_process_runner = InProcessRunner.new(timeout: timeout)
          @test_type_detector = Atoms::TestTypeDetector.new
        end

        # Runs a batch of test files with whichever runner fits them best.
        def execute_tests(files, options = {})
          return empty_result if files.empty?

          runner_for(determine_execution_mode(files, options)).execute_tests(files, options)
        end

        # Runs a single file, choosing the runner the same way as execute_tests.
        def execute_single_file(file, options = {})
          runner_for(determine_execution_mode([file], options)).execute_single_file(file, options)
        end

        # Raw command strings always go through a subprocess.
        def execute_command(command)
          @subprocess_executor.execute_command(command)
        end

        # Like execute_tests, but streams progress events to the block,
        # beginning with an :execution_mode event announcing the chosen runner.
        def execute_with_progress(files, options = {}, &block)
          mode = determine_execution_mode(files, options)
          yield({type: :execution_mode, mode: mode}) if block_given?
          runner_for(mode).execute_with_progress(files, options, &block)
        end

        private

        # Maps a mode symbol to the matching runner instance.
        def runner_for(mode)
          (mode == :direct) ? @in_process_runner : @subprocess_executor
        end

        def determine_execution_mode(files, options)
          # Explicit overrides (per-call option or constructor force) win.
          return :subprocess if options[:subprocess] || @force_mode == :subprocess
          return :direct if options[:direct] || @force_mode == :direct

          # group_isolation == true requests subprocess isolation for
          # sequential group execution.
          return :subprocess if options[:group_isolation] == true

          # Line-number filters (file:line format) run in a subprocess for
          # cleaner output without Minitest reporter duplication.
          return :subprocess if files.any? { |path| path.match?(/:\d+$/) }

          subprocess_required?(files) ? :subprocess : :direct
        end

        # Content-based auto-detection, plus a heuristic: many distinct
        # directories suggest tests from multiple packages.
        def subprocess_required?(files)
          required = files.any? { |path| @test_type_detector.needs_subprocess?(path) }
          if files.size > 1
            package_dirs = files.map { |path| File.dirname(path) }.uniq
            required ||= package_dirs.size > 3
          end
          required
        end

        # Result shape returned when there is nothing to run.
        def empty_result
          {
            stdout: "",
            stderr: "No test files found",
            status: OpenStruct.new(success?: true, exitstatus: 0),
            command: "",
            start_time: Time.now,
            end_time: Time.now,
            duration: 0.0,
            success: true
          }
        end
      end
    end
  end
end
@@ -0,0 +1,162 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "open3"
4
+ require "timeout"
5
+ require "ostruct"
6
+
7
module Ace
  module TestRunner
    module Molecules
      # Executes test commands and captures output
      class TestExecutor
        # command_builder: builds shell commands from file lists
        #   (defaults to Atoms::CommandBuilder).
        # timeout: per-command limit in seconds; nil = no timeout.
        def initialize(command_builder: nil, timeout: nil)
          @command_builder = command_builder || Atoms::CommandBuilder.new
          @timeout = timeout # In seconds, nil = no timeout
        end

        # Runs all files in a single subprocess; returns a result hash with
        # :stdout, :stderr, :status, :command, :start_time, :end_time,
        # :duration and :success.
        def execute_tests(files, options = {})
          return empty_result if files.empty?

          command = @command_builder.build_test_command(files, options)
          execute_command(command)
        end

        # Runs one file via a command built for that single file.
        def execute_single_file(file, options = {})
          command = @command_builder.build_single_file_command(file, options)
          execute_command(command)
        end

        # Executes a shell command, capturing stdout/stderr and exit status.
        # On timeout the child process is killed and the result reports exit
        # status 124 (same convention as coreutils timeout(1)).
        def execute_command(command)
          start_time = Time.now
          stdout = ""
          stderr = ""
          status = nil

          # Set environment to prevent Minitest autorun at_exit hook
          # Also strip assignment context vars to prevent tests from resolving to wrong assignments
          # Inherit parent environment and override specific vars (nil unsets at exec time)
          env = ENV.to_h.merge({
            "MT_NO_AUTORUN" => "1",
            "ACE_ASSIGN_ID" => nil,
            "ACE_ASSIGN_FORK_ROOT" => nil
          })

          # Remove MT_NO_AUTORUN=1 from command if it's there
          command = command.sub(/^MT_NO_AUTORUN=1\s+/, "")

          begin
            stdout, stderr, status = run_command(env, command)
          rescue Timeout::Error
            stderr = "Test execution timed out after #{@timeout} seconds"
            status = OpenStruct.new(success?: false, exitstatus: 124)
          end

          end_time = Time.now

          {
            stdout: stdout,
            stderr: stderr,
            status: status,
            command: command,
            start_time: start_time,
            end_time: end_time,
            duration: end_time - start_time,
            success: status.success?
          }
        end

        # Runs files with progress callbacks. Per-file execution is used only
        # when explicitly requested or when fail-fast must stop early;
        # otherwise all files share one process for speed.
        def execute_with_progress(files, options = {}, &block)
          # Fail-fast requires per-file execution to stop on first failure
          # For performance, execute all files together unless explicitly disabled or fail-fast enabled
          if options[:per_file] == true || options[:fail_fast]
            execute_per_file_with_progress(files, options, &block)
          else
            # Execute all files in a single Ruby process for performance
            result = execute_tests(files, options)

            # Send stdout event for per-test progress parsing
            if block_given? && result[:stdout]
              yield({type: :stdout, content: result[:stdout]})
            end

            # Simulate progress callbacks for compatibility
            if block_given?
              files.each { |file| yield({type: :start, file: file}) }
              files.each { |file| yield({type: :complete, file: file, success: result[:success], duration: result[:duration] / files.size}) }
            end

            result
          end
        end

        # Runs each file in its own subprocess, yielding :start/:complete
        # events, and merges the per-file results into one hash.
        def execute_per_file_with_progress(files, options = {}, &block)
          results = []

          files.each do |file|
            yield({type: :start, file: file}) if block_given?

            result = execute_single_file(file, options)
            results << result

            if block_given?
              yield({
                type: :complete,
                file: file,
                success: result[:success],
                duration: result[:duration]
              })
            end

            # Stop on first failure if fail_fast is set
            break if options[:fail_fast] && !result[:success]
          end

          merge_results(results)
        end

        private

        # Captures the command's output, enforcing @timeout when set.
        # Wrapping Open3.capture3 in Timeout.timeout would merely abandon the
        # child process on expiry and leave it running; instead we spawn via
        # popen3, wait with a bounded join, and KILL the child on timeout.
        def run_command(env, command)
          return Open3.capture3(env, command) unless @timeout

          Open3.popen3(env, command) do |stdin, out, err, wait_thr|
            stdin.close
            out_reader = Thread.new { out.read }
            err_reader = Thread.new { err.read }

            unless wait_thr.join(@timeout)
              begin
                # NOTE: kills the immediate child (typically a shell); any
                # grandchildren it spawned may survive.
                Process.kill("KILL", wait_thr.pid)
              rescue Errno::ESRCH
                # Child exited between the failed join and the kill.
              end
              wait_thr.join
              raise Timeout::Error, "execution expired after #{@timeout} seconds"
            end

            [out_reader.value, err_reader.value, wait_thr.value]
          end
        end

        # Result shape returned when there is nothing to run.
        def empty_result
          {
            stdout: "",
            stderr: "No test files found",
            status: OpenStruct.new(success?: true, exitstatus: 0),
            command: "",
            start_time: Time.now,
            end_time: Time.now,
            duration: 0.0,
            success: true
          }
        end

        # Collapses per-file results into a single result hash. Exposes both
        # :commands (the full list) and :command (joined) so consumers reading
        # result[:command] — as produced by execute_command/empty_result —
        # keep working after per-file runs.
        def merge_results(results)
          return empty_result if results.empty?

          merged = {
            stdout: results.map { |r| r[:stdout] }.join("\n"),
            stderr: results.map { |r| r[:stderr] }.join("\n"),
            command: results.map { |r| r[:command] }.join("\n"),
            commands: results.map { |r| r[:command] },
            start_time: results.first[:start_time],
            end_time: results.last[:end_time],
            duration: results.sum { |r| r[:duration] },
            success: results.all? { |r| r[:success] }
          }

          # Create a synthetic status
          merged[:status] = OpenStruct.new(
            success?: merged[:success],
            exitstatus: merged[:success] ? 0 : 1
          )

          merged
        end
      end
    end
  end
end
@@ -0,0 +1,384 @@
1
+ # frozen_string_literal: true
2
+
3
module Ace
  module TestRunner
    module Organisms
      # Specialized reporter for AI agent consumption (recreating lost functionality)
      # This was originally a 242-line component that provided AI-friendly output
      class AgentReporter
        # options[:verbose]     - verbosity flag (default false)
        # options[:format]      - output format name (default "structured")
        # options[:include_raw] - whether raw output is included (default false)
        def initialize(options = {})
          @verbose = options[:verbose] || false
          @format = options[:format] || "structured"
          @include_raw = options[:include_raw] || false
        end

        # Builds the full structured hash an agent can consume for one run.
        # result: parsed test result; report: report object exposing #environment.
        def generate_agent_report(result, report)
          {
            execution_summary: generate_execution_summary(result),
            actionable_items: generate_actionable_items(result),
            context_for_ai: generate_ai_context(result, report),
            suggested_actions: generate_suggested_actions(result),
            structured_failures: generate_structured_failures(result),
            code_quality_insights: generate_quality_insights(result),
            fix_commands: generate_fix_commands(result)
          }
        end

        # Renders a plain-text, machine-parseable summary (KEY: value lines).
        def format_for_agent(result)
          lines = []

          # Status indicator
          lines << "TEST_EXECUTION_STATUS: #{result.success? ? "SUCCESS" : "FAILURE"}"
          lines << "TEST_METRICS: passed=#{result.passed} failed=#{result.failed} errors=#{result.errors} skipped=#{result.skipped}"

          # Actionable failures
          if result.has_failures?
            lines << "\nACTIONABLE_FAILURES:"
            result.failures_detail.each_with_index do |failure, idx|
              lines << " FAILURE_#{idx + 1}:"
              lines << " TYPE: #{failure.type}"
              lines << " TEST: #{failure.full_test_name}"
              lines << " FILE: #{failure.file_path}"
              lines << " LINE: #{failure.line_number}"
              lines << " FIX_SUGGESTION: #{failure.fix_suggestion || "Review test logic"}"
            end
          end

          # Deprecations that need fixing
          if result.has_deprecations?
            lines << "\nDEPRECATIONS_TO_FIX:"
            result.deprecations.each_with_index do |dep, idx|
              lines << " DEPRECATION_#{idx + 1}: #{dep}"
            end
          end

          # Next steps
          lines << "\nNEXT_STEPS:"
          generate_next_steps(result).each do |step|
            lines << " - #{step}"
          end

          lines.join("\n")
        end

        # Emits a commented Ruby script skeleton with suggested fixes, or nil
        # when there is nothing to fix.
        def generate_fix_script(result)
          return nil unless result.has_failures? || result.has_deprecations?

          script_lines = ["#!/usr/bin/env ruby", "# Auto-generated fix script", ""]

          # Add deprecation fixes
          if result.has_deprecations?
            script_lines << "# Fix deprecations"
            script_lines << "puts 'Fixing deprecations...'"
            script_lines << "system('ace-test --fix-deprecations')"
            script_lines << ""
          end

          # Add failure fixes based on patterns
          if result.has_failures?
            script_lines << "# Suggested fixes for failures"
            result.failures_detail.each do |failure|
              if failure.fix_suggestion
                script_lines << "# #{failure.full_test_name}"
                script_lines << "# Suggestion: #{failure.fix_suggestion}"
                script_lines << "# TODO: Implement fix for #{failure.location}"
                script_lines << ""
              end
            end
          end

          script_lines.join("\n")
        end

        private

        # High-level pass/fail metrics for the run.
        def generate_execution_summary(result)
          {
            status: result.success? ? "all_passing" : "failures_detected",
            test_count: result.total_tests,
            pass_rate_percent: result.pass_rate,
            duration_seconds: result.duration,
            critical_failures: count_critical_failures(result),
            needs_immediate_attention: result.failed > 0 || result.errors > 0
          }
        end

        # Prioritized to-do items derived from error/failure/deprecation counts.
        def generate_actionable_items(result)
          items = []

          # High priority: errors
          if result.errors > 0
            items << {
              priority: "high",
              type: "fix_errors",
              count: result.errors,
              action: "Fix runtime errors preventing test execution"
            }
          end

          # Medium priority: failures
          if result.failed > 0
            items << {
              priority: "medium",
              type: "fix_failures",
              count: result.failed,
              action: "Fix assertion failures in test logic"
            }
          end

          # Low priority: deprecations
          if result.has_deprecations?
            items << {
              priority: "low",
              type: "fix_deprecations",
              count: result.deprecations.size,
              action: "Update deprecated code patterns"
            }
          end

          # Info: skipped tests
          if result.skipped > 0
            items << {
              priority: "info",
              type: "review_skips",
              count: result.skipped,
              action: "Review and enable skipped tests"
            }
          end

          items
        end

        # Broad project-state context for an AI consumer.
        def generate_ai_context(result, report)
          {
            project_state: determine_project_state(result),
            test_coverage: estimate_coverage(result),
            code_stability: calculate_stability_score(result),
            recommended_focus: recommend_focus_area(result),
            environment: report.environment,
            test_patterns: analyze_test_patterns(result)
          }
        end

        # Human-readable next actions based on result heuristics.
        def generate_suggested_actions(result)
          actions = []

          if result.errors > 0
            actions << "Run failing tests individually to isolate errors"
            actions << "Check for missing dependencies or setup issues"
          end

          if result.failed > 5
            actions << "Focus on fixing the most common failure pattern first"
            actions << "Consider running tests with --fail-fast to speed up debugging"
          end

          if result.pass_rate < 80
            actions << "Review recent changes that may have broken tests"
            actions << "Run git bisect to find the commit that introduced failures"
          end

          if result.has_deprecations?
            actions << "Run 'ace-test --fix-deprecations' to auto-fix deprecation warnings"
          end

          if result.duration > 60
            actions << "Consider parallel test execution to reduce runtime"
            actions << "Profile slow tests and optimize or mark as slow"
          end

          actions
        end

        # Groups failures by type/file and orders them for fixing.
        # Returns {} when there are no failures.
        def generate_structured_failures(result)
          return {} unless result.has_failures?

          analyzer = Molecules::FailureAnalyzer.new
          common_issues = analyzer.find_common_issues(result.failures_detail)

          {
            total_failures: result.failures_detail.size,
            failure_types: result.failures_detail.group_by(&:type).transform_values(&:count),
            common_patterns: common_issues,
            affected_files: result.failures_detail.map(&:file_path).uniq.compact,
            suggested_fix_order: prioritize_fixes(result.failures_detail)
          }
        end

        # Derived quality metrics for the suite.
        def generate_quality_insights(result)
          {
            # Guard against zero tests (consistent with estimate_coverage and
            # analyze_test_patterns); unguarded float division yields NaN.
            assertion_density: (result.total_tests > 0) ? result.assertions.to_f / result.total_tests : 0,
            failure_clustering: analyze_failure_clustering(result),
            test_performance: categorize_test_performance(result),
            maintainability_score: calculate_maintainability_score(result)
          }
        end

        # Shell commands an agent could run to investigate or fix the run.
        def generate_fix_commands(result)
          commands = []

          # Commands based on failure types
          if result.has_failures?
            commands << "# Run only failing tests"
            failing_files = result.failures_detail.map(&:file_path).uniq.compact
            failing_files.each do |file|
              commands << "ace-test --filter '#{File.basename(file)}'"
            end
          end

          # Deprecation fix command
          if result.has_deprecations?
            commands << "# Fix deprecations"
            commands << "ace-test --fix-deprecations"
          end

          # Debug commands
          if result.errors > 0
            commands << "# Debug with verbose output"
            commands << "ace-test --verbose --fail-fast"
          end

          commands
        end

        # Step list for the NEXT_STEPS section of format_for_agent.
        def generate_next_steps(result)
          steps = []

          if result.success?
            steps << "All tests passing - consider adding more test coverage"
            steps << "Review code for potential optimizations"
          elsif result.errors > 0
            steps << "Fix critical errors preventing test execution"
            steps << "Check test environment and dependencies"
          elsif result.failed > 0
            steps << "Fix failing assertions in order of priority"
            steps << "Run focused tests on problem areas"
          end

          if result.has_deprecations?
            steps << "Update deprecated code patterns"
          end

          steps
        end

        # Errors (not assertion failures) count as critical.
        def count_critical_failures(result)
          result.failures_detail.count { |f| f.type == :error }
        end

        # Buckets the project by pass rate.
        def determine_project_state(result)
          if result.success?
            "stable"
          elsif result.pass_rate >= 90
            "mostly_stable"
          elsif result.pass_rate >= 70
            "unstable"
          else
            "broken"
          end
        end

        def estimate_coverage(result)
          # Rough estimation based on assertion density
          assertion_density = (result.total_tests > 0) ? result.assertions.to_f / result.total_tests : 0

          if assertion_density > 10
            "high"
          elsif assertion_density > 5
            "medium"
          else
            "low"
          end
        end

        def calculate_stability_score(result)
          # Score from 0-100 based on various factors
          score = 100

          # Deduct for failures
          score -= (result.failed * 5)
          score -= (result.errors * 10)
          score -= (result.skipped * 1)

          # Bonus for high assertion count
          score += [result.assertions / 10, 10].min

          [score, 0].max
        end

        # Picks the single area most worth an agent's attention.
        def recommend_focus_area(result)
          if result.errors > 0
            "critical_errors"
          elsif result.failed > result.total_tests * 0.3
            "widespread_failures"
          elsif result.has_deprecations?
            "technical_debt"
          elsif result.skipped > result.total_tests * 0.2
            "test_coverage"
          else
            "optimization"
          end
        end

        # Simple boolean/ratio facts about how the suite is written.
        def analyze_test_patterns(result)
          {
            uses_assertions: result.assertions > 0,
            has_skipped_tests: result.skipped > 0,
            has_error_tests: result.errors > 0,
            average_assertions_per_test: (result.total_tests > 0) ? result.assertions.to_f / result.total_tests : 0
          }
        end

        # Classifies whether failures concentrate in few files or spread out.
        def analyze_failure_clustering(result)
          return "none" unless result.has_failures?

          files = result.failures_detail.map(&:file_path).compact
          unique_files = files.uniq.size
          total_failures = files.size

          if unique_files == 1
            "single_file"
          elsif unique_files < total_failures * 0.3
            "clustered"
          else
            "distributed"
          end
        end

        # Buckets total run duration (seconds) into a speed label.
        def categorize_test_performance(result)
          if result.duration < 1
            "fast"
          elsif result.duration < 10
            "acceptable"
          elsif result.duration < 60
            "slow"
          else
            "very_slow"
          end
        end

        def calculate_maintainability_score(result)
          score = 100

          # Factors that reduce maintainability
          score -= result.deprecations.size * 2
          score -= result.skipped * 1
          score -= [result.duration / 10, 20].min # Slow tests are harder to maintain

          [score, 0].max
        end

        def prioritize_fixes(failures)
          # Sort by priority: errors first, then by file to fix related issues together
          failures.sort_by do |failure|
            [
              (failure.type == :error) ? 0 : 1,
              failure.file_path || "",
              failure.line_number || 0
            ]
          end.map(&:full_test_name)
        end
      end
    end
  end
end