ace-test-runner 0.18.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.ace-defaults/test/runner.yml +35 -0
- data/.ace-defaults/test/suite.yml +31 -0
- data/.ace-defaults/test-runner/config.yml +61 -0
- data/CHANGELOG.md +626 -0
- data/LICENSE +21 -0
- data/README.md +42 -0
- data/Rakefile +14 -0
- data/exe/ace-test +26 -0
- data/exe/ace-test-suite +149 -0
- data/lib/ace/test_runner/atoms/command_builder.rb +165 -0
- data/lib/ace/test_runner/atoms/lazy_loader.rb +62 -0
- data/lib/ace/test_runner/atoms/line_number_resolver.rb +86 -0
- data/lib/ace/test_runner/atoms/report_directory_resolver.rb +48 -0
- data/lib/ace/test_runner/atoms/report_path_resolver.rb +67 -0
- data/lib/ace/test_runner/atoms/result_parser.rb +254 -0
- data/lib/ace/test_runner/atoms/test_detector.rb +114 -0
- data/lib/ace/test_runner/atoms/test_folder_detector.rb +53 -0
- data/lib/ace/test_runner/atoms/test_type_detector.rb +83 -0
- data/lib/ace/test_runner/atoms/timestamp_generator.rb +103 -0
- data/lib/ace/test_runner/cli/commands/test.rb +326 -0
- data/lib/ace/test_runner/cli.rb +16 -0
- data/lib/ace/test_runner/formatters/base_formatter.rb +102 -0
- data/lib/ace/test_runner/formatters/json_formatter.rb +90 -0
- data/lib/ace/test_runner/formatters/markdown_formatter.rb +91 -0
- data/lib/ace/test_runner/formatters/progress_file_formatter.rb +164 -0
- data/lib/ace/test_runner/formatters/progress_formatter.rb +328 -0
- data/lib/ace/test_runner/models/test_configuration.rb +165 -0
- data/lib/ace/test_runner/models/test_failure.rb +95 -0
- data/lib/ace/test_runner/models/test_group.rb +105 -0
- data/lib/ace/test_runner/models/test_report.rb +145 -0
- data/lib/ace/test_runner/models/test_result.rb +86 -0
- data/lib/ace/test_runner/molecules/cli_argument_parser.rb +263 -0
- data/lib/ace/test_runner/molecules/config_loader.rb +162 -0
- data/lib/ace/test_runner/molecules/deprecation_fixer.rb +204 -0
- data/lib/ace/test_runner/molecules/failed_package_reporter.rb +100 -0
- data/lib/ace/test_runner/molecules/failure_analyzer.rb +249 -0
- data/lib/ace/test_runner/molecules/in_process_runner.rb +249 -0
- data/lib/ace/test_runner/molecules/package_resolver.rb +106 -0
- data/lib/ace/test_runner/molecules/pattern_resolver.rb +146 -0
- data/lib/ace/test_runner/molecules/rake_integration.rb +218 -0
- data/lib/ace/test_runner/molecules/report_storage.rb +303 -0
- data/lib/ace/test_runner/molecules/smart_test_executor.rb +107 -0
- data/lib/ace/test_runner/molecules/test_executor.rb +162 -0
- data/lib/ace/test_runner/organisms/agent_reporter.rb +384 -0
- data/lib/ace/test_runner/organisms/report_generator.rb +151 -0
- data/lib/ace/test_runner/organisms/sequential_group_executor.rb +185 -0
- data/lib/ace/test_runner/organisms/test_orchestrator.rb +648 -0
- data/lib/ace/test_runner/rake_task.rb +90 -0
- data/lib/ace/test_runner/suite/display_helpers.rb +117 -0
- data/lib/ace/test_runner/suite/display_manager.rb +204 -0
- data/lib/ace/test_runner/suite/duration_estimator.rb +50 -0
- data/lib/ace/test_runner/suite/orchestrator.rb +120 -0
- data/lib/ace/test_runner/suite/process_monitor.rb +268 -0
- data/lib/ace/test_runner/suite/result_aggregator.rb +176 -0
- data/lib/ace/test_runner/suite/simple_display_manager.rb +122 -0
- data/lib/ace/test_runner/suite.rb +22 -0
- data/lib/ace/test_runner/version.rb +7 -0
- data/lib/ace/test_runner.rb +69 -0
- metadata +246 -0
|
# frozen_string_literal: true

require_relative "base_formatter"

module Ace
  module TestRunner
    module Formatters
      # Progress formatter that shows one dot per test file (faster execution)
      #
      # Prints one character per completed test file ("." green pass, "F" red
      # failure) while the suite runs, then a compact emoji summary and a
      # truncated failure listing from #format_stdout.
      class ProgressFileFormatter < BaseFormatter
        # Fallback report directory when no :report_dir option is supplied.
        # (Previously this literal was duplicated at three call sites.)
        DEFAULT_REPORT_DIR = ".ace-local/test/reports"

        # @param options [Hash] formatter options; recognised keys:
        #   :line_width              — dots printed per line before wrapping (default 80)
        #   :max_failures_to_display — failures listed inline in the summary (default 7)
        #   :save_reports / :report_dir — where the full report is expected to live
        def initialize(options = {})
          super
          @test_count = 0
          @line_width = options[:line_width] || 80
          @configuration = options
          @max_failures_to_display = options[:max_failures_to_display] || 7
        end

        # Builds the end-of-run console summary for +result+: report location,
        # a one-line emoji summary, and up to @max_failures_to_display failure
        # rows. Returns a String (lines joined with "\n").
        def format_stdout(result)
          lines = []

          # Progress dots are printed during execution, ensure newline
          lines << "" if @test_count > 0

          # Report directory - use actual report path if available.
          # NOTE(review): @report_path is never assigned in this class —
          # presumably set by the base formatter / report generator; confirm.
          if @report_path
            lines << "Details: #{@report_path}/"
          elsif @configuration && @configuration[:save_reports]
            lines << "Details: #{@configuration[:report_dir] || DEFAULT_REPORT_DIR}/latest/"
          end

          # Compact single-line summary with emoji status
          status = if result.success?
            "✅"
          elsif result.errors > 0
            "💥"
          else
            "❌"
          end

          summary = "#{status} #{result.total_tests} tests, #{result.assertions} assertions, " \
                    "#{result.failed} failures, #{result.errors} errors (#{format_duration(result.duration)})"
          lines << summary

          # Add failure details if there are any
          if result.has_failures?
            lines << ""
            total_failures = result.failed + result.errors

            # Display up to max_failures_to_display failures
            failures_to_show = result.failures_detail.take(@max_failures_to_display)

            # Show failure count header with reference to full report if needed
            if total_failures > @max_failures_to_display
              lines << "FAILURES (#{failures_to_show.size}/#{total_failures}) → #{report_base_path}/failures.json:"
            else
              lines << "FAILURES (#{total_failures}):"
            end

            failures_to_show.each_with_index do |failure, idx|
              # Format: location - short message with individual failure report path
              message = truncate_message(failure.message, 60)
              lines << "  #{failure_location(failure)} - #{message}"

              # Show individual failure report path if we have the report path
              if @report_path
                failure_filename = format("%03d-%s.md", idx + 1,
                  failure.full_test_name.gsub(/\W+/, "_").downcase[0...50])
                lines << "    → Details: #{@report_path}/failures/#{failure_filename}"
              end
            end

            # If there are more failures than displayed
            if result.failures_detail.size > @max_failures_to_display
              remaining = result.failures_detail.size - @max_failures_to_display
              lines << "  ... and #{remaining} more #{(remaining == 1) ? "failure" : "failures"}. See full report: #{report_base_path}/failures.json"
            end
          end

          lines.join("\n")
        end

        # Machine-readable report hash (status, stats, truncated failure list).
        # For CI, keep the report simple and parseable.
        def format_report(report)
          {
            status: report.success? ? "success" : "failure",
            stats: {
              total: report.result.total_tests,
              passed: report.result.passed,
              failed: report.result.failed,
              errors: report.result.errors,
              skipped: report.result.skipped,
              assertions: report.result.assertions,
              duration: report.result.duration
            },
            failures: report.result.failures_detail.map { |f| failure_summary(f) }
          }
        end

        # Run-start hook; resets the per-file dot counter.
        # No verbose output in progress mode.
        def on_start(total_files)
          @test_count = 0
        end

        # Per-file completion hook: prints one colored character per test file
        # and wraps the dot line every @line_width characters.
        def on_test_complete(file, success, duration)
          char = if success
            colorize(".", :green)
          else
            colorize("F", :red)
          end

          print char
          @test_count += 1

          # New line every N characters to prevent line overflow
          puts if @test_count % @line_width == 0
        end

        # Run-end hook: terminates the dot line and prints the summary.
        def on_finish(result)
          # Ensure we're on a new line
          puts unless @test_count == 0 || @test_count % @line_width == 0
          puts format_stdout(result)
        end

        private

        # Shortens a failure location such as
        # "/abs/path/test/foo_test.rb:42:in `test_x'" to "test/foo_test.rb:42".
        # Falls back to the raw location, then the test name, then "unknown".
        def failure_location(failure)
          return failure.test_name || "unknown" unless failure.location

          # Extract file and line from location (e.g., "/path/file.rb:42:in `test_method'")
          location_match = failure.location.match(/^([^:]+):(\d+)/)
          return failure.location unless location_match

          file = location_match[1].gsub(/^.*\/test\//, "test/") # Shorten path
          "#{file}:#{location_match[2]}"
        end

        # Directory holding the full report: the concrete @report_path when
        # known, otherwise the configured (or default) report dir's latest/.
        def report_base_path
          @report_path || "#{@configuration[:report_dir] || DEFAULT_REPORT_DIR}/latest"
        end

        # Collapses +message+ to a single line and truncates it to
        # +max_length+ characters (ellipsis included). Returns "" for nil.
        def truncate_message(message, max_length = 100)
          return "" unless message

          msg = message.strip.tr("\n", " ")
          if msg.length > max_length
            "#{msg[0...max_length - 3]}..."
          else
            msg
          end
        end

        # Minimal hash describing one failure for the JSON report.
        def failure_summary(failure)
          {
            test: failure.full_test_name,
            location: failure.location,
            message: truncate_message(failure.message)
          }
        end
      end
    end
  end
end
|
# frozen_string_literal: true

require_relative "base_formatter"

module Ace
  module TestRunner
    module Formatters
      # Progress formatter that shows one dot per test (not per file)
      #
      # Streams one character per individual test parsed from Minitest stdout
      # (".", "F", "E", "S"), optionally under per-group headers, and prints a
      # compact emoji summary with a truncated failure list at the end.
      class ProgressFormatter < BaseFormatter
        def initialize(options = {})
          super
          @test_count = 0 # characters printed on the current dot line
          @line_width = options[:line_width] || 80
          @configuration = options
          @max_failures_to_display = options[:max_failures_to_display] || 7
          @test_results = [] # one status char per test seen via stdout parsing
          @current_group = nil
          @group_counts = Hash.new(0) # files completed per detected group
          # NOTE(review): @files_by_group is initialized but never read or
          # written anywhere else in this class — possibly dead state; confirm
          # before removing.
          @files_by_group = Hash.new { |h, k| h[k] = [] }
          @show_groups = options[:show_groups] != false
        end

        # Builds the end-of-run console summary string for +result+.
        def format_stdout(result)
          lines = []

          # Progress dots are printed during execution, ensure newline
          lines << "" if @test_count > 0

          # Report directory - use actual report path if available.
          # NOTE(review): @report_path is never assigned in this class —
          # presumably set by the base formatter / report generator; confirm.
          if @report_path
            lines << "Details: #{@report_path}/"
          elsif @configuration && @configuration[:save_reports]
            lines << "Details: #{@configuration[:report_dir] || ".ace-local/test/reports"}/latest/"
          end

          # Compact single-line summary with emoji status
          status = status_icon(result)

          summary = "#{status} #{result.total_tests} tests, #{result.assertions} assertions, " +
            "#{result.failed} failures, #{result.errors} errors"
          summary += ", #{result.skipped} skipped" if result.has_skips?
          summary += " (#{format_duration(result.duration)})"
          lines << summary

          # Add failure details if there are any
          if result.has_failures?
            lines << ""
            total_failures = result.failed + result.errors

            # Display up to max_failures_to_display failures
            failures_to_show = result.failures_detail.take(@max_failures_to_display)

            # Determine the label based on what types we have
            failure_count = result.failures_detail.count { |f| f.type == :failure }
            error_count = result.failures_detail.count { |f| f.type == :error }

            label = if failure_count > 0 && error_count > 0
              "FAILURES & ERRORS"
            elsif error_count > 0
              "ERRORS"
            else
              "FAILURES"
            end

            # Show failure count header with reference to full report if needed
            if total_failures > @max_failures_to_display
              report_path = @report_path || "#{@configuration[:report_dir] || ".ace-local/test/reports"}/latest"
              lines << "#{label} (#{failures_to_show.size}/#{total_failures}) → #{report_path}/failures.json:"
            else
              lines << "#{label} (#{total_failures}):"
            end

            failures_to_show.each_with_index do |failure, idx|
              # Extract file and line from location (e.g., "/path/file.rb:42:in `test_method'")
              if failure.location
                location_match = failure.location.match(/^([^:]+):(\d+)/)
                if location_match
                  file = location_match[1].gsub(/^.*\/test\//, "test/") # Shorten path
                  line = location_match[2]
                  location = "#{file}:#{line}"
                else
                  location = failure.location
                end
              else
                location = failure.test_name || "unknown"
              end

              # Format: location - short message with individual failure report path
              message = truncate_message(failure.message, 60)
              lines << "  #{location} - #{message}"

              # Show individual failure report path if we have the report path
              if @report_path
                failure_filename = format("%03d-%s.md", idx + 1,
                  failure.full_test_name.gsub(/\W+/, "_").downcase[0...50])
                lines << "    → Details: #{@report_path}/failures/#{failure_filename}"
              end
            end

            # If there are more failures than displayed
            if result.failures_detail.size > @max_failures_to_display
              remaining = result.failures_detail.size - @max_failures_to_display
              report_path = @report_path || "#{@configuration[:report_dir] || ".ace-local/test/reports"}/latest"
              lines << "  ... and #{remaining} more #{(remaining == 1) ? "failure" : "failures"}. See full report: #{report_path}/failures.json"
            end
          end

          lines.join("\n")
        end

        # Machine-readable report hash (status, stats, truncated failure list).
        # For CI, keep the report simple and parseable.
        def format_report(report)
          {
            status: report.success? ? "success" : "failure",
            stats: {
              total: report.result.total_tests,
              passed: report.result.passed,
              failed: report.result.failed,
              errors: report.result.errors,
              skipped: report.result.skipped,
              assertions: report.result.assertions,
              duration: report.result.duration
            },
            failures: report.result.failures_detail.map { |f| failure_summary(f) }
          }
        end

        # Run-start hook: resets all per-run counters and group tracking.
        # No verbose output in progress mode.
        def on_start(total_files)
          @test_count = 0
          @test_results = []
          @current_group = nil
          @group_counts = Hash.new(0)
          @total_files = total_files
          @total_available = nil
        end

        # Like #on_start but also announces "Running X/Y test files" when only
        # a subset of the available files will run.
        def on_start_with_totals(files_to_run, total_available)
          on_start(files_to_run)
          @total_available = total_available

          # Show file count if different from total available
          if total_available && total_available > files_to_run
            puts "Running #{files_to_run}/#{total_available} test files"
            puts ""
          end
        end

        # Parses Minitest reporter output and prints one colored status
        # character per matching test line; wraps every @line_width characters.
        def on_test_stdout(stdout)
          # Parse individual test results from stdout
          return unless stdout

          # Look for test result lines in Minitest output
          # Handles both plain and ANSI-colored output from Minitest::Reporters
          stdout.each_line do |line|
            # Match test result lines like:
            #   test_something [32m PASS[0m (0.00s)
            #   test_other [31m FAIL[0m (0.01s)
            #   test_error ERROR (0.00s)
            #   test_skip SKIP (0.00s)
            # Improved regex to handle ANSI codes and underscores in test names
            # ANSI codes are: \e[32m (color start), \e[0m (reset)
            if line =~ /^\s*test_[\w_]+.*\s+(PASS|FAIL|ERROR|SKIP).*\([0-9.]+s\)/
              result = case $1
              when "PASS"
                "."
              when "FAIL"
                "F"
              when "ERROR"
                "E"
              when "SKIP"
                "S"
              else
                "."
              end

              print colorize(result, result_color(result))
              @test_count += 1
              @test_results << result

              # New line every N characters to prevent line overflow
              puts if @test_count % @line_width == 0
            end
          end
        end

        # Per-file completion hook. Prints a group header when the detected
        # group changes, falls back to one dot per file when no per-test
        # output was parsed, and tallies completed files per group.
        def on_test_complete(file, success, duration)
          # Detect group from file path
          group = detect_group(file)

          # Print group header if it's a new group and groups are enabled
          if @show_groups && group != @current_group
            puts unless @test_count == 0
            puts ""
            puts colorize("═══ #{group.to_s.capitalize} Tests ═══", :cyan)
            @current_group = group
            @test_count = 0 # Reset count for new line
          end

          # For per-test progress, we handle output in on_test_stdout if available
          # Otherwise fall back to per-file dots
          if @test_results.empty?
            # No per-test output received, show file-level dot
            char = success ? colorize(".", :green) : colorize("F", :red)
            print char
            @test_count += 1
            puts if @test_count % @line_width == 0
          end

          # Track group counts
          @group_counts[group] += 1 if @show_groups
        end

        # Maps a test file path to its group symbol (:atoms, :molecules,
        # :organisms, :models, :integration, :system) by directory; any other
        # path yields :other.
        def detect_group(file_path)
          case file_path
          when /test\/unit\/atoms\//
            :atoms
          when /test\/unit\/molecules\//
            :molecules
          when /test\/unit\/organisms\//
            :organisms
          when /test\/unit\/models\//
            :models
          when /test\/integration\//
            :integration
          when /test\/system\//
            :system
          else
            :other
          end
        end

        # Announces the start of an explicitly-named group run and resets the
        # dot-line counter.
        def on_group_start(group_name, file_count)
          # Print visual separator before group
          puts "" unless @test_count == 0
          puts ""
          puts "Running #{group_name} (#{file_count} #{(file_count == 1) ? "file" : "files"})..."
          @test_count = 0
        end

        # Prints a colored one-line completion status for a finished group.
        # +summary+ is read for :runs and :failures counts (defaulting to 0).
        def on_group_complete(group_name, success, duration, summary)
          # Ensure we're on a new line after dots
          puts unless @test_count == 0 || @test_count % @line_width == 0

          # Show group completion status
          status_icon = success ? "✓" : "✗"
          test_count = summary[:runs] || 0
          failure_count = summary[:failures] || 0

          status_line = "#{status_icon} #{group_name} complete " +
            "(#{format_duration(duration)}, #{test_count} tests, #{failure_count} failures)"

          puts colorize(status_line, success ? :green : :red)
          puts ""
        end

        # Run-end hook: terminates the dot line, optionally prints the
        # per-group file tally, then the final summary from #format_stdout.
        def on_finish(result)
          # Ensure we're on a new line
          puts unless @test_count == 0 || @test_count % @line_width == 0

          # Print group summary if we have groups and groups are enabled
          if @show_groups && @group_counts.any?
            puts ""
            puts colorize("═══ Group Summary ═══", :cyan)
            @group_counts.each do |group, count|
              puts "  #{group.to_s.capitalize}: #{count} #{(count == 1) ? "file" : "files"}"
            end
          end

          puts format_stdout(result)
        end

        private

        # Determines the appropriate status icon based on test results
        # Returns ⚠️ for successful tests with skips (informational)
        # Returns ✅ for successful tests without skips
        # Returns 💥 for tests with errors
        # Returns ❌ for tests with failures
        def status_icon(result)
          if result.success? && !result.has_skips?
            "✅"
          elsif result.success? && result.has_skips?
            "⚠️"
          elsif result.errors > 0
            "💥"
          else
            "❌"
          end
        end

        # Maps a status character to its display color.
        def result_color(result)
          case result
          when "."
            :green
          when "F"
            :red
          when "E"
            :yellow
          when "S"
            :cyan
          else
            :default
          end
        end

        # Collapses +message+ to one line and truncates it to +max_length+
        # characters (ellipsis included). Returns "" for nil.
        def truncate_message(message, max_length = 100)
          return "" unless message

          msg = message.strip.tr("\n", " ")
          if msg.length > max_length
            "#{msg[0...max_length - 3]}..."
          else
            msg
          end
        end

        # Minimal hash describing one failure for the JSON report.
        def failure_summary(failure)
          {
            test: failure.full_test_name,
            location: failure.location,
            message: truncate_message(failure.message)
          }
        end
      end
    end
  end
end
|
|
# frozen_string_literal: true

module Ace
  module TestRunner
    module Models
      # Configuration for test execution.
      #
      # Plain value object holding every knob the runner understands, with
      # defaults applied at construction time. Instances round-trip through
      # #to_h / #merge and can be checked with #validate!.
      class TestConfiguration
        attr_accessor :format, :report_dir, :save_reports, :fail_fast,
          :verbose, :filter, :fix_deprecations, :patterns,
          :timeout, :parallel, :color, :per_file, :groups,
          :target, :config_path, :failure_limits, :profile,
          :execution, :files, :run_in_single_batch

        # Builds a configuration, filling each unspecified attribute with its
        # default. Options where an explicit false must survive
        # (:save_reports, :color) use fetch instead of ||.
        def initialize(attributes = {})
          # Truthy-or-fallback options, driven by a defaults table.
          simple_defaults = {
            format: "progress",                  # per-test progress is the default
            report_dir: ".ace-local/test/reports",
            fail_fast: false,
            verbose: false,
            fix_deprecations: false,
            parallel: false,
            per_file: false,                     # grouped execution is faster
            run_in_single_batch: false,
            failure_limits: {max_display: 7},
            execution: {}
          }
          simple_defaults.each do |key, fallback|
            instance_variable_set(:"@#{key}", attributes[key] || fallback)
          end

          # An explicit false here must be honoured, so fetch (not ||).
          @save_reports = attributes.fetch(:save_reports, true)
          @color = attributes.fetch(:color, true)

          # Pass-through options that default to nil.
          @filter = attributes[:filter]
          @target = attributes[:target]
          @config_path = attributes[:config_path]
          @timeout = attributes[:timeout] # in seconds; nil = no timeout
          @profile = attributes[:profile] # nil = off; N = show N slowest tests
          @files = attributes[:files]     # specific files (override target/patterns)

          @patterns = attributes[:patterns] || default_patterns
          @groups = attributes[:groups] || default_groups
        end

        # True when +format+ names one of the supported output formats.
        def valid_format?
          case format
          when "json", "progress", "progress-file" then true
          else false
          end
        end

        # Validates the configuration, raising ArgumentError on the first
        # problem found. Returns true when everything checks out.
        def validate!
          unless valid_format?
            raise ArgumentError, "Unknown format '#{format}'. Valid formats: progress, progress-file, json"
          end

          if save_reports && !writable_directory?(report_dir)
            raise ArgumentError, "Cannot write to #{report_dir}. Check permissions"
          end

          # Validate execution_mode if provided
          mode = execution_mode
          if mode && !%w[grouped all-at-once].include?(mode)
            raise ArgumentError, "Unknown execution_mode '#{mode}'. Valid modes: grouped, all-at-once"
          end

          true
        end

        # Execution mode, accepting either a symbol or a string key inside the
        # :execution hash. Defaults to "all-at-once" (simple, fast execution).
        def execution_mode
          configured = @execution&.[](:mode) || @execution&.dig("mode")
          configured || "all-at-once"
        end

        # Whether groups run isolated from each other. Defaults to true for
        # better isolation; an explicit false (symbol or string key) wins.
        def group_isolation
          setting = @execution&.[](:group_isolation)
          setting = @execution&.dig("group_isolation") if setting.nil?
          setting.nil? ? true : setting
        end

        # Resolves the formatter class for +format+ via the lazy loader
        # (loaded on demand).
        def formatter_class
          Atoms::LazyLoader.load_formatter(format)
        end

        # Serialises every attribute into a symbol-keyed hash — the inverse of
        # the constructor's +attributes+ argument.
        def to_h
          %i[format report_dir save_reports fail_fast verbose filter
            fix_deprecations patterns groups target config_path timeout
            parallel color per_file failure_limits profile execution
            files run_in_single_batch].to_h { |key| [key, public_send(key)] }
        end

        # Returns a new configuration with +options+ overlaid on this one.
        def merge(options)
          self.class.new(to_h.merge(options))
        end

        # Loads configuration from the "test" section of a YAML file; an
        # absent file yields an all-defaults configuration.
        def self.from_file(path)
          return new unless File.exist?(path)

          raw = YAML.load_file(path)
          new((raw["test"] || {}).transform_keys(&:to_sym))
        end

        # Builds configuration from the ace-core configuration cascade when
        # that constant is defined, otherwise falls back to defaults.
        def self.from_cascade
          return new unless defined?(Ace::Core::Configuration)

          cascade = Ace::Core::Configuration.new
          new(cascade.get("test", {}).transform_keys(&:to_sym))
        end

        private

        # Glob patterns for each supported test category.
        def default_patterns
          {
            atoms: "test/unit/atoms/**/*_test.rb",
            molecules: "test/unit/molecules/**/*_test.rb",
            organisms: "test/unit/organisms/**/*_test.rb",
            models: "test/unit/models/**/*_test.rb",
            integration: "test/integration/**/*_test.rb",
            system: "test/system/**/*_test.rb",
            all: "test/**/*_test.rb"
          }
        end

        # Named bundles of categories runnable as one group.
        def default_groups
          {
            unit: %w[atoms molecules organisms models],
            all: %w[unit integration system],
            quick: %w[atoms molecules]
          }
        end

        # True when +dir+ — or, if it does not exist yet, its nearest existing
        # ancestor — is writable (i.e. the directory could be created there).
        def writable_directory?(dir)
          return File.writable?(dir) if Dir.exist?(dir)

          candidate = dir
          until Dir.exist?(candidate)
            parent = File.dirname(candidate)
            return false if parent == candidate # reached the root; nothing exists

            candidate = parent
          end
          File.writable?(candidate)
        end
      end
    end
  end
end
|