ace-test-runner 0.18.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.ace-defaults/test/runner.yml +35 -0
- data/.ace-defaults/test/suite.yml +31 -0
- data/.ace-defaults/test-runner/config.yml +61 -0
- data/CHANGELOG.md +626 -0
- data/LICENSE +21 -0
- data/README.md +42 -0
- data/Rakefile +14 -0
- data/exe/ace-test +26 -0
- data/exe/ace-test-suite +149 -0
- data/lib/ace/test_runner/atoms/command_builder.rb +165 -0
- data/lib/ace/test_runner/atoms/lazy_loader.rb +62 -0
- data/lib/ace/test_runner/atoms/line_number_resolver.rb +86 -0
- data/lib/ace/test_runner/atoms/report_directory_resolver.rb +48 -0
- data/lib/ace/test_runner/atoms/report_path_resolver.rb +67 -0
- data/lib/ace/test_runner/atoms/result_parser.rb +254 -0
- data/lib/ace/test_runner/atoms/test_detector.rb +114 -0
- data/lib/ace/test_runner/atoms/test_folder_detector.rb +53 -0
- data/lib/ace/test_runner/atoms/test_type_detector.rb +83 -0
- data/lib/ace/test_runner/atoms/timestamp_generator.rb +103 -0
- data/lib/ace/test_runner/cli/commands/test.rb +326 -0
- data/lib/ace/test_runner/cli.rb +16 -0
- data/lib/ace/test_runner/formatters/base_formatter.rb +102 -0
- data/lib/ace/test_runner/formatters/json_formatter.rb +90 -0
- data/lib/ace/test_runner/formatters/markdown_formatter.rb +91 -0
- data/lib/ace/test_runner/formatters/progress_file_formatter.rb +164 -0
- data/lib/ace/test_runner/formatters/progress_formatter.rb +328 -0
- data/lib/ace/test_runner/models/test_configuration.rb +165 -0
- data/lib/ace/test_runner/models/test_failure.rb +95 -0
- data/lib/ace/test_runner/models/test_group.rb +105 -0
- data/lib/ace/test_runner/models/test_report.rb +145 -0
- data/lib/ace/test_runner/models/test_result.rb +86 -0
- data/lib/ace/test_runner/molecules/cli_argument_parser.rb +263 -0
- data/lib/ace/test_runner/molecules/config_loader.rb +162 -0
- data/lib/ace/test_runner/molecules/deprecation_fixer.rb +204 -0
- data/lib/ace/test_runner/molecules/failed_package_reporter.rb +100 -0
- data/lib/ace/test_runner/molecules/failure_analyzer.rb +249 -0
- data/lib/ace/test_runner/molecules/in_process_runner.rb +249 -0
- data/lib/ace/test_runner/molecules/package_resolver.rb +106 -0
- data/lib/ace/test_runner/molecules/pattern_resolver.rb +146 -0
- data/lib/ace/test_runner/molecules/rake_integration.rb +218 -0
- data/lib/ace/test_runner/molecules/report_storage.rb +303 -0
- data/lib/ace/test_runner/molecules/smart_test_executor.rb +107 -0
- data/lib/ace/test_runner/molecules/test_executor.rb +162 -0
- data/lib/ace/test_runner/organisms/agent_reporter.rb +384 -0
- data/lib/ace/test_runner/organisms/report_generator.rb +151 -0
- data/lib/ace/test_runner/organisms/sequential_group_executor.rb +185 -0
- data/lib/ace/test_runner/organisms/test_orchestrator.rb +648 -0
- data/lib/ace/test_runner/rake_task.rb +90 -0
- data/lib/ace/test_runner/suite/display_helpers.rb +117 -0
- data/lib/ace/test_runner/suite/display_manager.rb +204 -0
- data/lib/ace/test_runner/suite/duration_estimator.rb +50 -0
- data/lib/ace/test_runner/suite/orchestrator.rb +120 -0
- data/lib/ace/test_runner/suite/process_monitor.rb +268 -0
- data/lib/ace/test_runner/suite/result_aggregator.rb +176 -0
- data/lib/ace/test_runner/suite/simple_display_manager.rb +122 -0
- data/lib/ace/test_runner/suite.rb +22 -0
- data/lib/ace/test_runner/version.rb +7 -0
- data/lib/ace/test_runner.rb +69 -0
- metadata +246 -0
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Ace
  module TestRunner
    module Organisms
      # Builds structured report views (full report, summary, failure,
      # deprecation, performance, and recommendation sections) from a test
      # result object, delegating any rendering enrichment to the
      # configured formatter.
      class ReportGenerator
        # @param configuration [Object] must respond to #formatter_class,
        #   #to_h and #format
        def initialize(configuration)
          @configuration = configuration
          @formatter = configuration.formatter_class.new(configuration.to_h)
        end

        # Assembles a Models::TestReport for +result+ and the list of
        # +files_tested+, letting the formatter enrich it when supported.
        def generate(result, files_tested)
          report = Models::TestReport.new(
            result: result,
            configuration: @configuration,
            timestamp: Time.now,
            files_tested: files_tested,
            metadata: generate_metadata
          )

          # Formatters may optionally post-process the report.
          @formatter.enhance_report(report) if @formatter.respond_to?(:enhance_report)

          report
        end

        # Condensed counters-and-outcome view of +result+.
        def generate_summary(result)
          {
            total_tests: result.total_tests,
            passed: result.passed,
            failed: result.failed,
            errors: result.errors,
            skipped: result.skipped,
            pass_rate: result.pass_rate,
            duration: result.duration,
            success: result.success?
          }
        end

        # Failure breakdown grouped by type and by file; nil when there is
        # nothing to report.
        def generate_failure_report(failures)
          return nil if failures.empty?

          {
            count: failures.size,
            by_type: group_failures_by_type(failures),
            by_file: group_failures_by_file(failures),
            details: failures.map(&:to_h)
          }
        end

        # Deprecation warnings plus auto-fix suggestions; nil when none.
        def generate_deprecation_report(deprecations)
          return nil if deprecations.empty?

          fixer = Molecules::DeprecationFixer.new
          fixes = deprecations.map { |warning| fixer.fix_deprecations_in_output(warning) }.flatten

          {
            count: deprecations.size,
            warnings: deprecations,
            suggested_fixes: fixes
          }
        end

        # Throughput metrics derived from +result+ timings.
        def generate_performance_report(result)
          {
            total_duration: result.duration,
            average_per_test: safe_ratio(result.duration, result.total_tests),
            tests_per_second: safe_ratio(result.total_tests, result.duration),
            assertions_per_second: safe_ratio(result.assertions, result.duration)
          }
        end

        # Heuristic advice derived from +result+: slow runs, low pass rate,
        # excessive skips, and deprecation warnings each add one entry.
        def generate_recommendations(result)
          [].tap do |advice|
            if result.duration > 60
              advice << {
                type: "performance",
                message: "Tests took over a minute. Consider using --parallel or optimizing slow tests."
              }
            end

            if result.pass_rate < 50
              advice << {
                type: "quality",
                message: "Less than 50% of tests passing. Focus on fixing critical failures first."
              }
            end

            if result.skipped > result.total_tests * 0.2
              advice << {
                type: "coverage",
                message: "Over 20% of tests are skipped. Review and enable skipped tests."
              }
            end

            if result.has_deprecations?
              advice << {
                type: "maintenance",
                message: "Deprecation warnings detected. Run with --fix-deprecations to auto-fix."
              }
            end
          end
        end

        private

        # numerator / denominator, or 0 when the denominator is not positive.
        def safe_ratio(numerator, denominator)
          (denominator > 0) ? numerator / denominator : 0
        end

        # Provenance block stamped onto every report.
        def generate_metadata
          {
            generator: "ace-test-runner",
            version: VERSION,
            format: @configuration.format,
            timestamp: Time.now.iso8601,
            configuration_source: configuration_source
          }
        end

        # Best-effort label describing where configuration came from.
        def configuration_source
          return "ace-core cascade" if defined?(Ace::Core::Configuration)
          return "project configuration" if File.exist?(".ace/test.yml")

          "defaults"
        end

        # Counts of failures keyed by failure type.
        def group_failures_by_type(failures)
          failures.group_by(&:type).transform_values(&:count)
        end

        # Counts of failures keyed by file, most-affected file first.
        def group_failures_by_file(failures)
          failures.group_by(&:file_path).transform_values(&:count).sort_by { |_, count| -count }.to_h
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,185 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Ace
  module TestRunner
    module Organisms
      # Runs test groups one after another, reporting per-group progress to
      # an optional formatter and optionally aborting after the first
      # failing group (fail-fast).
      class SequentialGroupExecutor
        # @param test_executor [Object] responds to #execute_with_progress
        # @param result_parser [Object] responds to #parse_output
        # @param formatter [Object, nil] optional progress listener
        def initialize(test_executor:, result_parser:, formatter: nil)
          @test_executor = test_executor
          @result_parser = result_parser
          @formatter = formatter
        end

        # Executes each group (a hash with :name and :files) in order,
        # yielding progress events through to the caller's block. Returns
        # an aggregated result hash; when fail-fast is enabled and a group
        # fails, returns early with :stopped_at_group set to that group's
        # name.
        def execute_groups(groups, options = {})
          fail_fast = options[:fail_fast] || options[:group_fail_fast]
          raw_results = []
          files_run = []
          per_group = []

          groups.each do |group|
            name = group[:name]
            files = group[:files]
            next if files.empty?

            notify(:on_group_start, name, files.size)

            started_at = Time.now

            # Run this group's files, forwarding progress events upward.
            raw = @test_executor.execute_with_progress(files, options) do |event|
              yield(event) if block_given?
            end

            # Per-file execution yields several command outputs that must be
            # parsed individually and merged; a single run parses directly.
            parsed =
              if raw[:commands].is_a?(Array)
                aggregate_results(raw[:stdout], @result_parser)
              else
                @result_parser.parse_output(raw[:stdout])
              end

            elapsed = Time.now - started_at
            passed = raw[:success]

            per_group << {
              name: name,
              success: passed,
              duration: elapsed,
              parsed: parsed,
              files: files
            }

            notify(:on_group_complete, name, passed, elapsed, parsed[:summary])

            raw_results << raw
            files_run.concat(files)

            # Abort remaining groups on first failure when fail-fast is set.
            if fail_fast && !passed
              return build_aggregated_result(per_group, files_run, raw_results, stopped: name)
            end
          end

          build_aggregated_result(per_group, files_run, raw_results)
        end

        private

        # Invokes +hook+ on the formatter when one is present and supports it.
        def notify(hook, *args)
          @formatter.public_send(hook, *args) if @formatter.respond_to?(hook)
        end

        # Splits concatenated test output back into per-run chunks, parses
        # each, and merges counters, failures, deprecations and timings into
        # a single parsed-result hash.
        def aggregate_results(combined_output, parser)
          chunks = combined_output.split(/^Started with run options/)
          chunks.shift if chunks.first&.empty?

          fields = %i[runs assertions failures errors skips passed]
          merged = {
            summary: fields.to_h { |field| [field, 0] },
            failures: [],
            duration: 0.0,
            deprecations: [],
            test_times: []
          }

          chunks.each do |chunk|
            # Re-attach the header the split stripped off so the parser sees
            # well-formed output.
            parsed = parser.parse_output("Started with run options" + chunk)

            fields.each { |field| merged[:summary][field] += parsed[:summary][field] }
            merged[:failures].concat(parsed[:failures])
            merged[:deprecations].concat(parsed[:deprecations])
            merged[:duration] += parsed[:duration]
            merged[:test_times].concat(parsed[:test_times]) if parsed[:test_times]
          end

          # Slowest tests first.
          merged[:test_times].sort_by! { |timing| -timing[:duration] }
          merged
        end

        # Folds per-group results into the single result hash returned by
        # #execute_groups. +stopped+ names the group that triggered a
        # fail-fast exit, or nil when every group ran.
        def build_aggregated_result(group_results, all_files, all_results, stopped: nil)
          fields = %i[runs assertions failures errors skips passed]
          totals = fields.to_h { |field| [field, 0] }
          failures = []
          deprecations = []
          total_duration = 0.0
          test_times = []

          group_results.each do |entry|
            parsed = entry[:parsed]
            fields.each { |field| totals[field] += parsed[:summary][field] }
            failures.concat(parsed[:failures] || [])
            deprecations.concat(parsed[:deprecations] || [])
            total_duration += entry[:duration]
            test_times.concat(parsed[:test_times] || [])
          end

          # Slowest tests first.
          test_times.sort_by! { |timing| -timing[:duration] } unless test_times.empty?

          {
            stdout: all_results.map { |r| r[:stdout] }.join("\n"),
            stderr: all_results.map { |r| r[:stderr] }.join("\n"),
            success: totals[:failures] == 0 && totals[:errors] == 0,
            duration: total_duration,
            commands: all_results.map { |r| r[:command] || r[:commands] }.flatten.compact,
            parsed_result: {
              summary: totals,
              failures: failures,
              deprecations: deprecations,
              duration: total_duration,
              test_times: test_times
            },
            stopped_at_group: stopped
          }
        end
      end
    end
  end
end
|