ace-test-runner 0.18.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.ace-defaults/test/runner.yml +35 -0
- data/.ace-defaults/test/suite.yml +31 -0
- data/.ace-defaults/test-runner/config.yml +61 -0
- data/CHANGELOG.md +626 -0
- data/LICENSE +21 -0
- data/README.md +42 -0
- data/Rakefile +14 -0
- data/exe/ace-test +26 -0
- data/exe/ace-test-suite +149 -0
- data/lib/ace/test_runner/atoms/command_builder.rb +165 -0
- data/lib/ace/test_runner/atoms/lazy_loader.rb +62 -0
- data/lib/ace/test_runner/atoms/line_number_resolver.rb +86 -0
- data/lib/ace/test_runner/atoms/report_directory_resolver.rb +48 -0
- data/lib/ace/test_runner/atoms/report_path_resolver.rb +67 -0
- data/lib/ace/test_runner/atoms/result_parser.rb +254 -0
- data/lib/ace/test_runner/atoms/test_detector.rb +114 -0
- data/lib/ace/test_runner/atoms/test_folder_detector.rb +53 -0
- data/lib/ace/test_runner/atoms/test_type_detector.rb +83 -0
- data/lib/ace/test_runner/atoms/timestamp_generator.rb +103 -0
- data/lib/ace/test_runner/cli/commands/test.rb +326 -0
- data/lib/ace/test_runner/cli.rb +16 -0
- data/lib/ace/test_runner/formatters/base_formatter.rb +102 -0
- data/lib/ace/test_runner/formatters/json_formatter.rb +90 -0
- data/lib/ace/test_runner/formatters/markdown_formatter.rb +91 -0
- data/lib/ace/test_runner/formatters/progress_file_formatter.rb +164 -0
- data/lib/ace/test_runner/formatters/progress_formatter.rb +328 -0
- data/lib/ace/test_runner/models/test_configuration.rb +165 -0
- data/lib/ace/test_runner/models/test_failure.rb +95 -0
- data/lib/ace/test_runner/models/test_group.rb +105 -0
- data/lib/ace/test_runner/models/test_report.rb +145 -0
- data/lib/ace/test_runner/models/test_result.rb +86 -0
- data/lib/ace/test_runner/molecules/cli_argument_parser.rb +263 -0
- data/lib/ace/test_runner/molecules/config_loader.rb +162 -0
- data/lib/ace/test_runner/molecules/deprecation_fixer.rb +204 -0
- data/lib/ace/test_runner/molecules/failed_package_reporter.rb +100 -0
- data/lib/ace/test_runner/molecules/failure_analyzer.rb +249 -0
- data/lib/ace/test_runner/molecules/in_process_runner.rb +249 -0
- data/lib/ace/test_runner/molecules/package_resolver.rb +106 -0
- data/lib/ace/test_runner/molecules/pattern_resolver.rb +146 -0
- data/lib/ace/test_runner/molecules/rake_integration.rb +218 -0
- data/lib/ace/test_runner/molecules/report_storage.rb +303 -0
- data/lib/ace/test_runner/molecules/smart_test_executor.rb +107 -0
- data/lib/ace/test_runner/molecules/test_executor.rb +162 -0
- data/lib/ace/test_runner/organisms/agent_reporter.rb +384 -0
- data/lib/ace/test_runner/organisms/report_generator.rb +151 -0
- data/lib/ace/test_runner/organisms/sequential_group_executor.rb +185 -0
- data/lib/ace/test_runner/organisms/test_orchestrator.rb +648 -0
- data/lib/ace/test_runner/rake_task.rb +90 -0
- data/lib/ace/test_runner/suite/display_helpers.rb +117 -0
- data/lib/ace/test_runner/suite/display_manager.rb +204 -0
- data/lib/ace/test_runner/suite/duration_estimator.rb +50 -0
- data/lib/ace/test_runner/suite/orchestrator.rb +120 -0
- data/lib/ace/test_runner/suite/process_monitor.rb +268 -0
- data/lib/ace/test_runner/suite/result_aggregator.rb +176 -0
- data/lib/ace/test_runner/suite/simple_display_manager.rb +122 -0
- data/lib/ace/test_runner/suite.rb +22 -0
- data/lib/ace/test_runner/version.rb +7 -0
- data/lib/ace/test_runner.rb +69 -0
- metadata +246 -0
|
@@ -0,0 +1,268 @@
|
|
|
1
|
+
# frozen_string_literal: true

require "open3"
require "json"
require "shellwords"

module Ace
  module TestRunner
    module Suite
      # ProcessMonitor runs per-package `ace-test` commands as child processes,
      # keeping at most +max_parallel+ of them alive at once and queueing the
      # rest. Callers drive it by polling #check_processes (or blocking on
      # #wait_all); progress and completion are reported through the callback
      # handed to #start_package as (package, status_hash, output_chunk).
      class ProcessMonitor
        attr_reader :processes, :max_parallel

        def initialize(max_parallel = 10)
          @max_parallel = max_parallel
          @processes = {}   # package name => bookkeeping hash for a live child
          @queue = []       # packages waiting for a free slot
          @completed = []   # names to purge from @processes after each pass
        end

        # Spawn the test command for +package+, or queue it when at capacity.
        #
        # package      - hash with at least "name" and "path"
        # test_options - string-keyed options ("format", "report_dir", ...)
        # callback     - optional; invoked with (package, status, chunk)
        def start_package(package, test_options, &callback)
          # Queue the package if we're at max capacity.
          if @processes.size >= @max_parallel
            @queue << {package: package, options: test_options, callback: callback}
            callback.call(package, {status: :waiting}, nil) if callback
            return
          end

          cmd = build_command(package, test_options)

          # Strip assignment context vars to prevent tests from resolving to
          # wrong assignments. A nil value tells Process.spawn to UNSET the
          # variable in the child environment.
          env = ENV.to_h.merge({
            "ACE_ASSIGN_ID" => nil,
            "ACE_ASSIGN_FORK_ROOT" => nil
          })
          start_time = Time.now
          stdin, stdout, stderr, thread = Open3.popen3(env, cmd, chdir: package["path"])

          @processes[package["name"]] = {
            package: package,
            thread: thread,
            stdout: stdout,
            stderr: stderr,
            stdin: stdin,
            start_time: start_time,
            callback: callback,
            output: +"",
            # Drained separately from :output so a chatty child cannot fill
            # the stderr pipe buffer and deadlock, while the stdout-only
            # result parsing below stays unchanged.
            stderr_output: +"",
            report_root: test_options["report_dir"],
            test_count: 0,
            tests_run: 0,
            dots: +""
          }

          # Initial callback announcing the run.
          callback.call(package, {status: :running, start_time: start_time}, nil) if callback
        end

        # Poll every active process: drain pipes, emit progress callbacks,
        # finalize any process whose thread has exited, then backfill free
        # slots from the queue.
        def check_processes
          @processes.each do |name, process_info|
            package = process_info[:package]
            thread = process_info[:thread]
            callback = process_info[:callback]

            # Read available stdout without blocking and report progress.
            begin
              if IO.select([process_info[:stdout]], nil, nil, 0)
                chunk = process_info[:stdout].read_nonblock(4096)
                process_info[:output] << chunk

                # Parse progress (dots / totals) from the new output.
                parse_progress(process_info, chunk)

                if callback
                  elapsed = Time.now - process_info[:start_time]
                  callback.call(package, {
                    status: :running,
                    progress: process_info[:tests_run],
                    total: process_info[:test_count],
                    dots: process_info[:dots],
                    elapsed: elapsed
                  }, chunk)
                end
              end
            rescue IO::WaitReadable, EOFError
              # No data available or stream closed.
            end

            # Drain stderr too; without this a child that writes a lot to
            # stderr blocks once the pipe buffer fills and never exits.
            begin
              if IO.select([process_info[:stderr]], nil, nil, 0)
                process_info[:stderr_output] << process_info[:stderr].read_nonblock(4096)
              end
            rescue IO::WaitReadable, EOFError
              # No data available or stream closed.
            end

            # Finalize once the process has exited.
            unless thread.alive?
              elapsed = Time.now - process_info[:start_time]
              # NOTE: exitstatus is nil when the child died from a signal;
              # the success fallback below then treats it as a failure.
              exit_status = thread.value.exitstatus

              # Collect whatever output is still buffered in the pipe.
              remaining_output = begin
                process_info[:stdout].read
              rescue
                ""
              end
              process_info[:output] << remaining_output

              # Prefer accurate results from summary.json over parsed stdout.
              results = nil
              reports_dir = Atoms::ReportPathResolver.report_directory(
                package["path"],
                report_root: process_info[:report_root],
                package_name: package["name"]
              )
              summary_file = reports_dir ? File.join(reports_dir, "summary.json") : nil
              if summary_file && File.exist?(summary_file)
                begin
                  json_data = JSON.parse(File.read(summary_file))
                  results = {
                    tests: json_data["total"] || 0,
                    assertions: json_data["assertions"] || 0,
                    failures: json_data["failed"] || 0,
                    errors: json_data["errors"] || 0,
                    duration: json_data["duration"] || elapsed,
                    success: json_data["success"] || false
                  }

                  # summary.json may omit assertions; try report.json instead.
                  if results[:assertions] == 0
                    report_file = File.join(reports_dir, "report.json")
                    if File.exist?(report_file)
                      report_data = JSON.parse(File.read(report_file))
                      results[:assertions] = report_data.dig("result", "assertions") || 0
                    end
                  end
                rescue JSON::ParserError
                  # Fall back to parsing stdout below.
                end
              end

              # Fall back to parsing output if no JSON data was usable.
              results ||= parse_results(process_info[:output])

              # Close all streams; pipes may already be closed, so ignore errors.
              [process_info[:stdout], process_info[:stderr], process_info[:stdin]].each do |io|
                begin
                  io.close
                rescue
                  nil
                end
              end

              # Final callback.
              if callback
                # Trust summary.json's success flag when present so the
                # package status matches what ace-test actually reported;
                # otherwise fall back to the process exit code.
                success_status = results[:success].nil? ? (exit_status == 0) : results[:success]

                callback.call(package, {
                  status: :completed,
                  completed: true,
                  success: success_status,
                  exit_code: exit_status,
                  elapsed: elapsed,
                  results: results
                }, process_info[:output])
              end

              # Mark for removal (can't delete while iterating @processes).
              @completed << name
            end
          end

          # Remove completed processes.
          @completed.each { |name| @processes.delete(name) }
          @completed.clear

          # Start queued processes if we have capacity.
          while @processes.size < @max_parallel && !@queue.empty?
            queued = @queue.shift
            start_package(queued[:package], queued[:options], &queued[:callback])
          end
        end

        # True while any process is active or still waiting in the queue.
        def running?
          !@processes.empty? || !@queue.empty?
        end

        # Block until every started and queued package has finished.
        def wait_all
          while running?
            check_processes
            sleep 0.1
          end
        end

        private

        # Build the `ace-test` shell command for one package.
        # Tokens are shell-escaped (Shellwords.join) so report directories
        # containing spaces or shell metacharacters cannot break or inject
        # into the command; plain tokens are emitted unchanged.
        def build_command(package, options)
          cmd_parts = ["ace-test"]

          # Always run in single batch for suite execution.
          # This avoids nested group headers and improves performance.
          cmd_parts << "--run-in-single-batch"

          # Add format (map legacy "compact" to "progress" since ace-test
          # doesn't have a compact format).
          format = options["format"] || "progress"
          format = "progress" if format == "compact"
          cmd_parts << "--format" << format

          # Add other options.
          cmd_parts << "--no-save" unless options["save_reports"]
          cmd_parts << "--fail-fast" if options["fail_fast"]
          cmd_parts << "--no-color" unless options.fetch("color", true)
          if options["report_dir"]
            # Reports nest under the package's short name (minus "ace-" prefix).
            short_name = package["name"].to_s.sub(/\Aace-/, "")
            pkg_report_dir = File.join(options["report_dir"], short_name)
            cmd_parts << "--report-dir" << pkg_report_dir
          end

          # Note: Do NOT set CI=true here - respect the existing environment.
          # Tests that need CI-aware behavior should check ENV['CI'] directly.
          Shellwords.join(cmd_parts)
        end

        # Update progress counters from a chunk of child stdout.
        def parse_progress(process_info, chunk)
          # Count dots, F, E, S in the output for progress.
          dots = chunk.scan(/[.FES]/).join
          process_info[:dots] << dots
          process_info[:tests_run] += dots.length

          # Try to extract a total test count from the output.
          if process_info[:test_count] == 0 && chunk =~ /Running (\d+)(?:\/\d+)? test files/
            process_info[:test_count] = $1.to_i * 10 # Estimate tests per file
          elsif chunk =~ /(\d+) tests?,/
            process_info[:test_count] = $1.to_i
          end
        end

        # Derive a results hash from raw stdout: prefer the minitest-style
        # summary line, else estimate by counting progress characters.
        def parse_results(output)
          if output =~ /(\d+) tests?, (\d+) assertions?, (\d+) failures?, (\d+) errors? \(([\d.]+)s\)/
            {
              tests: $1.to_i,
              assertions: $2.to_i,
              failures: $3.to_i,
              errors: $4.to_i,
              duration: $5.to_f,
              success: $3.to_i == 0 && $4.to_i == 0
            }
          else
            # Fallback to counting dots/F/E/S.
            dots = output.scan(/[.FES]/).join
            {
              tests: dots.length,
              failures: dots.count("F"),
              errors: dots.count("E"),
              skipped: dots.count("S"),
              success: !dots.include?("F") && !dots.include?("E")
            }
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,176 @@
|
|
|
1
|
+
# frozen_string_literal: true

require "json"

module Ace
  module TestRunner
    module Suite
      # ResultAggregator reads the per-package summary.json / report.json
      # files produced by a suite run and rolls them up into a single
      # suite-level summary hash, which it can also render as Markdown.
      class ResultAggregator
        attr_reader :packages

        # packages    - array of {"name" => ..., "path" => ...} hashes
        # report_root - optional root directory where reports are stored
        def initialize(packages, report_root: nil)
          @packages = packages
          @report_root = report_root
        end

        # Build the suite-level summary hash from every package's results.
        def aggregate
          results = collect_results

          # Calculate assertion totals.
          total_assertions = 0
          # NOTE(review): assertions_failed is never incremented anywhere in
          # this class — it is always reported as 0. Kept so the summary hash
          # shape stays backward compatible; confirm whether a real count was
          # intended.
          assertions_failed = 0

          results.each do |r|
            # Get assertions from either summary or full report data.
            if r[:assertions]
              total_assertions += r[:assertions]
            elsif r[:report_data]
              total_assertions += r[:report_data].dig(:result, :assertions) || 0
            end
          end

          {
            total_tests: results.sum { |r| r[:total] || 0 },
            total_passed: results.sum { |r| r[:passed] || 0 },
            total_failed: results.sum { |r| (r[:failed] || 0) + (r[:errors] || 0) },
            total_skipped: results.sum { |r| r[:skipped] || 0 },
            total_assertions: total_assertions,
            assertions_failed: assertions_failed,
            # Packages run in parallel, so wall-clock duration is the max of
            # the package durations, not their sum. The trailing `|| 0`
            # guards against Array#max returning nil when there are no
            # packages, which previously crashed sprintf in generate_report.
            total_duration: results.map { |r| r[:duration] || 0 }.max || 0,
            packages_passed: results.count { |r| r[:success] },
            packages_failed: results.count { |r| !r[:success] },
            failed_packages: collect_failed_packages(results),
            results: results
          }
        end

        # Load each package's summary.json (supplemented by report.json for
        # assertion counts). Missing or unparseable summaries become
        # synthetic failure results so they still count against the suite.
        def collect_results
          @packages.map do |package|
            reports_dir = Atoms::ReportPathResolver.report_directory(
              package["path"],
              report_root: @report_root,
              package_name: package["name"]
            )
            summary_path = reports_dir ? File.join(reports_dir, "summary.json") : nil

            if summary_path && File.exist?(summary_path)
              begin
                data = JSON.parse(File.read(summary_path), symbolize_names: true)
                data[:package] = package["name"]
                data[:path] = package["path"]
                data[:report_root] = @report_root

                # Try to get assertions from report.json if not in summary.
                if !data[:assertions] || data[:assertions] == 0
                  report_path = File.join(reports_dir, "report.json")
                  if File.exist?(report_path)
                    begin
                      report_data = JSON.parse(File.read(report_path), symbolize_names: true)
                      data[:assertions] = report_data.dig(:result, :assertions) || 0
                      data[:report_data] = report_data
                    rescue JSON::ParserError
                      # Ignore if report.json can't be parsed.
                    end
                  end
                end

                data
              rescue JSON::ParserError => e
                # If we can't parse the summary, create a failure result.
                {
                  package: package["name"],
                  path: package["path"],
                  report_root: @report_root,
                  success: false,
                  error: "Failed to parse summary.json: #{e.message}",
                  total: 0,
                  passed: 0,
                  failed: 0,
                  errors: 1
                }
              end
            else
              # No summary file means tests didn't complete or save.
              {
                package: package["name"],
                path: package["path"],
                report_root: @report_root,
                success: false,
                error: "No test results found (summary.json missing)",
                total: 0,
                passed: 0,
                failed: 0,
                errors: 1
              }
            end
          end
        end

        # Reduce unsuccessful results to the fields the report needs.
        def collect_failed_packages(results)
          results.reject { |r| r[:success] }.map do |result|
            {
              name: result[:package],
              path: result[:path],
              report_root: result[:report_root] || @report_root,
              failures: result[:failed] || 0,
              errors: result[:errors] || 0,
              error_message: result[:error]
            }
          end
        end

        # Render the summary hash as a Markdown report string.
        def generate_report(summary)
          report = []
          report << "# ACE Test Suite Report"
          report << ""
          report << "## Summary"
          report << ""

          report << if summary[:packages_failed] == 0
            "✅ **All tests passed!**"
          else
            "❌ **Some tests failed**"
          end

          report << ""
          report << "- Packages: #{summary[:packages_passed]} passed, #{summary[:packages_failed]} failed"
          report << "- Tests: #{summary[:total_tests]} total, #{summary[:total_passed]} passed, #{summary[:total_failed]} failed"
          report << "- Duration: #{sprintf("%.2f", summary[:total_duration])}s"
          report << ""

          if summary[:failed_packages] && !summary[:failed_packages].empty?
            report << "## Failed Packages"
            report << ""

            summary[:failed_packages].each do |pkg|
              report << "### #{pkg[:name]}"
              report << ""
              report << "- Failures: #{pkg[:failures]}"
              report << "- Errors: #{pkg[:errors]}"
              report << "- Error: #{pkg[:error_message]}" if pkg[:error_message]
              report << Ace::TestRunner::Molecules::FailedPackageReporter.format_for_markdown(pkg)
              report << ""
            end
          end

          report << "## Package Results"
          report << ""
          report << "| Package | Status | Tests | Passed | Failed | Skipped | Duration |"
          report << "|---------|--------|-------|--------|--------|---------|----------|"

          summary[:results].each do |result|
            status = result[:success] ? "✅ Pass" : "❌ Fail"
            report << "| #{result[:package]} | #{status} | #{result[:total]} | #{result[:passed]} | #{result[:failed] || 0} | #{result[:skipped] || 0} | #{sprintf("%.2f", result[:duration] || 0)}s |"
          end

          report.join("\n")
        end

        # Write the Markdown report to +path+ and return the path.
        def save_report(summary, path = "test-suite-report.md")
          File.write(path, generate_report(summary))
          path
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
# frozen_string_literal: true

require_relative "display_helpers"

module Ace
  module TestRunner
    module Suite
      # SimpleDisplayManager provides line-by-line output without ANSI cursor
      # control. This is the default display mode, optimized for piping and
      # agent consumption.
      #
      # Where DisplayManager rewrites lines in place with ANSI escape codes,
      # this class prints exactly one summary line per package as it finishes.
      class SimpleDisplayManager
        include DisplayHelpers

        attr_reader :packages, :config, :start_time

        def initialize(packages, config)
          @packages = packages
          @config = config
          @package_status = {}
          @start_time = Time.now
          # Color defaults to on; only an explicit `false` in config disables it.
          @use_color = config.dig("test_suite", "display", "color") != false
          @package_width = calculate_package_width
        end

        # Announce how many packages are about to run.
        def initialize_display
          puts "Running tests for #{@packages.size} packages..."
        end

        # Record the latest status for a package; once the package reports
        # completion, emit its summary row. Intermediate updates are silent.
        def update_package(package, status, _output = nil)
          @package_status[package["name"]] = status
          print_completion_line(package, status) if status[:completed]
        end

        # No-op: simple mode has no live display to redraw.
        def refresh
        end

        # No-op: completion rows were already printed by update_package.
        def show_final_results
        end

        # Render the summary section via the shared DisplayHelpers logic.
        def show_summary(summary)
          render_summary(summary, @start_time, separator)
        end

        private

        # Emit one aligned row per finished package, e.g.:
        #   ✓  1.57s ace-handbook       1 tests     3 asserts   0 fail
        def print_completion_line(package, status)
          stats = status[:results] || {}
          runtime = stats[:duration] || status[:elapsed] || 0

          tests = stats[:tests] || 0
          assertions = stats[:assertions] || 0
          failures = stats[:failures] || 0
          errors = stats[:errors] || 0
          skipped = stats[:skipped] || 0

          passed = status[:success]
          icon = passed ? package_status_icon(true, skipped) : package_status_icon(false, 0)
          # Failed runs fold errors into the failure column; passing runs
          # show raw failures (which should be zero).
          failure_count = passed ? failures : failures + errors

          row = format(
            "%s %5.2fs %s %4d tests %5d asserts %3d fail",
            icon, runtime, package["name"].ljust(@package_width),
            tests, assertions, failure_count
          )
          row += " #{skipped} skip" if skipped > 0

          puts row
        end

        # Icon legend: red cross on failure, yellow "?" for a pass that
        # skipped tests, green check for a clean pass.
        def package_status_icon(success, skipped_count)
          if !success
            color("✗", :red)
          elsif skipped_count > 0
            color("?", :yellow)
          else
            color("✓", :green)
          end
        end

        def separator
          "=" * 65
        end

        # Wrap +text+ in the ANSI code for +color_name+ (when color is enabled).
        def color(text, color_name)
          return text unless @use_color

          code =
            case color_name
            when :green then "\033[32m"
            when :red then "\033[31m"
            when :yellow then "\033[33m"
            end

          "#{code}#{text}\033[0m"
        end

        # Column width for package names: the longest name, floored at 15
        # so short lists still line up readably.
        def calculate_package_width
          widest = @packages.map { |p| p["name"].length }.max || 0
          (widest < 15) ? 15 : widest
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative "suite/display_helpers"
|
|
4
|
+
require_relative "suite/duration_estimator"
|
|
5
|
+
require_relative "suite/orchestrator"
|
|
6
|
+
require_relative "suite/process_monitor"
|
|
7
|
+
require_relative "suite/display_manager"
|
|
8
|
+
require_relative "suite/simple_display_manager"
|
|
9
|
+
require_relative "suite/result_aggregator"
|
|
10
|
+
|
|
11
|
+
module Ace
  module TestRunner
    # Namespace for the multi-package suite runner (orchestrator, process
    # monitor, display managers, result aggregation).
    module Suite
      # Base error class for suite-level failures.
      class Error < StandardError; end

      # Convenience entry point: build an Orchestrator for +config+ and run it.
      def self.run(config)
        Orchestrator.new(config).run
      end
    end
  end
end
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "ace/core"
|
|
4
|
+
# Don't require ace/test_support here - it loads minitest/autorun which causes double runs
|
|
5
|
+
# Test files will require it themselves
|
|
6
|
+
|
|
7
|
+
# Only require minitest when actually running tests
|
|
8
|
+
# require "minitest" is done by test files or CommandBuilder
|
|
9
|
+
# require "minitest/reporters" is done when needed
|
|
10
|
+
require "open3"
|
|
11
|
+
require "json"
|
|
12
|
+
require "yaml"
|
|
13
|
+
require "fileutils"
|
|
14
|
+
require "time"
|
|
15
|
+
|
|
16
|
+
require_relative "test_runner/version"
|
|
17
|
+
|
|
18
|
+
# CLI and commands
|
|
19
|
+
require_relative "test_runner/cli"
|
|
20
|
+
|
|
21
|
+
# Models - Pure data structures
|
|
22
|
+
require_relative "test_runner/models/test_result"
|
|
23
|
+
require_relative "test_runner/models/test_failure"
|
|
24
|
+
require_relative "test_runner/models/test_configuration"
|
|
25
|
+
require_relative "test_runner/models/test_report"
|
|
26
|
+
|
|
27
|
+
# Atoms - Basic utilities (always needed)
|
|
28
|
+
require_relative "test_runner/atoms/test_detector"
|
|
29
|
+
require_relative "test_runner/atoms/command_builder"
|
|
30
|
+
require_relative "test_runner/atoms/result_parser"
|
|
31
|
+
require_relative "test_runner/atoms/timestamp_generator"
|
|
32
|
+
require_relative "test_runner/atoms/lazy_loader"
|
|
33
|
+
require_relative "test_runner/atoms/report_path_resolver"
|
|
34
|
+
require_relative "test_runner/atoms/report_directory_resolver"
|
|
35
|
+
|
|
36
|
+
# Molecules - Core operations (always needed for basic test running)
|
|
37
|
+
require_relative "test_runner/molecules/test_executor"
|
|
38
|
+
require_relative "test_runner/molecules/failure_analyzer"
|
|
39
|
+
require_relative "test_runner/molecules/report_storage"
|
|
40
|
+
require_relative "test_runner/molecules/config_loader"
|
|
41
|
+
require_relative "test_runner/molecules/pattern_resolver"
|
|
42
|
+
require_relative "test_runner/molecules/cli_argument_parser"
|
|
43
|
+
require_relative "test_runner/molecules/failed_package_reporter"
|
|
44
|
+
# Other molecules loaded lazily (deprecation_fixer, rake_integration)
|
|
45
|
+
|
|
46
|
+
# Formatters - Load only base formatter, others loaded on demand
|
|
47
|
+
require_relative "test_runner/formatters/base_formatter"
|
|
48
|
+
# Other formatters loaded lazily via LazyLoader
|
|
49
|
+
|
|
50
|
+
# Organisms - Core orchestrators always needed for test execution
|
|
51
|
+
require_relative "test_runner/organisms/test_orchestrator"
|
|
52
|
+
require_relative "test_runner/organisms/report_generator"
|
|
53
|
+
# Agent reporter loaded lazily when needed
|
|
54
|
+
|
|
55
|
+
module Ace
  module TestRunner
    # Base error class for the test runner gem.
    class Error < StandardError; end

    # Namespace reserved for CLI command classes (populated elsewhere).
    module Commands; end

    # Top-level entry point: delegate the run to the TestOrchestrator organism.
    def self.run(options = {})
      Organisms::TestOrchestrator.new(options).run
    end
  end
end
|