tryouts 3.1.1 → 3.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. checksums.yaml +4 -4
  2. data/exe/try +3 -3
  3. data/lib/tryouts/cli/formatters/base.rb +108 -48
  4. data/lib/tryouts/cli/formatters/compact.rb +97 -105
  5. data/lib/tryouts/cli/formatters/factory.rb +8 -2
  6. data/lib/tryouts/cli/formatters/live_status_manager.rb +138 -0
  7. data/lib/tryouts/cli/formatters/output_manager.rb +78 -66
  8. data/lib/tryouts/cli/formatters/quiet.rb +54 -102
  9. data/lib/tryouts/cli/formatters/test_run_state.rb +122 -0
  10. data/lib/tryouts/cli/formatters/tty_status_display.rb +273 -0
  11. data/lib/tryouts/cli/formatters/verbose.rb +103 -105
  12. data/lib/tryouts/cli/formatters.rb +3 -0
  13. data/lib/tryouts/cli/opts.rb +17 -8
  14. data/lib/tryouts/cli/tty_detector.rb +92 -0
  15. data/lib/tryouts/console.rb +1 -1
  16. data/lib/tryouts/expectation_evaluators/boolean.rb +1 -1
  17. data/lib/tryouts/expectation_evaluators/exception.rb +2 -2
  18. data/lib/tryouts/expectation_evaluators/expectation_result.rb +3 -3
  19. data/lib/tryouts/expectation_evaluators/false.rb +1 -1
  20. data/lib/tryouts/expectation_evaluators/intentional_failure.rb +2 -2
  21. data/lib/tryouts/expectation_evaluators/output.rb +6 -6
  22. data/lib/tryouts/expectation_evaluators/performance_time.rb +3 -3
  23. data/lib/tryouts/expectation_evaluators/regex_match.rb +2 -2
  24. data/lib/tryouts/expectation_evaluators/regular.rb +1 -1
  25. data/lib/tryouts/expectation_evaluators/result_type.rb +1 -1
  26. data/lib/tryouts/expectation_evaluators/true.rb +2 -2
  27. data/lib/tryouts/failure_collector.rb +109 -0
  28. data/lib/tryouts/prism_parser.rb +17 -17
  29. data/lib/tryouts/test_batch.rb +9 -5
  30. data/lib/tryouts/test_case.rb +4 -4
  31. data/lib/tryouts/test_runner.rb +12 -1
  32. data/lib/tryouts/version.rb +1 -1
  33. data/lib/tryouts.rb +0 -9
  34. metadata +21 -22
@@ -18,8 +18,16 @@ class Tryouts
18
18
 
19
19
  File Format:
20
20
  ## Test description # Test case marker
21
- code_to_test # Ruby code
22
- #=> expected_result # Expectation
21
+ code_to_test # Ruby code
22
+ #=> expected_result # Expectation (various types available)
23
+
24
+ Great Expectations System:
25
+ Multiple expectation types are supported for different testing needs.
26
+
27
+ #=> Value equality #==> Must be true #=/=> Must be false
28
+ #=|> True OR false #=!> Must raise error #=:> Type matching
29
+ #=~> Regex matching #=%> Time constraints #=1> STDOUT content
30
+ #=2> STDERR content #=<> Intentional failure
23
31
  HELP
24
32
 
25
33
  class << self
@@ -50,12 +58,13 @@ class Tryouts
50
58
  end
51
59
 
52
60
  opts.separator "\nExecution Options:"
53
- opts.on('--shared-context', 'Override default context mode') { options[:shared_context] = true }
54
- opts.on('--no-shared-context', 'Override default context mode') { options[:shared_context] = false }
55
- opts.on('-v', '--verbose', 'Show detailed test output with line numbers') { options[:verbose] = true }
56
- opts.on('-f', '--fails', 'Show only failing tests (with --verbose)') { options[:fails_only] = true }
57
- opts.on('-q', '--quiet', 'Minimal output (dots and summary only)') { options[:quiet] = true }
58
- opts.on('-c', '--compact', 'Compact single-line output') { options[:compact] = true }
61
+ opts.on('--shared-context', 'Override default context mode') { options[:shared_context] = true }
62
+ opts.on('--no-shared-context', 'Override default context mode') { options[:shared_context] = false }
63
+ opts.on('-v', '--verbose', 'Show detailed test output with line numbers') { options[:verbose] = true }
64
+ opts.on('-f', '--fails', 'Show only failing tests') { options[:fails_only] = true }
65
+ opts.on('-q', '--quiet', 'Minimal output (dots and summary only)') { options[:quiet] = true }
66
+ opts.on('-c', '--compact', 'Compact single-line output') { options[:compact] = true }
67
+ opts.on('-l', '--live', 'Live status display') { options[:live_status] = true }
59
68
 
60
69
  opts.separator "\nInspection Options:"
61
70
  opts.on('-i', '--inspect', 'Inspect file structure without running tests') { options[:inspect] = true }
@@ -0,0 +1,92 @@
1
+ # lib/tryouts/cli/tty_detector.rb
2
+
3
+ require 'tty-screen'
4
+
5
+ class Tryouts
6
+ class CLI
7
+ # TTY detection utility for determining live formatter availability
8
+ module TTYDetector
9
+ STATUS_LINES = 4 # Lines needed for live formatter status area
10
+
11
+ # Check if TTY features are available for live formatting
12
+ # Returns: { available: boolean, reason: string }
13
+ def self.check_tty_support(debug: false)
14
+ result = { available: false, reason: nil }
15
+
16
+ # FORCE_LIVE override for testing
17
+ if ENV['FORCE_LIVE'] == '1'
18
+ debug_log('FORCE_LIVE=1 - forcing TTY support', debug)
19
+ result[:available] = true
20
+ result[:reason] = 'Forced via FORCE_LIVE=1'
21
+ return result
22
+ end
23
+
24
+ # Enhanced TTY detection to work with bundler and other execution contexts
25
+ debug_log('TTY Detection:', debug)
26
+ debug_log(" $stdout.tty? = #{$stdout.tty?}", debug)
27
+ debug_log(" $stderr.tty? = #{$stderr.tty?}", debug)
28
+ debug_log(" $stdin.tty? = #{$stdin.tty?}", debug)
29
+
30
+ # Check if any standard stream is a TTY or if we have a controlling terminal
31
+ has_tty = $stdout.tty? || $stderr.tty? || $stdin.tty?
32
+ debug_log(" Combined streams TTY: #{has_tty}", debug)
33
+
34
+ # Additional check: try to access controlling terminal directly
35
+ unless has_tty
36
+ begin
37
+ # On Unix systems, /dev/tty represents the controlling terminal
38
+ File.open('/dev/tty', 'r') { |f| has_tty = f.tty? }
39
+ debug_log(" /dev/tty accessible: #{has_tty}", debug)
40
+ rescue StandardError => ex
41
+ debug_log(" /dev/tty error: #{ex.class}: #{ex.message}", debug)
42
+ end
43
+ end
44
+
45
+ unless has_tty
46
+ debug_log(' Final result: No TTY detected', debug)
47
+ result[:reason] = 'No TTY detected (not running in terminal)'
48
+ return result
49
+ end
50
+
51
+ # Skip in CI or dumb terminals
52
+ if ENV['CI'] || ENV['TERM'] == 'dumb'
53
+ debug_log(" CI or dumb terminal detected (CI=#{ENV['CI']}, TERM=#{ENV.fetch('TERM', nil)})", debug)
54
+ result[:reason] = 'CI environment or dumb terminal detected'
55
+ return result
56
+ end
57
+
58
+ # Test TTY gem availability and basic functionality
59
+ begin
60
+ height = TTY::Screen.height
61
+ debug_log(" Screen height: #{height}, need minimum: #{STATUS_LINES + 5}", debug)
62
+
63
+ if height < STATUS_LINES + 5 # Need minimum screen space
64
+ debug_log(' Screen too small', debug)
65
+ result[:reason] = "Terminal too small (#{height} lines < #{STATUS_LINES + 5} needed)"
66
+ return result
67
+ end
68
+
69
+ # Test cursor control (basic check without actually saving)
70
+ require 'tty-cursor'
71
+ TTY::Cursor.save # Just test that it exists
72
+
73
+ debug_log(' TTY support enabled', debug)
74
+ result[:available] = true
75
+ result[:reason] = 'TTY support available'
76
+ rescue LoadError => ex
77
+ debug_log(" TTY gem loading failed: #{ex.message}", debug)
78
+ result[:reason] = "TTY gems not available: #{ex.message}"
79
+ rescue StandardError => ex
80
+ debug_log(" TTY setup failed: #{ex.class}: #{ex.message}", debug)
81
+ result[:reason] = "TTY setup failed: #{ex.message}"
82
+ end
83
+
84
+ result
85
+ end
86
+
87
+ def self.debug_log(message, debug_enabled)
88
+ $stderr.puts "DEBUG: #{message}" if debug_enabled
89
+ end
90
+ end
91
+ end
92
+ end
@@ -143,7 +143,7 @@ class Tryouts
143
143
 
144
144
  # Enable colors if neither appears redirected
145
145
  return "\e[%sm" % att.join(';') unless stdout_redirected || stderr_redirected
146
- rescue
146
+ rescue StandardError
147
147
  # If stat fails, fall back to enabling colors with TERM set
148
148
  return "\e[%sm" % att.join(';')
149
149
  end
@@ -45,7 +45,7 @@ class Tryouts
45
45
 
46
46
  def evaluate(actual_result = nil)
47
47
  expectation_result = ExpectationResult.from_result(actual_result)
48
- expression_result = eval_expectation_content(@expectation.content, expectation_result)
48
+ expression_result = eval_expectation_content(@expectation.content, expectation_result)
49
49
 
50
50
  build_result(
51
51
  passed: [true, false].include?(expression_result),
@@ -22,7 +22,7 @@ class Tryouts
22
22
 
23
23
  # Create result packet for evaluation to show what was expected
24
24
  expectation_result = ExpectationResult.from_result(nil)
25
- expected_value = eval_expectation_content(@expectation.content, expectation_result)
25
+ expected_value = eval_expectation_content(@expectation.content, expectation_result)
26
26
 
27
27
  build_result(
28
28
  passed: false,
@@ -41,7 +41,7 @@ class Tryouts
41
41
  @context.define_singleton_method(:error) { caught_error }
42
42
 
43
43
  expectation_result = ExpectationResult.from_result(caught_error)
44
- expected_value = eval_expectation_content(@expectation.content, expectation_result)
44
+ expected_value = eval_expectation_content(@expectation.content, expectation_result)
45
45
 
46
46
  build_result(
47
47
  passed: !!expected_value,
@@ -32,7 +32,7 @@ class Tryouts
32
32
  start_time_ns: nil,
33
33
  end_time_ns: nil,
34
34
  stdout_content: nil,
35
- stderr_content: nil
35
+ stderr_content: nil,
36
36
  )
37
37
  end
38
38
 
@@ -46,7 +46,7 @@ class Tryouts
46
46
  start_time_ns: start_time_ns,
47
47
  end_time_ns: end_time_ns,
48
48
  stdout_content: nil,
49
- stderr_content: nil
49
+ stderr_content: nil,
50
50
  )
51
51
  end
52
52
 
@@ -59,7 +59,7 @@ class Tryouts
59
59
  start_time_ns: nil,
60
60
  end_time_ns: nil,
61
61
  stdout_content: stdout_content,
62
- stderr_content: stderr_content
62
+ stderr_content: stderr_content,
63
63
  )
64
64
  end
65
65
  end
@@ -45,7 +45,7 @@ class Tryouts
45
45
 
46
46
  def evaluate(actual_result = nil)
47
47
  expectation_result = ExpectationResult.from_result(actual_result)
48
- expression_result = eval_expectation_content(@expectation.content, expectation_result)
48
+ expression_result = eval_expectation_content(@expectation.content, expectation_result)
49
49
 
50
50
  build_result(
51
51
  passed: expression_result == false,
@@ -56,14 +56,14 @@ class Tryouts
56
56
 
57
57
  # Delegate to regular evaluator
58
58
  regular_evaluator = Regular.new(regular_expectation, @test_case, @context)
59
- regular_result = regular_evaluator.evaluate(actual_result)
59
+ regular_result = regular_evaluator.evaluate(actual_result)
60
60
 
61
61
  # Invert the result while preserving metadata
62
62
  build_result(
63
63
  passed: !regular_result[:passed],
64
64
  actual: regular_result[:actual],
65
65
  expected: "NOT #{regular_result[:expected]} (intentional failure)",
66
- expectation: @expectation.content
66
+ expectation: @expectation.content,
67
67
  )
68
68
  rescue StandardError => ex
69
69
  # If evaluation itself fails (not the expectation), that's a real error
@@ -56,9 +56,9 @@ class Tryouts
56
56
 
57
57
  # Get the appropriate captured content
58
58
  captured_content = case pipe_number
59
- when 1 then stdout_content || ""
60
- when 2 then stderr_content || ""
61
- else ""
59
+ when 1 then stdout_content || ''
60
+ when 2 then stderr_content || ''
61
+ else ''
62
62
  end
63
63
 
64
64
  # Create result packet for expression evaluation
@@ -82,8 +82,8 @@ class Tryouts
82
82
 
83
83
  # Build result with appropriate pipe description
84
84
  pipe_name = case pipe_number
85
- when 1 then "stdout"
86
- when 2 then "stderr"
85
+ when 1 then 'stdout'
86
+ when 2 then 'stderr'
87
87
  else "pipe#{pipe_number}"
88
88
  end
89
89
 
@@ -91,7 +91,7 @@ class Tryouts
91
91
  passed: matched,
92
92
  actual: "#{pipe_name}: #{captured_content.inspect}",
93
93
  expected: expected_pattern.inspect,
94
- expectation: @expectation.content
94
+ expectation: @expectation.content,
95
95
  )
96
96
  rescue StandardError => ex
97
97
  handle_evaluation_error(ex, actual_result)
@@ -54,18 +54,18 @@ class Tryouts
54
54
  passed: false,
55
55
  actual: 'No timing data available',
56
56
  expected: 'Performance measurement',
57
- error: 'Performance expectations require execution timing data'
57
+ error: 'Performance expectations require execution timing data',
58
58
  )
59
59
  end
60
60
 
61
61
  # Create result packet with timing data available to expectation
62
62
  expectation_result = ExpectationResult.from_timing(actual_result, execution_time_ns)
63
- expected_limit_ms = eval_expectation_content(@expectation.content, expectation_result)
63
+ expected_limit_ms = eval_expectation_content(@expectation.content, expectation_result)
64
64
 
65
65
  actual_time_ms = expectation_result.execution_time_ms
66
66
 
67
67
  # Performance tolerance: actual <= expected + 10% (not strict window)
68
- max_allowed_ms = expected_limit_ms * 1.1
68
+ max_allowed_ms = expected_limit_ms * 1.1
69
69
  within_tolerance = actual_time_ms <= max_allowed_ms
70
70
 
71
71
  build_result(
@@ -38,11 +38,11 @@ class Tryouts
38
38
 
39
39
  def evaluate(actual_result = nil)
40
40
  expectation_result = ExpectationResult.from_result(actual_result)
41
- pattern = eval_expectation_content(@expectation.content, expectation_result)
41
+ pattern = eval_expectation_content(@expectation.content, expectation_result)
42
42
 
43
43
  # Convert actual_result to string for regex matching
44
44
  string_result = actual_result.to_s
45
- match_result = string_result =~ pattern
45
+ match_result = string_result =~ pattern
46
46
 
47
47
  build_result(
48
48
  passed: !match_result.nil?,
@@ -52,7 +52,7 @@ class Tryouts
52
52
 
53
53
  def evaluate(actual_result = nil)
54
54
  expectation_result = ExpectationResult.from_result(actual_result)
55
- expected_value = eval_expectation_content(@expectation.content, expectation_result)
55
+ expected_value = eval_expectation_content(@expectation.content, expectation_result)
56
56
 
57
57
  build_result(
58
58
  passed: actual_result == expected_value,
@@ -36,7 +36,7 @@ class Tryouts
36
36
 
37
37
  def evaluate(actual_result = nil)
38
38
  expectation_result = ExpectationResult.from_result(actual_result)
39
- expected_class = eval_expectation_content(@expectation.content, expectation_result)
39
+ expected_class = eval_expectation_content(@expectation.content, expectation_result)
40
40
 
41
41
  build_result(
42
42
  passed: actual_result.is_a?(expected_class),
@@ -42,8 +42,8 @@ class Tryouts
42
42
  end
43
43
 
44
44
  def evaluate(actual_result = nil)
45
- expectation_result = ExpectationResult.from_result(actual_result)
46
- expression_result = eval_expectation_content(@expectation.content, expectation_result)
45
+ expectation_result = ExpectationResult.from_result(actual_result)
46
+ expression_result = eval_expectation_content(@expectation.content, expectation_result)
47
47
 
48
48
  build_result(
49
49
  passed: expression_result == true,
@@ -0,0 +1,109 @@
1
+ # lib/tryouts/failure_collector.rb
2
+
3
+ require_relative 'console'
4
+
5
+ class Tryouts
6
+ # Collects and organizes failed test results across files for summary display
7
+ # Similar to RSpec's failure summary at the end of test runs
8
+ class FailureCollector
9
+ # Data structure for a single failure entry
10
+ FailureEntry = Data.define(:file_path, :test_case, :result_packet) do
11
+ def line_number
12
+ # Use last line of range (expectation line) for failure display
13
+ test_case.line_range&.last || test_case.first_expectation_line || 0
14
+ end
15
+
16
+ def description
17
+ desc = test_case.description.to_s.strip
18
+ desc.empty? ? 'unnamed test' : desc
19
+ end
20
+
21
+ def failure_reason
22
+ case result_packet.status
23
+ when :failed
24
+ if result_packet.actual_results.any? && result_packet.expected_results.any?
25
+ "expected #{result_packet.first_expected.inspect}, got #{result_packet.first_actual.inspect}"
26
+ else
27
+ 'test failed'
28
+ end
29
+ when :error
30
+ error_msg = result_packet.error&.message || 'unknown error'
31
+ "#{result_packet.error&.class&.name || 'Error'}: #{error_msg}"
32
+ else
33
+ 'test did not pass'
34
+ end
35
+ end
36
+
37
+ def source_context
38
+ return [] unless test_case.source_lines
39
+
40
+ # Show the test code (excluding setup/teardown)
41
+ test_case.source_lines.reject do |line|
42
+ line.strip.empty? || line.strip.start_with?('#')
43
+ end.first(3) # Limit to first 3 relevant lines
44
+ end
45
+ end
46
+
47
+ def initialize
48
+ @failures = []
49
+ @files_with_failures = Set.new
50
+ end
51
+
52
+ # Add a failed test result
53
+ def add_failure(file_path, result_packet)
54
+ return unless result_packet.failed? || result_packet.error?
55
+
56
+ entry = FailureEntry.new(
57
+ file_path: file_path,
58
+ test_case: result_packet.test_case,
59
+ result_packet: result_packet,
60
+ )
61
+
62
+ @failures << entry
63
+ @files_with_failures << file_path
64
+ end
65
+
66
+ # Check if any failures were collected
67
+ def any_failures?
68
+ !@failures.empty?
69
+ end
70
+
71
+ # Get count of total failures
72
+ def failure_count
73
+ @failures.count { |f| f.result_packet.failed? }
74
+ end
75
+
76
+ # Get count of total errors
77
+ def error_count
78
+ @failures.count { |f| f.result_packet.error? }
79
+ end
80
+
81
+ # Get total issues (failures + errors)
82
+ def total_issues
83
+ @failures.size
84
+ end
85
+
86
+ # Get count of files with failures
87
+ def files_with_failures_count
88
+ @files_with_failures.size
89
+ end
90
+
91
+ # Get failures grouped by file for summary display
92
+ def failures_by_file
93
+ @failures.group_by(&:file_path).transform_values do |file_failures|
94
+ file_failures.sort_by(&:line_number)
95
+ end
96
+ end
97
+
98
+ # Get all failure entries (for detailed processing)
99
+ def all_failures
100
+ @failures.dup
101
+ end
102
+
103
+ # Reset the collector (useful for testing)
104
+ def clear
105
+ @failures.clear
106
+ @files_with_failures.clear
107
+ end
108
+ end
109
+ end
@@ -83,7 +83,7 @@ class Tryouts
83
83
  # Find the end of this test case by looking for the last expectation
84
84
  # before the next description or end of file
85
85
  start_line = token[:line]
86
- end_line = find_test_case_end(tokens, index)
86
+ end_line = find_test_case_end(tokens, index)
87
87
 
88
88
  boundaries << { start: start_line, end: end_line } if end_line
89
89
  end
@@ -118,10 +118,10 @@ class Tryouts
118
118
  tokens.map.with_index do |token, index|
119
119
  if token[:type] == :potential_description
120
120
  # Check if this comment falls within any test case boundary
121
- line_num = token[:line]
122
- within_test_case = test_boundaries.any? { |boundary|
121
+ line_num = token[:line]
122
+ within_test_case = test_boundaries.any? do |boundary|
123
123
  line_num >= boundary[:start] && line_num <= boundary[:end]
124
- }
124
+ end
125
125
 
126
126
  if within_test_case
127
127
  # This comment is within a test case, treat as regular comment
@@ -134,10 +134,10 @@ class Tryouts
134
134
 
135
135
  # Check if this looks like a test description based on content
136
136
  looks_like_test_description = content.match?(/test|example|demonstrate|show|should|when|given/i) &&
137
- content.length > 10
137
+ content.length > 10
138
138
 
139
139
  # Check if there's code immediately before this (suggesting it's mid-test)
140
- prev_token = index > 0 ? tokens[index - 1] : nil
140
+ prev_token = index > 0 ? tokens[index - 1] : nil
141
141
  has_code_before = prev_token && prev_token[:type] == :code
142
142
 
143
143
  if has_code_before || !looks_like_test_description
@@ -151,8 +151,8 @@ class Tryouts
151
151
  meaningful_following = following_tokens.reject { |t| [:blank, :comment].include?(t[:type]) }
152
152
 
153
153
  # Look for test pattern within next 5 tokens (more restrictive)
154
- test_window = meaningful_following.first(5)
155
- has_code = test_window.any? { |t| t[:type] == :code }
154
+ test_window = meaningful_following.first(5)
155
+ has_code = test_window.any? { |t| t[:type] == :code }
156
156
  has_expectation = test_window.any? { |t| is_expectation_type?(t[:type]) }
157
157
 
158
158
  # Only promote to description if BOTH code and expectation are found nearby
@@ -180,11 +180,11 @@ class Tryouts
180
180
  # Skip if it's clearly just a regular comment (short, lowercase, etc.)
181
181
  # Test descriptions are typically longer and more descriptive
182
182
  looks_like_regular_comment = content.length < 20 &&
183
- content.downcase == content &&
184
- !content.match?(/test|example|demonstrate|show/i)
183
+ content.downcase == content &&
184
+ !content.match?(/test|example|demonstrate|show/i)
185
185
 
186
186
  # Check if there's code immediately before this (suggesting it's mid-test)
187
- prev_token = index > 0 ? tokens[index - 1] : nil
187
+ prev_token = index > 0 ? tokens[index - 1] : nil
188
188
  has_code_before = prev_token && prev_token[:type] == :code
189
189
 
190
190
  if looks_like_regular_comment || has_code_before
@@ -199,8 +199,8 @@ class Tryouts
199
199
 
200
200
  # Look for test pattern: at least one code token followed by at least one expectation
201
201
  # within the next 10 meaningful tokens (to avoid matching setup/teardown)
202
- test_window = meaningful_following.first(10)
203
- has_code = test_window.any? { |t| t[:type] == :code }
202
+ test_window = meaningful_following.first(10)
203
+ has_code = test_window.any? { |t| t[:type] == :code }
204
204
  has_expectation = test_window.any? { |t| is_expectation_type?(t[:type]) }
205
205
 
206
206
  if has_code && has_expectation
@@ -466,12 +466,12 @@ class Tryouts
466
466
  TestCase.new(
467
467
  description: desc,
468
468
  code: extract_code_content(code_tokens),
469
- expectations: exp_tokens.map { |token|
469
+ expectations: exp_tokens.map do |token|
470
470
  type = case token[:type]
471
471
  when :exception_expectation then :exception
472
472
  when :intentional_failure_expectation then :intentional_failure
473
- when :true_expectation then :true
474
- when :false_expectation then :false
473
+ when :true_expectation then :true # rubocop:disable Lint/BooleanSymbol
474
+ when :false_expectation then :false # rubocop:disable Lint/BooleanSymbol
475
475
  when :boolean_expectation then :boolean
476
476
  when :result_type_expectation then :result_type
477
477
  when :regex_match_expectation then :regex_match
@@ -486,7 +486,7 @@ class Tryouts
486
486
  else
487
487
  Expectation.new(content: token[:content], type: type)
488
488
  end
489
- },
489
+ end,
490
490
  line_range: start_line..end_line,
491
491
  path: @source_path,
492
492
  source_lines: source_lines,
@@ -110,7 +110,7 @@ class Tryouts
110
110
 
111
111
  result
112
112
  rescue StandardError => ex
113
- @output_manager&.test_end(test_case, idx, @test_case_count, status: :failed, error: ex)
113
+ @output_manager&.test_end(test_case, idx, @test_case_count)
114
114
  # Create error result packet to maintain consistent data flow
115
115
  error_result = build_error_result(test_case, ex)
116
116
  process_test_result(error_result)
@@ -367,13 +367,18 @@ class Tryouts
367
367
 
368
368
  if result.failed? || result.error?
369
369
  @failed_count += 1
370
+
371
+ # Collect failure details for end-of-run summary
372
+ if @global_tally && @global_tally[:failure_collector]
373
+ @global_tally[:failure_collector].add_failure(@testrun.source_file, result)
374
+ end
370
375
  end
371
376
 
372
377
  show_test_result(result)
373
378
 
374
379
  # Show captured output if any exists
375
380
  if result.has_output?
376
- @output_manager&.test_output(result.test_case, result.captured_output)
381
+ @output_manager&.test_output(result.test_case, result.captured_output, result)
377
382
  end
378
383
  end
379
384
 
@@ -494,9 +499,8 @@ class Tryouts
494
499
  end
495
500
 
496
501
  def show_summary(elapsed_time)
497
- # Use actual executed test count, not total tests in file
498
- executed_count = @results.size
499
- @output_manager&.batch_summary(executed_count, @failed_count, elapsed_time)
502
+ # Summary is now handled by TestRunner with failure details
503
+ # This method kept for compatibility but no longer calls batch_summary
500
504
  end
501
505
 
502
506
  # Helper methods using pattern matching
@@ -89,7 +89,7 @@ class Tryouts
89
89
  :error, # Exception object (if any)
90
90
  :captured_output, # Captured stdout/stderr content
91
91
  :elapsed_time, # Execution timing (future use)
92
- :metadata # Hash for future extensibility
92
+ :metadata, # Hash for future extensibility
93
93
  ) do
94
94
  def passed?
95
95
  status == :passed
@@ -131,7 +131,7 @@ class Tryouts
131
131
  error: nil,
132
132
  captured_output: captured_output,
133
133
  elapsed_time: elapsed_time,
134
- metadata: metadata
134
+ metadata: metadata,
135
135
  )
136
136
  end
137
137
 
@@ -146,7 +146,7 @@ class Tryouts
146
146
  error: nil,
147
147
  captured_output: captured_output,
148
148
  elapsed_time: elapsed_time,
149
- metadata: metadata
149
+ metadata: metadata,
150
150
  )
151
151
  end
152
152
 
@@ -171,7 +171,7 @@ class Tryouts
171
171
  error: error,
172
172
  captured_output: captured_output,
173
173
  elapsed_time: elapsed_time,
174
- metadata: metadata
174
+ metadata: metadata,
175
175
  )
176
176
  end
177
177
  end
@@ -5,6 +5,7 @@ require_relative 'test_batch'
5
5
  require_relative 'translators/rspec_translator'
6
6
  require_relative 'translators/minitest_translator'
7
7
  require_relative 'file_processor'
8
+ require_relative 'failure_collector'
8
9
 
9
10
  class Tryouts
10
11
  class TestRunner
@@ -32,6 +33,7 @@ class Tryouts
32
33
  validate_framework
33
34
 
34
35
  result = process_files
36
+ show_failure_summary
35
37
  show_grand_total if @global_tally[:file_count] > 1
36
38
  result
37
39
  end
@@ -73,13 +75,14 @@ class Tryouts
73
75
  file_count: 0,
74
76
  start_time: Time.now,
75
77
  successful_files: 0,
78
+ failure_collector: FailureCollector.new,
76
79
  }
77
80
  end
78
81
 
79
82
  def process_files
80
83
  failure_count = 0
81
84
 
82
- @files.each_with_index do |file, idx|
85
+ @files.each_with_index do |file, _idx|
83
86
  result = process_file(file)
84
87
  failure_count += result unless result.zero?
85
88
  status = result.zero? ? Console.color(:green, 'PASS') : Console.color(:red, 'FAIL')
@@ -104,8 +107,16 @@ class Tryouts
104
107
  1
105
108
  end
106
109
 
110
+ def show_failure_summary
111
+ # Show failure summary if any failures exist
112
+ if @global_tally[:failure_collector].any_failures?
113
+ @output_manager.batch_summary(@global_tally[:failure_collector])
114
+ end
115
+ end
116
+
107
117
  def show_grand_total
108
118
  elapsed_time = Time.now - @global_tally[:start_time]
119
+
109
120
  @output_manager.grand_total(
110
121
  @global_tally[:total_tests],
111
122
  @global_tally[:total_failed],
@@ -1,5 +1,5 @@
1
1
  # lib/tryouts/version.rb
2
2
 
3
3
  class Tryouts
4
- VERSION = '3.1.1'
4
+ VERSION = '3.2.0'
5
5
  end