tryouts 3.0.0 → 3.1.0
This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +51 -115
- data/exe/try +25 -4
- data/lib/tryouts/cli/formatters/base.rb +33 -21
- data/lib/tryouts/cli/formatters/compact.rb +122 -84
- data/lib/tryouts/cli/formatters/factory.rb +1 -1
- data/lib/tryouts/cli/formatters/output_manager.rb +13 -2
- data/lib/tryouts/cli/formatters/quiet.rb +22 -16
- data/lib/tryouts/cli/formatters/verbose.rb +101 -60
- data/lib/tryouts/console.rb +53 -17
- data/lib/tryouts/expectation_evaluators/base.rb +101 -0
- data/lib/tryouts/expectation_evaluators/boolean.rb +60 -0
- data/lib/tryouts/expectation_evaluators/exception.rb +61 -0
- data/lib/tryouts/expectation_evaluators/expectation_result.rb +67 -0
- data/lib/tryouts/expectation_evaluators/false.rb +60 -0
- data/lib/tryouts/expectation_evaluators/intentional_failure.rb +74 -0
- data/lib/tryouts/expectation_evaluators/output.rb +101 -0
- data/lib/tryouts/expectation_evaluators/performance_time.rb +81 -0
- data/lib/tryouts/expectation_evaluators/regex_match.rb +57 -0
- data/lib/tryouts/expectation_evaluators/registry.rb +66 -0
- data/lib/tryouts/expectation_evaluators/regular.rb +67 -0
- data/lib/tryouts/expectation_evaluators/result_type.rb +51 -0
- data/lib/tryouts/expectation_evaluators/true.rb +58 -0
- data/lib/tryouts/prism_parser.rb +112 -15
- data/lib/tryouts/test_executor.rb +6 -4
- data/lib/tryouts/test_runner.rb +1 -1
- data/lib/tryouts/testbatch.rb +288 -98
- data/lib/tryouts/testcase.rb +141 -0
- data/lib/tryouts/translators/minitest_translator.rb +40 -11
- data/lib/tryouts/translators/rspec_translator.rb +47 -12
- data/lib/tryouts/version.rb +1 -1
- data/lib/tryouts.rb +42 -0
- metadata +16 -3
data/lib/tryouts/testbatch.rb
CHANGED
@@ -1,47 +1,117 @@
 # lib/tryouts/testbatch.rb
 
 require 'stringio'
+require_relative 'expectation_evaluators/registry'
 
 class Tryouts
+  # Factory for creating fresh context containers for each test
+  class FreshContextFactory
+    def initialize
+      @containers_created = 0
+    end
+
+    def create_container
+      @containers_created += 1
+      Object.new
+    end
+
+    def containers_created_count
+      @containers_created
+    end
+  end
+
   # Modern TestBatch using Ruby 3.4+ patterns and formatter system
   class TestBatch
     attr_reader :testrun, :failed_count, :container, :status, :results, :formatter, :output_manager
 
     def initialize(testrun, **options)
-      @testrun
-      @container
-      @options
-      @formatter
-      @output_manager
-      @global_tally
-      @failed_count
-      @status
-      @results
-      @start_time
+      @testrun = testrun
+      @container = Object.new
+      @options = options
+      @formatter = Tryouts::CLI::FormatterFactory.create_formatter(options)
+      @output_manager = options[:output_manager]
+      @global_tally = options[:global_tally]
+      @failed_count = 0
+      @status = :pending
+      @results = []
+      @start_time = nil
+      @test_case_count = 0
+      @setup_failed = false
+
+      # Circuit breaker for batch-level failure protection
+      @consecutive_failures = 0
+      @max_consecutive_failures = options[:max_consecutive_failures] || 10
+      @circuit_breaker_active = false
+
+      # Expose context objects for testing - different strategies for each mode
+      @shared_context = if options[:shared_context]
+                          @container # Shared mode: single container reused across tests
+                        else
+                          FreshContextFactory.new # Fresh mode: factory that creates new containers
+                        end
     end
 
     # Main execution pipeline using functional composition
     def run(before_test_hook = nil, &)
       return false if empty?
 
-      @start_time
-      @
+      @start_time = Time.now
+      @test_case_count = test_cases.size
+
+      @output_manager&.execution_phase(@test_case_count)
       @output_manager&.info("Context: #{@options[:shared_context] ? 'shared' : 'fresh'}", 1)
       @output_manager&.file_start(path, context: @options[:shared_context] ? :shared : :fresh)
 
       if shared_context?
         @output_manager&.info('Running global setup...', 2)
         execute_global_setup
+
+        # Stop execution if setup failed
+        if @setup_failed
+          @output_manager&.error("Stopping batch execution due to setup failure")
+          @status = :failed
+          finalize_results([])
+          return false
+        end
       end
 
       idx = 0
       execution_results = test_cases.map do |test_case|
-        @output_manager&.trace("Test #{idx + 1}/#{
-        idx
-
+        @output_manager&.trace("Test #{idx + 1}/#{@test_case_count}: #{test_case.description}", 2)
+        idx += 1
+
+        # Check circuit breaker before executing test
+        if @circuit_breaker_active
+          @output_manager&.error("Circuit breaker active - skipping remaining tests after #{@consecutive_failures} consecutive failures")
+          break
+        end
+
+        @output_manager&.test_start(test_case, idx, @test_case_count)
+        result = execute_single_test(test_case, before_test_hook, &) # runs the test code
+        @output_manager&.test_end(test_case, idx, @test_case_count)
+
+        # Update circuit breaker state based on result
+        update_circuit_breaker(result)
+
         result
+      rescue StandardError => e
+        @output_manager&.test_end(test_case, idx, @test_case_count, status: :failed, error: e)
+        # Create error result packet to maintain consistent data flow
+        error_result = build_error_result(test_case, e)
+        process_test_result(error_result)
+
+        # Update circuit breaker for exception cases
+        update_circuit_breaker(error_result)
+
+        error_result
       end
 
+      # Used for a separate purpose then execution_phase.
+      # e.g. the quiet formatter prints a newline after all test dots
+      @output_manager&.file_end(path, context: @options[:shared_context] ? :shared : :fresh)
+
+      @output_manager&.execution_phase(test_cases.size)
+
       execute_global_teardown
       finalize_results(execution_results)
 
@@ -92,8 +162,21 @@ class Tryouts
         end
       end
 
-      # Add captured output to the result
-
+      # Add captured output to the result if any exists
+      if captured_output && !captured_output.empty?
+        # Create new result packet with captured output
+        result = result.class.new(
+          test_case: result.test_case,
+          status: result.status,
+          result_value: result.result_value,
+          actual_results: result.actual_results,
+          expected_results: result.expected_results,
+          error: result.error,
+          captured_output: captured_output,
+          elapsed_time: result.elapsed_time,
+          metadata: result.metadata
+        )
+      end
 
       process_test_result(result)
       yield(test_case) if block_given?
@@ -102,21 +185,16 @@ class Tryouts
 
     # Shared context execution - setup runs once, all tests share state
     def execute_with_shared_context(test_case)
-
-      path = test_case.path
-      range = test_case.line_range
-
-      result_value = @container.instance_eval(code, path, range.first + 1)
-      expectations_result = evaluate_expectations(test_case, result_value, @container)
-
-      build_test_result(test_case, result_value, expectations_result)
-    rescue StandardError => ex
-      build_error_result(test_case, ex.message, ex)
+      execute_test_case_with_container(test_case, @container)
     end
 
     # Fresh context execution - setup runs per test, isolated state
     def execute_with_fresh_context(test_case)
-      fresh_container =
+      fresh_container = if @shared_context.is_a?(FreshContextFactory)
+                          @shared_context.create_container
+                        else
+                          Object.new # Fallback for backwards compatibility
+                        end
 
       # Execute setup in fresh context if present
       setup = @testrun.setup
@@ -124,101 +202,161 @@ class Tryouts
         fresh_container.instance_eval(setup.code, setup.path, 1)
       end
 
-
-
-      path = test_case.path
-      range = test_case.line_range
+      execute_test_case_with_container(test_case, fresh_container)
+    end
 
-
-
+    # Common test execution logic shared by both context modes
+    def execute_test_case_with_container(test_case, container)
+      # Individual test timeout protection
+      test_timeout = @options[:test_timeout] || 30 # 30 second default
 
-
+      if test_case.exception_expectations?
+        # For exception tests, don't execute code here - let evaluate_expectations handle it
+        expectations_result = execute_with_timeout(test_timeout, test_case) do
+          evaluate_expectations(test_case, nil, container)
+        end
+        build_test_result(test_case, nil, expectations_result)
+      else
+        # Regular execution for non-exception tests with timing and output capture
+        code = test_case.code
+        path = test_case.path
+        range = test_case.line_range
+
+        # Check if we need output capture for any expectations
+        needs_output_capture = test_case.expectations.any?(&:output?)
+
+        result_value, execution_time_ns, stdout_content, stderr_content, expectations_result =
+          execute_with_timeout(test_timeout, test_case) do
+            if needs_output_capture
+              # Execute with output capture using Fiber-local isolation
+              result_value, execution_time_ns, stdout_content, stderr_content =
+                execute_with_output_capture(container, code, path, range)
+
+              expectations_result = evaluate_expectations(
+                test_case, result_value, container, execution_time_ns, stdout_content, stderr_content
+              )
+              [result_value, execution_time_ns, stdout_content, stderr_content, expectations_result]
+            else
+              # Regular execution with timing capture only
+              execution_start_ns = Process.clock_gettime(Process::CLOCK_MONOTONIC, :nanosecond)
+              result_value = container.instance_eval(code, path, range.first + 1)
+              execution_end_ns = Process.clock_gettime(Process::CLOCK_MONOTONIC, :nanosecond)
+              execution_time_ns = execution_end_ns - execution_start_ns
+
+              expectations_result = evaluate_expectations(test_case, result_value, container, execution_time_ns)
+              [result_value, execution_time_ns, nil, nil, expectations_result]
+            end
+          end
+
+        build_test_result(test_case, result_value, expectations_result)
+      end
     rescue StandardError => ex
-      build_error_result(test_case, ex
+      build_error_result(test_case, ex)
+    rescue SystemExit, SignalException => ex
+      # Handle process control exceptions gracefully
+      Tryouts.debug "Test received #{ex.class}: #{ex.message}"
+      build_error_result(test_case, StandardError.new("Test terminated by #{ex.class}: #{ex.message}"))
     end
 
-    #
-    def
-
-
-
-
-
+    # Execute test code with Fiber-based stdout/stderr capture
+    def execute_with_output_capture(container, code, path, range)
+      # Fiber-local storage for output redirection
+      original_stdout = $stdout
+      original_stderr = $stderr
+
+      # Create StringIO objects for capturing output
+      captured_stdout = StringIO.new
+      captured_stderr = StringIO.new
+
+      begin
+        # Redirect output streams using Fiber-local variables
+        Fiber.new do
+          $stdout = captured_stdout
+          $stderr = captured_stderr
+
+          # Execute with timing capture
+          execution_start_ns = Process.clock_gettime(Process::CLOCK_MONOTONIC, :nanosecond)
+          result_value = container.instance_eval(code, path, range.first + 1)
+          execution_end_ns = Process.clock_gettime(Process::CLOCK_MONOTONIC, :nanosecond)
+          execution_time_ns = execution_end_ns - execution_start_ns
+
+          [result_value, execution_time_ns]
+        end.resume.tap do |result_value, execution_time_ns|
+          # Return captured content along with result
+          return [result_value, execution_time_ns, captured_stdout.string, captured_stderr.string]
         end
-
-
-
-
-        expected_results: evaluation_results.map { |r| r[:expected] },
-      }
+      ensure
+        # Always restore original streams
+        $stdout = original_stdout
+        $stderr = original_stderr
       end
     end
 
-
-
-
+    # Evaluate expectations using new object-oriented evaluation system
+    def evaluate_expectations(test_case, actual_result, context, execution_time_ns = nil, stdout_content = nil, stderr_content = nil)
+      return { passed: true, actual_results: [], expected_results: [] } if test_case.expectations.empty?
 
-
+      evaluation_results = test_case.expectations.map do |expectation|
+        evaluator = ExpectationEvaluators::Registry.evaluator_for(expectation, test_case, context)
 
+        # Pass appropriate data to different evaluator types
+        if expectation.performance_time? && execution_time_ns
+          evaluator.evaluate(actual_result, execution_time_ns)
+        elsif expectation.output? && (stdout_content || stderr_content)
+          evaluator.evaluate(actual_result, stdout_content, stderr_content)
+        else
+          evaluator.evaluate(actual_result)
+        end
+      end
+
+      aggregate_evaluation_results(evaluation_results)
+    end
+
+    # Aggregate individual evaluation results into the expected format
+    def aggregate_evaluation_results(evaluation_results)
       {
-        passed:
-
-
-        expectation: expectation,
-      }
-    rescue StandardError => ex
-      {
-        passed: false,
-        actual: actual_result,
-        expected: "EXPECTED: #{ex.message}",
-        expectation: expectation,
+        passed: evaluation_results.all? { |r| r[:passed] },
+        actual_results: evaluation_results.map { |r| r[:actual] },
+        expected_results: evaluation_results.map { |r| r[:expected] }
       }
     end
 
-    # Build structured test results using
+    # Build structured test results using TestCaseResultPacket
    def build_test_result(test_case, result_value, expectations_result)
      if expectations_result[:passed]
-
-        test_case
-
-
-
-
-        }
+        TestCaseResultPacket.from_success(
+          test_case,
+          result_value,
+          expectations_result[:actual_results],
+          expectations_result[:expected_results]
+        )
      else
-
-        test_case
-
-
-
-
-        }
+        TestCaseResultPacket.from_failure(
+          test_case,
+          result_value,
+          expectations_result[:actual_results],
+          expectations_result[:expected_results]
+        )
      end
    end
 
-    def build_error_result(test_case,
-
-      test_case: test_case,
-      status: :error,
-      result_value: nil,
-      actual_results: ["ACTUAL: #{message}"],
-      error: exception,
-      }
+    def build_error_result(test_case, exception)
+      TestCaseResultPacket.from_error(test_case, exception)
    end
 
    # Process and display test results using formatter
    def process_test_result(result)
      @results << result
 
-      if
+      if result.failed? || result.error?
        @failed_count += 1
      end
 
      show_test_result(result)
 
      # Show captured output if any exists
-      if result
-        @output_manager&.test_output(result
+      if result.has_output?
+        @output_manager&.test_output(result.test_case, result.captured_output)
      end
    end
 
@@ -237,8 +375,23 @@ class Tryouts
        @output_manager&.setup_output(captured_output) if captured_output && !captured_output.empty?
      end
    rescue StandardError => ex
+      @setup_failed = true
      @global_tally[:total_errors] += 1 if @global_tally
-
+
+      # Classify error and handle appropriately
+      error_type = Tryouts.classify_error(ex)
+
+      Tryouts.debug "Setup failed with #{error_type} error: (#{ex.class}): #{ex.message}"
+      Tryouts.trace ex.backtrace
+
+      # For non-catastrophic errors, we still stop batch execution
+      unless Tryouts.batch_stopping_error?(ex)
+        @output_manager&.error("Global setup failed: #{ex.message}")
+        return
+      end
+
+      # For catastrophic errors, still raise to stop execution
+      raise "Global setup failed (#{ex.class}): #{ex.message}"
    end
 
    # Global teardown execution
@@ -257,7 +410,22 @@ class Tryouts
      end
    rescue StandardError => ex
      @global_tally[:total_errors] += 1 if @global_tally
+
+      # Classify error and handle appropriately
+      error_type = Tryouts.classify_error(ex)
+
+      Tryouts.debug "Teardown failed with #{error_type} error: (#{ex.class}): #{ex.message}"
+      Tryouts.trace ex.backtrace
+
      @output_manager&.error("Teardown failed: #{ex.message}")
+
+      # Teardown failures are generally non-fatal - log and continue
+      unless Tryouts.batch_stopping_error?(ex)
+        @output_manager&.error("Continuing despite teardown failure")
+      else
+        # Only catastrophic errors should potentially affect batch completion
+        @output_manager&.error("Teardown failure may affect subsequent operations")
+      end
    end
 
    # Result finalization and summary display
@@ -267,17 +435,14 @@ class Tryouts
      show_summary(elapsed_time)
    end
 
-
    def show_test_result(result)
-
-      status = result[:status]
-      actuals = result[:actual_results]
-
-      @output_manager&.test_result(test_case, status, actuals)
+      @output_manager&.test_result(result)
    end
 
    def show_summary(elapsed_time)
-
+      # Use actual executed test count, not total tests in file
+      executed_count = @results.size
+      @output_manager&.batch_summary(executed_count, @failed_count, elapsed_time)
    end
 
    # Helper methods using pattern matching
@@ -310,5 +475,30 @@ class Tryouts
 
      @output_manager&.error(error_message, backtrace)
    end
+
+    # Timeout protection for individual test execution
+    def execute_with_timeout(timeout_seconds, test_case)
+      Timeout.timeout(timeout_seconds) do
+        yield
+      end
+    rescue Timeout::Error => e
+      Tryouts.debug "Test timeout after #{timeout_seconds}s: #{test_case.description}"
+      raise StandardError.new("Test execution timeout (#{timeout_seconds}s)")
+    end
+
+    # Circuit breaker pattern for batch-level failure protection
+    def update_circuit_breaker(result)
+      if result.failed? || result.error?
+        @consecutive_failures += 1
+        if @consecutive_failures >= @max_consecutive_failures
+          @circuit_breaker_active = true
+          Tryouts.debug "Circuit breaker activated after #{@consecutive_failures} consecutive failures"
+        end
+      else
+        # Reset on success
+        @consecutive_failures = 0
+        @circuit_breaker_active = false
+      end
+    end
  end
 end
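The shared/fresh context split above is the behavioral core of this file: shared mode reuses the single @container across every test, while fresh mode hands each test its own object built by the new FreshContextFactory. Below is a minimal sketch of that difference using only the factory class shown in the diff; the surrounding TestBatch wiring and the require line are assumptions for illustration, not part of the diff.

  require 'tryouts' # assumed entry point that loads Tryouts::FreshContextFactory

  factory = Tryouts::FreshContextFactory.new

  # Fresh mode: each test evaluates in its own container, so instance state cannot leak.
  first = factory.create_container
  first.instance_eval('@count = 1')
  second = factory.create_container
  second.instance_variable_get(:@count)  # => nil (isolated from `first`)
  factory.containers_created_count       # => 2

  # Shared mode: one container is reused, so later tests observe earlier state.
  shared = Object.new
  shared.instance_eval('@count = 1')
  shared.instance_eval('@count += 1')    # => 2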
data/lib/tryouts/testcase.rb
CHANGED
@@ -11,6 +11,48 @@ class Tryouts
    def expectations?
      !expectations.empty?
    end
+
+    def exception_expectations?
+      expectations.any?(&:exception?)
+    end
+
+    def regular_expectations
+      expectations.filter(&:regular?)
+    end
+
+    def exception_expectations
+      expectations.filter(&:exception?)
+    end
+  end
+
+  Expectation = Data.define(:content, :type) do
+    def regular? = type == :regular
+    def exception? = type == :exception
+    def boolean? = type == :boolean
+    def true? = type == :true
+    def false? = type == :false
+    def result_type? = type == :result_type
+    def regex_match? = type == :regex_match
+    def performance_time? = type == :performance_time
+    def intentional_failure? = type == :intentional_failure
+    def output? = type == :output
+  end
+
+  # Special expectation type for output capturing with pipe information
+  OutputExpectation = Data.define(:content, :type, :pipe) do
+    def regular? = type == :regular
+    def exception? = type == :exception
+    def boolean? = type == :boolean
+    def true? = type == :true
+    def false? = type == :false
+    def result_type? = type == :result_type
+    def regex_match? = type == :regex_match
+    def performance_time? = type == :performance_time
+    def intentional_failure? = type == :intentional_failure
+    def output? = type == :output
+
+    def stdout? = pipe == 1
+    def stderr? = pipe == 2
  end
 
  Setup = Data.define(:code, :line_range, :path) do
@@ -35,6 +77,105 @@ class Tryouts
    end
  end
 
+  # Test case result packet for formatters
+  # Replaces the simple Hash aggregation with a rich, immutable data structure
+  # containing all execution context and results needed by formatters
+  TestCaseResultPacket = Data.define(
+    :test_case, # TestCase object
+    :status, # :passed, :failed, :error
+    :result_value, # Actual execution result
+    :actual_results, # Array of actual values from expectations
+    :expected_results, # Array of expected values from expectations
+    :error, # Exception object (if any)
+    :captured_output, # Captured stdout/stderr content
+    :elapsed_time, # Execution timing (future use)
+    :metadata # Hash for future extensibility
+  ) do
+    def passed?
+      status == :passed
+    end
+
+    def failed?
+      status == :failed
+    end
+
+    def error?
+      status == :error
+    end
+
+    def has_output?
+      captured_output && !captured_output.empty?
+    end
+
+    def has_error?
+      !error.nil?
+    end
+
+    # Helper for formatter access to first actual/expected values
+    def first_actual
+      actual_results&.first
+    end
+
+    def first_expected
+      expected_results&.first
+    end
+
+    # Create a basic result packet for successful tests
+    def self.from_success(test_case, result_value, actual_results, expected_results, captured_output: nil, elapsed_time: nil, metadata: {})
+      new(
+        test_case: test_case,
+        status: :passed,
+        result_value: result_value,
+        actual_results: actual_results,
+        expected_results: expected_results,
+        error: nil,
+        captured_output: captured_output,
+        elapsed_time: elapsed_time,
+        metadata: metadata
+      )
+    end
+
+    # Create a result packet for failed tests
+    def self.from_failure(test_case, result_value, actual_results, expected_results, captured_output: nil, elapsed_time: nil, metadata: {})
+      new(
+        test_case: test_case,
+        status: :failed,
+        result_value: result_value,
+        actual_results: actual_results,
+        expected_results: expected_results,
+        error: nil,
+        captured_output: captured_output,
+        elapsed_time: elapsed_time,
+        metadata: metadata
+      )
+    end
+
+    # Create a result packet for error cases
+    def self.from_error(test_case, error, captured_output: nil, elapsed_time: nil, metadata: {})
+      error_message = error ? error.message : '<exception is nil>'
+
+      # Include backtrace in error message when in debug/verbose mode
+      error_display = if error && Tryouts.debug?
+                        backtrace_preview = error.backtrace&.first(3)&.join("\n ")
+                        "(#{error.class}) #{error_message}\n #{backtrace_preview}"
+                      else
+                        "(#{error.class}) #{error_message}"
+                      end
+
+      new(
+        test_case: test_case,
+        status: :error,
+        result_value: nil,
+        actual_results: [error_display],
+        expected_results: [],
+        error: error,
+        captured_output: captured_output,
+        elapsed_time: elapsed_time,
+        metadata: metadata
+      )
+    end
+  end
+
  # Enhanced error with context
  class TryoutSyntaxError < StandardError
    attr_reader :line_number, :context, :source_file