parallel_tests 1.3.7 → 3.7.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,14 +1,23 @@
+# frozen_string_literal: true
 require 'optparse'
 require 'tempfile'
 require 'parallel_tests'
+require 'shellwords'
+require 'pathname'
 
 module ParallelTests
   class CLI
     def run(argv)
+      Signal.trap("INT") { handle_interrupt }
+
       options = parse_options!(argv)
 
+      ENV['DISABLE_SPRING'] ||= '1'
+
       num_processes = ParallelTests.determine_number_of_processes(options[:count])
-      num_processes = num_processes * (options[:multiply] || 1)
+      num_processes *= (options[:multiply] || 1)
+
+      options[:first_is_1] ||= first_is_1?
 
       if options[:execute]
         execute_shell_command_in_parallel(options[:execute], num_processes, options)
@@ -19,12 +28,28 @@ module ParallelTests
 
     private
 
+    def handle_interrupt
+      @graceful_shutdown_attempted ||= false
+      Kernel.exit if @graceful_shutdown_attempted
+
+      # The Pid class's synchronize method can't be called directly from a trap
+      # Using Thread workaround https://github.com/ddollar/foreman/issues/332
+      Thread.new { ParallelTests.stop_all_processes }
+
+      @graceful_shutdown_attempted = true
+    end
+
     def execute_in_parallel(items, num_processes, options)
       Tempfile.open 'parallel_tests-lock' do |lock|
-        return Parallel.map(items, :in_threads => num_processes) do |item|
-          result = yield(item)
-          report_output(result, lock) if options[:serialize_stdout]
-          result
+        ParallelTests.with_pid_file do
+          simulate_output_for_ci options[:serialize_stdout] do
+            Parallel.map(items, in_threads: num_processes) do |item|
+              result = yield(item)
+              reprint_output(result, lock.path) if options[:serialize_stdout]
+              ParallelTests.stop_all_processes if options[:fail_fast] && result[:exit_status] != 0
+              result
+            end
+          end
         end
       end
     end
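
Note on the interrupt handling added above: the trap handler defers the actual cleanup to a Thread because code that takes a Mutex (as the Pid bookkeeping referenced in the comment does) cannot run inside a trap context. A minimal standalone sketch of the same pattern, with a print standing in for ParallelTests.stop_all_processes:

    interrupted = false
    Signal.trap("INT") do
      exit if interrupted                 # a second Ctrl+C exits immediately
      interrupted = true
      # mutex-guarded cleanup is not allowed in trap context, so hand it to a thread
      Thread.new { puts "stopping child processes..." }  # stand-in for the real cleanup
    end
    sleep # keep the demo process alive; interrupt once to clean up, twice to exit
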
@@ -32,145 +57,287 @@ module ParallelTests
     def run_tests_in_parallel(num_processes, options)
       test_results = nil
 
-      report_time_taken do
+      run_tests_proc = -> do
         groups = @runner.tests_in_groups(options[:files], num_processes, options)
+        groups.reject!(&:empty?)
 
         test_results = if options[:only_group]
-          groups_to_run = options[:only_group].collect{|i| groups[i - 1]}
-          report_number_of_tests(groups_to_run)
+          groups_to_run = options[:only_group].map { |i| groups[i - 1] }.compact
+          report_number_of_tests(groups_to_run) unless options[:quiet]
           execute_in_parallel(groups_to_run, groups_to_run.size, options) do |group|
             run_tests(group, groups_to_run.index(group), 1, options)
           end
         else
-          report_number_of_tests(groups)
+          report_number_of_tests(groups) unless options[:quiet]
 
           execute_in_parallel(groups, groups.size, options) do |group|
             run_tests(group, groups.index(group), num_processes, options)
           end
         end
 
-        report_results(test_results)
+        report_results(test_results, options) unless options[:quiet]
       end
 
-      abort final_fail_message if any_test_failed?(test_results)
+      if options[:quiet]
+        run_tests_proc.call
+      else
+        report_time_taken(&run_tests_proc)
+      end
+
+      if any_test_failed?(test_results)
+        warn final_fail_message
+
+        # return the highest exit status to allow sub-processes to send things other than 1
+        exit_status = if options[:highest_exit_status]
+          test_results.map { |data| data.fetch(:exit_status) }.max
+        else
+          1
+        end
+
+        exit exit_status
+      end
     end
 
     def run_tests(group, process_number, num_processes, options)
       if group.empty?
-        {:stdout => '', :exit_status => 0}
+        { stdout: '', exit_status: 0, command: '', seed: nil }
       else
         @runner.run_tests(group, process_number, num_processes, options)
       end
     end
 
-    def report_output(result, lock)
-      lock.flock File::LOCK_EX
-      $stdout.puts result[:stdout]
-      $stdout.flush
-    ensure
-      lock.flock File::LOCK_UN
+    def reprint_output(result, lockfile)
+      lock(lockfile) do
+        $stdout.puts
+        $stdout.puts result[:stdout]
+        $stdout.flush
+      end
+    end
+
+    def lock(lockfile)
+      File.open(lockfile) do |lock|
+        lock.flock File::LOCK_EX
+        yield
+      ensure
+        # This shouldn't be necessary, but appears to be
+        lock.flock File::LOCK_UN
+      end
     end
 
-    def report_results(test_results)
-      results = @runner.find_results(test_results.map { |result| result[:stdout] }*"")
+    def report_results(test_results, options)
+      results = @runner.find_results(test_results.map { |result| result[:stdout] } * "")
       puts ""
       puts @runner.summarize_results(results)
+
+      report_failure_rerun_commmand(test_results, options)
+    end
+
+    def report_failure_rerun_commmand(test_results, options)
+      failing_sets = test_results.reject { |r| r[:exit_status] == 0 }
+      return if failing_sets.none?
+
+      if options[:verbose] || options[:verbose_rerun_command]
+        puts "\n\nTests have failed for a parallel_test group. Use the following command to run the group again:\n\n"
+        failing_sets.each do |failing_set|
+          command = failing_set[:command]
+          command = command.gsub(/;export [A-Z_]+;/, ' ') # remove ugly export statements
+          command = @runner.command_with_seed(command, failing_set[:seed]) if failing_set[:seed]
+          puts command
+        end
+      end
     end
 
     def report_number_of_tests(groups)
       name = @runner.test_file_name
       num_processes = groups.size
-      num_tests = groups.map(&:size).inject(:+)
-      puts "#{num_processes} processes for #{num_tests} #{name}s, ~ #{num_tests / groups.size} #{name}s per process"
+      num_tests = groups.map(&:size).sum
+      tests_per_process = (num_processes == 0 ? 0 : num_tests / num_processes)
+      puts "#{pluralize(num_processes, 'process')} for #{pluralize(num_tests, name)}, ~ #{pluralize(tests_per_process, name)} per process"
     end
 
-    #exit with correct status code so rake parallel:test && echo 123 works
+    def pluralize(n, singular)
+      if n == 1
+        "1 #{singular}"
+      elsif singular.end_with?('s', 'sh', 'ch', 'x', 'z')
+        "#{n} #{singular}es"
+      else
+        "#{n} #{singular}s"
+      end
+    end
+
+    # exit with correct status code so rake parallel:test && echo 123 works
     def any_test_failed?(test_results)
       test_results.any? { |result| result[:exit_status] != 0 }
     end
 
     def parse_options!(argv)
+      newline_padding = " " * 37
       options = {}
       OptionParser.new do |opts|
-        opts.banner = <<-BANNER.gsub(/^ /, '')
+        opts.banner = <<~BANNER
           Run all tests in parallel, giving each process ENV['TEST_ENV_NUMBER'] ('', '2', '3', ...)
 
-          [optional] Only run selected files & folders:
-            parallel_test test/bar test/baz/xxx_text.rb
+          [optional] Only selected files & folders:
+            parallel_test test/bar test/baz/xxx_text.rb
+
+          [optional] Pass test-options and files via `--`:
+            parallel_test -- -t acceptance -f progress -- spec/foo_spec.rb spec/acceptance
 
           Options are:
         BANNER
         opts.on("-n [PROCESSES]", Integer, "How many processes to use, default: available CPUs") { |n| options[:count] = n }
-        opts.on("-p", "--pattern [PATTERN]", "run tests matching this pattern") { |pattern| options[:pattern] = /#{pattern}/ }
-        opts.on("--group-by [TYPE]", <<-TEXT.gsub(/^ /, '')
-          group tests by:
-            found - order of finding files
-            steps - number of cucumber/spinach steps
-            scenarios - individual cucumber scenarios
-            filesize - by size of the file
-            runtime - info from runtime log
-            default - runtime when runtime log is filled otherwise filesize
+        opts.on("-p", "--pattern [PATTERN]", "run tests matching this regex pattern") { |pattern| options[:pattern] = /#{pattern}/ }
+        opts.on("--exclude-pattern", "--exclude-pattern [PATTERN]", "exclude tests matching this regex pattern") { |pattern| options[:exclude_pattern] = /#{pattern}/ }
+        opts.on(
+          "--group-by [TYPE]",
+          <<~TEXT.rstrip.split("\n").join("\n#{newline_padding}")
+            group tests by:
+              found - order of finding files
+              steps - number of cucumber/spinach steps
+              scenarios - individual cucumber scenarios
+              filesize - by size of the file
+              runtime - info from runtime log
+              default - runtime when runtime log is filled otherwise filesize
         TEXT
-        ) { |type| options[:group_by] = type.to_sym }
-        opts.on("-m [FLOAT]", "--multiply-processes [FLOAT]", Float, "use given number as a multiplier of processes to run") { |multiply| options[:multiply] = multiply }
+        ) { |type| options[:group_by] = type.to_sym }
+        opts.on("-m [FLOAT]", "--multiply-processes [FLOAT]", Float, "use given number as a multiplier of processes to run") do |multiply|
+          options[:multiply] = multiply
+        end
 
-        opts.on("-s [PATTERN]", "--single [PATTERN]",
-          "Run all matching files in the same process") do |pattern|
+        opts.on("-s [PATTERN]", "--single [PATTERN]", "Run all matching files in the same process") do |pattern|
+          (options[:single_process] ||= []) << /#{pattern}/
+        end
 
-          options[:single_process] ||= []
-          options[:single_process] << /#{pattern}/
+        opts.on("-i", "--isolate", "Do not run any other tests in the group used by --single(-s)") do
+          options[:isolate] = true
         end
 
-        opts.on("-i", "--isolate",
-          "Do not run any other tests in the group used by --single(-s)") do |pattern|
+        opts.on(
+          "--isolate-n [PROCESSES]",
+          Integer,
+          "Use 'isolate' singles with number of processes, default: 1."
+        ) { |n| options[:isolate_count] = n }
 
-          options[:isolate] = true
+        opts.on("--highest-exit-status", "Exit with the highest exit status provided by test run(s)") do
+          options[:highest_exit_status] = true
         end
 
-        opts.on("--only-group INT[, INT]", Array) { |groups| options[:only_group] = groups.map(&:to_i) }
+        opts.on(
+          "--specify-groups [SPECS]",
+          <<~TEXT.rstrip.split("\n").join("\n#{newline_padding}")
+            Use 'specify-groups' if you want to specify multiple specs running in multiple
+            processes in a specific formation. Commas indicate specs in the same process,
+            pipes indicate specs in a new process. Cannot use with --single, --isolate, or
+            --isolate-n. Ex.
+            $ parallel_tests -n 3 . --specify-groups '1_spec.rb,2_spec.rb|3_spec.rb'
+            Process 1 will contain 1_spec.rb and 2_spec.rb
+            Process 2 will contain 3_spec.rb
+            Process 3 will contain all other specs
+          TEXT
+        ) { |groups| options[:specify_groups] = groups }
+
+        opts.on("--only-group INT[,INT]", Array) { |groups| options[:only_group] = groups.map(&:to_i) }
 
-        opts.on("-e", "--exec [COMMAND]", "execute this code parallel and with ENV['TEST_ENV_NUM']") { |path| options[:execute] = path }
-        opts.on("-o", "--test-options '[OPTIONS]'", "execute test commands with those options") { |arg| options[:test_options] = arg }
+        opts.on("-e", "--exec [COMMAND]", "execute this code parallel and with ENV['TEST_ENV_NUMBER']") { |path| options[:execute] = path }
+        opts.on("-o", "--test-options '[OPTIONS]'", "execute test commands with those options") { |arg| options[:test_options] = arg.lstrip }
         opts.on("-t", "--type [TYPE]", "test(default) / rspec / cucumber / spinach") do |type|
-          begin
-            @runner = load_runner(type)
-          rescue NameError, LoadError => e
-            puts "Runner for `#{type}` type has not been found! (#{e})"
-            abort
-          end
+          @runner = load_runner(type)
+        rescue NameError, LoadError => e
+          puts "Runner for `#{type}` type has not been found! (#{e})"
+          abort
         end
+        opts.on(
+          "--suffix [PATTERN]",
+          <<~TEXT.rstrip.split("\n").join("\n#{newline_padding}")
+            override built in test file pattern (should match suffix):
+            '_spec\.rb$' - matches rspec files
+            '_(test|spec).rb$' - matches test or spec files
+          TEXT
+        ) { |pattern| options[:suffix] = /#{pattern}/ }
         opts.on("--serialize-stdout", "Serialize stdout output, nothing will be written until everything is done") { options[:serialize_stdout] = true }
+        opts.on("--prefix-output-with-test-env-number", "Prefixes test env number to the output when not using --serialize-stdout") { options[:prefix_output_with_test_env_number] = true }
         opts.on("--combine-stderr", "Combine stderr into stdout, useful in conjunction with --serialize-stdout") { options[:combine_stderr] = true }
         opts.on("--non-parallel", "execute same commands but do not in parallel, needs --exec") { options[:non_parallel] = true }
         opts.on("--no-symlinks", "Do not traverse symbolic links to find test files") { options[:symlinks] = false }
-        opts.on('--ignore-tags [PATTERN]', 'When counting steps ignore scenarios with tags that match this pattern') { |arg| options[:ignore_tag_pattern] = arg }
+        opts.on('--ignore-tags [PATTERN]', 'When counting steps ignore scenarios with tags that match this pattern') { |arg| options[:ignore_tag_pattern] = arg }
         opts.on("--nice", "execute test commands with low priority.") { options[:nice] = true }
         opts.on("--runtime-log [PATH]", "Location of previously recorded test runtimes") { |path| options[:runtime_log] = path }
-        opts.on("--verbose", "Print more output") { options[:verbose] = true }
-        opts.on("-v", "--version", "Show Version") { puts ParallelTests::VERSION; exit }
-        opts.on("-h", "--help", "Show this.") { puts opts; exit }
+        opts.on("--allowed-missing [INT]", Integer, "Allowed percentage of missing runtimes (default = 50)") { |percent| options[:allowed_missing_percent] = percent }
+        opts.on("--unknown-runtime [FLOAT]", Float, "Use given number as unknown runtime (otherwise use average time)") { |time| options[:unknown_runtime] = time }
+        opts.on("--first-is-1", "Use \"1\" as TEST_ENV_NUMBER to not reuse the default test environment") { options[:first_is_1] = true }
+        opts.on("--fail-fast", "Stop all groups when one group fails (best used with --test-options '--fail-fast' if supported") { options[:fail_fast] = true }
+        opts.on("--verbose", "Print debug output") { options[:verbose] = true }
+        opts.on("--verbose-process-command", "Displays only the command that will be executed by each process") { options[:verbose_process_command] = true }
+        opts.on("--verbose-rerun-command", "When there are failures, displays the command executed by each process that failed") { options[:verbose_rerun_command] = true }
+        opts.on("--quiet", "Print only tests output") { options[:quiet] = true }
+        opts.on("-v", "--version", "Show Version") do
+          puts ParallelTests::VERSION
+          exit 0
+        end
+        opts.on("-h", "--help", "Show this.") do
+          puts opts
+          exit 0
+        end
       end.parse!(argv)
 
+      raise "Both options are mutually exclusive: verbose & quiet" if options[:verbose] && options[:quiet]
+
       if options[:count] == 0
         options.delete(:count)
         options[:non_parallel] = true
       end
 
-      abort "Pass files or folders to run" if argv.empty? && !options[:execute]
+      files, remaining = extract_file_paths(argv)
+      unless options[:execute]
+        if files.empty?
+          default_test_folder = @runner.default_test_folder
+          if File.directory?(default_test_folder)
+            files = [default_test_folder]
+          else
+            abort "Pass files or folders to run"
+          end
+        end
+        options[:files] = files.map { |file_path| Pathname.new(file_path).cleanpath.to_s }
+      end
 
-      options[:files] = argv
+      append_test_options(options, remaining)
 
       options[:group_by] ||= :filesize if options[:only_group]
 
-      raise "--group-by found and --single-process are not supported" if options[:group_by] == :found and options[:single_process]
+      if options[:group_by] == :found && options[:single_process]
+        raise "--group-by found and --single-process are not supported"
+      end
       allowed = [:filesize, :runtime, :found]
       if !allowed.include?(options[:group_by]) && options[:only_group]
         raise "--group-by #{allowed.join(" or ")} is required for --only-group"
       end
 
+      if options[:specify_groups] && (options.keys & [:single_process, :isolate, :isolate_count]).any?
+        raise "Can't pass --specify-groups with any of these keys: --single, --isolate, or --isolate-n"
+      end
+
       options
     end
 
+    def extract_file_paths(argv)
+      dash_index = argv.rindex("--")
+      file_args_at = (dash_index || -1) + 1
+      [argv[file_args_at..-1], argv[0...(dash_index || 0)]]
+    end
+
+    def extract_test_options(argv)
+      dash_index = argv.index("--") || -1
+      argv[dash_index + 1..-1]
+    end
+
+    def append_test_options(options, argv)
+      new_opts = extract_test_options(argv)
+      return if new_opts.empty?
+
+      prev_and_new = [options[:test_options], new_opts.shelljoin]
+      options[:test_options] = prev_and_new.compact.join(' ')
+    end
+
     def load_runner(type)
       require "parallel_tests/#{type}/runner"
       runner_classname = type.split("_").map(&:capitalize).join.sub("Rspec", "RSpec")
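
Note on the new `--` handling in parse_options! above: leftover arguments are split at the last `--`; everything after it becomes the file list and everything before it is shell-joined onto --test-options. A standalone illustration of the same splitting logic (the argv value here is made up):

    def extract_file_paths(argv)
      dash_index = argv.rindex("--")
      file_args_at = (dash_index || -1) + 1
      [argv[file_args_at..-1], argv[0...(dash_index || 0)]]
    end

    extract_file_paths(["-t", "acceptance", "--", "spec/foo_spec.rb", "spec/acceptance"])
    # => [["spec/foo_spec.rb", "spec/acceptance"], ["-t", "acceptance"]]
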
@@ -179,13 +346,19 @@ module ParallelTests
     end
 
     def execute_shell_command_in_parallel(command, num_processes, options)
-      runs = (0...num_processes).to_a
+      runs = if options[:only_group]
+        options[:only_group].map { |g| g - 1 }
+      else
+        (0...num_processes).to_a
+      end
       results = if options[:non_parallel]
-        runs.map do |i|
-          ParallelTests::Test::Runner.execute_command(command, i, num_processes, options)
+        ParallelTests.with_pid_file do
+          runs.map do |i|
+            ParallelTests::Test::Runner.execute_command(command, i, num_processes, options)
+          end
         end
       else
-        execute_in_parallel(runs, num_processes, options) do |i|
+        execute_in_parallel(runs, runs.size, options) do |i|
          ParallelTests::Test::Runner.execute_command(command, i, num_processes, options)
         end
       end.flatten
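
Note: with --only-group, the --exec runs above are restricted to the zero-based indexes of the requested groups. A tiny illustration (values made up):

    only_group = [2, 3]
    runs = only_group.map { |g| g - 1 } # => [1, 2], i.e. groups 2 and 3 run as process indexes 1 and 2
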
@@ -193,27 +366,49 @@ module ParallelTests
       abort if results.any? { |r| r[:exit_status] != 0 }
     end
 
-    def report_time_taken
-      seconds = ParallelTests.delta { yield }.to_i
+    def report_time_taken(&block)
+      seconds = ParallelTests.delta(&block).to_i
       puts "\nTook #{seconds} seconds#{detailed_duration(seconds)}"
     end
 
     def detailed_duration(seconds)
-      parts = [ seconds / 3600, seconds % 3600 / 60, seconds % 60 ].drop_while(&:zero?)
+      parts = [seconds / 3600, seconds % 3600 / 60, seconds % 60].drop_while(&:zero?)
       return if parts.size < 2
       parts = parts.map { |i| "%02d" % i }.join(':').sub(/^0/, '')
       " (#{parts})"
     end
 
     def final_fail_message
-      fail_message = "#{@runner.name}s Failed"
+      fail_message = "Tests Failed"
       fail_message = "\e[31m#{fail_message}\e[0m" if use_colors?
-
       fail_message
     end
 
     def use_colors?
       $stdout.tty?
     end
+
+    def first_is_1?
+      val = ENV["PARALLEL_TEST_FIRST_IS_1"]
+      ['1', 'true'].include?(val)
+    end
+
+    # CI systems often fail when there is no output for a long time, so simulate some output
+    def simulate_output_for_ci(simulate)
+      if simulate
+        progress_indicator = Thread.new do
+          interval = Float(ENV.fetch('PARALLEL_TEST_HEARTBEAT_INTERVAL', 60))
+          loop do
+            sleep interval
+            print '.'
+          end
+        end
+        test_results = yield
+        progress_indicator.exit
+        test_results
+      else
+        yield
+      end
+    end
   end
 end
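
Note on simulate_output_for_ci above: it is a generic keep-alive trick for CI systems that kill silent builds. A standalone sketch, where the plain sleep stands in for a quiet test run and PARALLEL_TEST_HEARTBEAT_INTERVAL is the same variable the method reads:

    heartbeat = Thread.new do
      interval = Float(ENV.fetch('PARALLEL_TEST_HEARTBEAT_INTERVAL', 60))
      loop do
        sleep interval
        print '.' # emit something so CI sees activity
      end
    end
    sleep 5 # stand-in for the real (silent) work
    heartbeat.exit
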
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 require 'cucumber/formatter/rerun'
 require 'parallel_tests/gherkin/io'
 
@@ -6,20 +7,21 @@ module ParallelTests
     class FailuresLogger < ::Cucumber::Formatter::Rerun
       include ParallelTests::Gherkin::Io
 
-      def initialize(runtime, path_or_io, options)
-        @io = prepare_io(path_or_io)
+      def initialize(config)
+        super
+        @io = prepare_io(config.out_stream)
       end
 
-      def after_feature(feature)
-        unless @lines.empty?
-          lock_output do
-            @lines.each do |line|
-              @io.puts "#{feature.file}:#{line}"
+      def done
+        return if @failures.empty?
+        lock_output do
+          @failures.each do |file, lines|
+            lines.each do |line|
+              @io.print "#{file}:#{line} "
             end
           end
         end
       end
-
     end
   end
 end
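
Note on the rewritten FailuresLogger above: Cucumber's Rerun formatter tracks failures as a file => lines hash, and done now prints space-separated file:line entries so the output can be fed back to cucumber. Illustration of the output shape with made-up data:

    failures = { "features/signup.feature" => [12, 40], "features/login.feature" => [7] }
    failures.flat_map { |file, lines| lines.map { |line| "#{file}:#{line}" } }.join(" ")
    # => "features/signup.feature:12 features/signup.feature:40 features/login.feature:7"
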
@@ -0,0 +1,32 @@
+# frozen_string_literal: true
+begin
+  gem "cuke_modeler", "~> 3.0"
+  require 'cuke_modeler'
+rescue LoadError
+  raise 'Grouping by number of cucumber steps requires the `cuke_modeler` modeler gem with requirement `~> 3.0`. Add `gem "cuke_modeler", "~> 3.0"` to your `Gemfile`, run `bundle install` and try again.'
+end
+
+module ParallelTests
+  module Cucumber
+    class FeaturesWithSteps
+      class << self
+        def all(tests, options)
+          ignore_tag_pattern = options[:ignore_tag_pattern].nil? ? nil : Regexp.compile(options[:ignore_tag_pattern])
+          # format of hash will be FILENAME => NUM_STEPS
+          steps_per_file = tests.each_with_object({}) do |file, steps|
+            feature = ::CukeModeler::FeatureFile.new(file).feature
+
+            # skip feature if it matches tag regex
+            next if feature.tags.grep(ignore_tag_pattern).any?
+
+            # count the number of steps in the file
+            # will only include a feature if the regex does not match
+            all_steps = feature.scenarios.map { |a| a.steps.count if a.tags.grep(ignore_tag_pattern).empty? }.compact
+            steps[file] = all_steps.sum
+          end
+          steps_per_file.sort_by { |_, value| -value }
+        end
+      end
+    end
+  end
+end
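
Note on FeaturesWithSteps above: it returns [file, step_count] pairs sorted by descending step count, which the step-based grouping then uses as weights. Shape of the result (file names and counts made up):

    steps_per_file = { "features/login.feature" => 3, "features/checkout.feature" => 18 }
    steps_per_file.sort_by { |_, value| -value }
    # => [["features/checkout.feature", 18], ["features/login.feature", 3]]
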
@@ -1,24 +1,32 @@
+# frozen_string_literal: true
 require "parallel_tests/gherkin/runner"
 
 module ParallelTests
   module Cucumber
     class Runner < ParallelTests::Gherkin::Runner
+      SCENARIOS_RESULTS_BOUNDARY_REGEX = /^(Failing|Flaky) Scenarios:$/.freeze
+      SCENARIO_REGEX = %r{^cucumber features/.+:\d+}.freeze
+
       class << self
         def name
           'cucumber'
         end
 
+        def default_test_folder
+          'features'
+        end
+
         def line_is_result?(line)
-          super or line =~ failing_scenario_regex
+          super || line =~ SCENARIO_REGEX || line =~ SCENARIOS_RESULTS_BOUNDARY_REGEX
         end
 
         def summarize_results(results)
           output = []
 
-          failing_scenarios = results.grep(failing_scenario_regex)
-          if failing_scenarios.any?
-            failing_scenarios.unshift("Failing Scenarios:")
-            output << failing_scenarios.join("\n")
+          scenario_groups = results.slice_before(SCENARIOS_RESULTS_BOUNDARY_REGEX).group_by(&:first)
+          scenario_groups.each do |header, group|
+            scenarios = group.flatten.grep(SCENARIO_REGEX)
+            output << ([header] + scenarios).join("\n") if scenarios.any?
           end
 
           output << super
@@ -26,10 +34,9 @@ module ParallelTests
           output.join("\n\n")
         end
 
-        private
-
-        def failing_scenario_regex
-          /^cucumber features\/.+:\d+/
+        def command_with_seed(cmd, seed)
+          clean = cmd.sub(/\s--order\s+random(:\d+)?\b/, '')
+          "#{clean} --order random:#{seed}"
         end
       end
     end
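
Note on command_with_seed above: it strips any existing --order random(:N) flag before appending the recorded seed, so the printed rerun command reproduces the failing ordering. Standalone illustration of the same substitution (the command string is made up):

    cmd = "bundle exec cucumber features/a.feature --order random:1234"
    clean = cmd.sub(/\s--order\s+random(:\d+)?\b/, '')
    "#{clean} --order random:5678"
    # => "bundle exec cucumber features/a.feature --order random:5678"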