parallel_tests 3.4.0 → 4.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,3 +1,4 @@
+ # frozen_string_literal: true
  require 'optparse'
  require 'tempfile'
  require 'parallel_tests'
@@ -14,12 +15,12 @@ module ParallelTests
  ENV['DISABLE_SPRING'] ||= '1'

  num_processes = ParallelTests.determine_number_of_processes(options[:count])
- num_processes = num_processes * (options[:multiply] || 1)
+ num_processes *= (options[:multiply] || 1)

  options[:first_is_1] ||= first_is_1?

  if options[:execute]
- execute_shell_command_in_parallel(options[:execute], num_processes, options)
+ execute_command_in_parallel(options[:execute], num_processes, options)
  else
  run_tests_in_parallel(num_processes, options)
  end
@@ -31,9 +32,23 @@ module ParallelTests
  @graceful_shutdown_attempted ||= false
  Kernel.exit if @graceful_shutdown_attempted

- # The Pid class's synchronize method can't be called directly from a trap
- # Using Thread workaround https://github.com/ddollar/foreman/issues/332
- Thread.new { ParallelTests.stop_all_processes }
+ # In a shell, all sub-processes also get an interrupt, so they shut themselves down.
+ # In a background process this does not happen and we need to do it ourselves.
+ # We cannot always send the interrupt since then the sub-processes would get interrupted twice when in foreground
+ # and that messes with interrupt handling.
+ #
+ # (can simulate detached with `(bundle exec parallel_rspec test/a_spec.rb -n 2 &)`)
+ # also the integration test "passes on int signal to child processes" is detached.
+ #
+ # On windows getpgid does not work so we resort to always killing which is the smaller bug.
+ #
+ # The ParallelTests::Pids `synchronize` method can't be called directly from a trap,
+ # using Thread workaround https://github.com/ddollar/foreman/issues/332
+ Thread.new do
+ if Gem.win_platform? || ((child_pid = ParallelTests.pids.all.first) && Process.getpgid(child_pid) != Process.pid)
+ ParallelTests.stop_all_processes
+ end
+ end

  @graceful_shutdown_attempted = true
  end
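The new trap handler above only force-stops child processes when they would not already receive the terminal's interrupt; restated as a standalone sketch (names are taken from the hunk, the surrounding Signal trap and Thread plumbing is omitted):

    # Illustrative sketch, not part of the diff.
    child_pid = ParallelTests.pids.all.first
    detached  = child_pid && Process.getpgid(child_pid) != Process.pid
    # Foreground children share the CLI's process group and get SIGINT themselves;
    # on Windows (no getpgid) the CLI always kills, accepting the double interrupt.
    ParallelTests.stop_all_processes if Gem.win_platform? || detached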
@@ -56,26 +71,21 @@ module ParallelTests
  def run_tests_in_parallel(num_processes, options)
  test_results = nil

- run_tests_proc = -> {
+ run_tests_proc = -> do
  groups = @runner.tests_in_groups(options[:files], num_processes, options)
- groups.reject! &:empty?
-
- test_results = if options[:only_group]
- groups_to_run = options[:only_group].collect{|i| groups[i - 1]}.compact
- report_number_of_tests(groups_to_run) unless options[:quiet]
- execute_in_parallel(groups_to_run, groups_to_run.size, options) do |group|
- run_tests(group, groups_to_run.index(group), 1, options)
- end
- else
- report_number_of_tests(groups) unless options[:quiet]
+ groups.reject!(&:empty?)

- execute_in_parallel(groups, groups.size, options) do |group|
- run_tests(group, groups.index(group), num_processes, options)
- end
+ if options[:only_group]
+ groups = options[:only_group].map { |i| groups[i - 1] }.compact
+ num_processes = 1
  end

+ report_number_of_tests(groups) unless options[:quiet]
+ test_results = execute_in_parallel(groups, groups.size, options) do |group|
+ run_tests(group, groups.index(group), num_processes, options)
+ end
  report_results(test_results, options) unless options[:quiet]
- }
+ end

  if options[:quiet]
  run_tests_proc.call
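A quick illustration of the simplified --only-group path above, with made-up group contents:

    # Illustrative values, not from the diff.
    groups     = [['a_spec.rb'], ['b_spec.rb'], ['c_spec.rb'], ['d_spec.rb']]
    only_group = [2, 4]
    groups = only_group.map { |i| groups[i - 1] }.compact
    # => [['b_spec.rb'], ['d_spec.rb']] -- each surviving group still runs in its own
    #    process; num_processes is forced to 1, mirroring the value the old branch
    #    passed to run_tests.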
@@ -83,12 +93,23 @@ module ParallelTests
  report_time_taken(&run_tests_proc)
  end

- abort final_fail_message if any_test_failed?(test_results)
+ if any_test_failed?(test_results)
+ warn final_fail_message
+
+ # return the highest exit status to allow sub-processes to send things other than 1
+ exit_status = if options[:highest_exit_status]
+ test_results.map { |data| data.fetch(:exit_status) }.max
+ else
+ 1
+ end
+
+ exit exit_status
+ end
  end

  def run_tests(group, process_number, num_processes, options)
  if group.empty?
- {:stdout => '', :exit_status => 0, :command => '', :seed => nil}
+ { stdout: '', exit_status: 0, command: nil, seed: nil }
  else
  @runner.run_tests(group, process_number, num_processes, options)
  end
@@ -104,18 +125,16 @@ module ParallelTests

  def lock(lockfile)
  File.open(lockfile) do |lock|
- begin
- lock.flock File::LOCK_EX
- yield
- ensure
- # This shouldn't be necessary, but appears to be
- lock.flock File::LOCK_UN
- end
+ lock.flock File::LOCK_EX
+ yield
+ ensure
+ # This shouldn't be necessary, but appears to be
+ lock.flock File::LOCK_UN
  end
  end

  def report_results(test_results, options)
- results = @runner.find_results(test_results.map { |result| result[:stdout] }*"")
+ results = @runner.find_results(test_results.map { |result| result[:stdout] } * "")
  puts ""
  puts @runner.summarize_results(results)

@@ -126,13 +145,12 @@ module ParallelTests
  failing_sets = test_results.reject { |r| r[:exit_status] == 0 }
  return if failing_sets.none?

- if options[:verbose] || options[:verbose_rerun_command]
+ if options[:verbose] || options[:verbose_command]
  puts "\n\nTests have failed for a parallel_test group. Use the following command to run the group again:\n\n"
  failing_sets.each do |failing_set|
  command = failing_set[:command]
- command = command.gsub(/;export [A-Z_]+;/, ' ') # remove ugly export statements
  command = @runner.command_with_seed(command, failing_set[:seed]) if failing_set[:seed]
- puts command
+ @runner.print_command(command, failing_set[:env] || {})
  end
  end
  end
@@ -140,20 +158,31 @@ module ParallelTests
  def report_number_of_tests(groups)
  name = @runner.test_file_name
  num_processes = groups.size
- num_tests = groups.map(&:size).inject(0, :+)
+ num_tests = groups.map(&:size).sum
  tests_per_process = (num_processes == 0 ? 0 : num_tests / num_processes)
- puts "#{num_processes} processes for #{num_tests} #{name}s, ~ #{tests_per_process} #{name}s per process"
+ puts "#{pluralize(num_processes, 'process')} for #{pluralize(num_tests, name)}, ~ #{pluralize(tests_per_process, name)} per process"
+ end
+
+ def pluralize(n, singular)
+ if n == 1
+ "1 #{singular}"
+ elsif singular.end_with?('s', 'sh', 'ch', 'x', 'z')
+ "#{n} #{singular}es"
+ else
+ "#{n} #{singular}s"
+ end
  end

- #exit with correct status code so rake parallel:test && echo 123 works
+ # exit with correct status code so rake parallel:test && echo 123 works
  def any_test_failed?(test_results)
  test_results.any? { |result| result[:exit_status] != 0 }
  end

  def parse_options!(argv)
+ newline_padding = " " * 37
  options = {}
  OptionParser.new do |opts|
- opts.banner = <<-BANNER.gsub(/^ /, '')
+ opts.banner = <<~BANNER
  Run all tests in parallel, giving each process ENV['TEST_ENV_NUMBER'] ('', '2', '3', ...)

  [optional] Only selected files & folders:
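The pluralize helper added in the hunk above feeds the reworked summary line; its behavior for a few inputs (derived directly from the code, values shown for illustration):

    pluralize(1, 'process')  # => "1 process"
    pluralize(8, 'process')  # => "8 processes"   (singular ending in 's' gets 'es')
    pluralize(8, 'spec')     # => "8 specs"
    # giving output such as: 8 processes for 200 specs, ~ 25 specs per process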
@@ -167,61 +196,85 @@ module ParallelTests
  opts.on("-n [PROCESSES]", Integer, "How many processes to use, default: available CPUs") { |n| options[:count] = n }
  opts.on("-p", "--pattern [PATTERN]", "run tests matching this regex pattern") { |pattern| options[:pattern] = /#{pattern}/ }
  opts.on("--exclude-pattern", "--exclude-pattern [PATTERN]", "exclude tests matching this regex pattern") { |pattern| options[:exclude_pattern] = /#{pattern}/ }
- opts.on("--group-by [TYPE]", <<-TEXT.gsub(/^ /, '')
- group tests by:
- found - order of finding files
- steps - number of cucumber/spinach steps
- scenarios - individual cucumber scenarios
- filesize - by size of the file
- runtime - info from runtime log
- default - runtime when runtime log is filled otherwise filesize
+ opts.on(
+ "--group-by [TYPE]",
+ <<~TEXT.rstrip.split("\n").join("\n#{newline_padding}")
+ group tests by:
+ found - order of finding files
+ steps - number of cucumber/spinach steps
+ scenarios - individual cucumber scenarios
+ filesize - by size of the file
+ runtime - info from runtime log
+ default - runtime when runtime log is filled otherwise filesize
  TEXT
- ) { |type| options[:group_by] = type.to_sym }
- opts.on("-m [FLOAT]", "--multiply-processes [FLOAT]", Float, "use given number as a multiplier of processes to run") { |multiply| options[:multiply] = multiply }
-
- opts.on("-s [PATTERN]", "--single [PATTERN]",
- "Run all matching files in the same process") do |pattern|
-
- options[:single_process] ||= []
- options[:single_process] << /#{pattern}/
+ ) { |type| options[:group_by] = type.to_sym }
+ opts.on("-m [FLOAT]", "--multiply-processes [FLOAT]", Float, "use given number as a multiplier of processes to run") do |multiply|
+ options[:multiply] = multiply
  end

- opts.on("-i", "--isolate",
- "Do not run any other tests in the group used by --single(-s)") do |pattern|
+ opts.on("-s [PATTERN]", "--single [PATTERN]", "Run all matching files in the same process") do |pattern|
+ (options[:single_process] ||= []) << /#{pattern}/
+ end

+ opts.on("-i", "--isolate", "Do not run any other tests in the group used by --single(-s)") do
  options[:isolate] = true
  end

- opts.on("--isolate-n [PROCESSES]",
+ opts.on(
+ "--isolate-n [PROCESSES]",
  Integer,
- "Use 'isolate' singles with number of processes, default: 1.") do |n|
- options[:isolate_count] = n
+ "Use 'isolate' singles with number of processes, default: 1."
+ ) { |n| options[:isolate_count] = n }
+
+ opts.on("--highest-exit-status", "Exit with the highest exit status provided by test run(s)") do
+ options[:highest_exit_status] = true
  end

- opts.on("--only-group INT[, INT]", Array) { |groups| options[:only_group] = groups.map(&:to_i) }
+ opts.on(
+ "--specify-groups [SPECS]",
+ <<~TEXT.rstrip.split("\n").join("\n#{newline_padding}")
+ Use 'specify-groups' if you want to specify multiple specs running in multiple
+ processes in a specific formation. Commas indicate specs in the same process,
+ pipes indicate specs in a new process. Cannot use with --single, --isolate, or
+ --isolate-n. Ex.
+ $ parallel_test -n 3 . --specify-groups '1_spec.rb,2_spec.rb|3_spec.rb'
+ Process 1 will contain 1_spec.rb and 2_spec.rb
+ Process 2 will contain 3_spec.rb
+ Process 3 will contain all other specs
+ TEXT
+ ) { |groups| options[:specify_groups] = groups }
+
+ opts.on(
+ "--only-group INT[,INT]",
+ Array,
+ <<~TEXT.rstrip.split("\n").join("\n#{newline_padding}")
+ Only run the given group numbers.
+ Changes `--group-by` default to 'filesize'.
+ TEXT
+ ) { |groups| options[:only_group] = groups.map(&:to_i) }

- opts.on("-e", "--exec [COMMAND]", "execute this code parallel and with ENV['TEST_ENV_NUMBER']") { |path| options[:execute] = path }
- opts.on("-o", "--test-options '[OPTIONS]'", "execute test commands with those options") { |arg| options[:test_options] = arg.lstrip }
+ opts.on("-e", "--exec [COMMAND]", "execute this code parallel and with ENV['TEST_ENV_NUMBER']") { |arg| options[:execute] = Shellwords.shellsplit(arg) }
+ opts.on("-o", "--test-options '[OPTIONS]'", "execute test commands with those options") { |arg| options[:test_options] = Shellwords.shellsplit(arg) }
  opts.on("-t", "--type [TYPE]", "test(default) / rspec / cucumber / spinach") do |type|
- begin
- @runner = load_runner(type)
- rescue NameError, LoadError => e
- puts "Runner for `#{type}` type has not been found! (#{e})"
- abort
- end
+ @runner = load_runner(type)
+ rescue NameError, LoadError => e
+ puts "Runner for `#{type}` type has not been found! (#{e})"
+ abort
  end
- opts.on("--suffix [PATTERN]", <<-TEXT.gsub(/^ /, '')
- override built in test file pattern (should match suffix):
- '_spec\.rb$' - matches rspec files
- '_(test|spec).rb$' - matches test or spec files
+ opts.on(
+ "--suffix [PATTERN]",
+ <<~TEXT.rstrip.split("\n").join("\n#{newline_padding}")
+ override built in test file pattern (should match suffix):
+ '_spec.rb$' - matches rspec files
+ '_(test|spec).rb$' - matches test or spec files
  TEXT
- ) { |pattern| options[:suffix] = /#{pattern}/ }
+ ) { |pattern| options[:suffix] = /#{pattern}/ }
  opts.on("--serialize-stdout", "Serialize stdout output, nothing will be written until everything is done") { options[:serialize_stdout] = true }
  opts.on("--prefix-output-with-test-env-number", "Prefixes test env number to the output when not using --serialize-stdout") { options[:prefix_output_with_test_env_number] = true }
  opts.on("--combine-stderr", "Combine stderr into stdout, useful in conjunction with --serialize-stdout") { options[:combine_stderr] = true }
  opts.on("--non-parallel", "execute same commands but do not in parallel, needs --exec") { options[:non_parallel] = true }
  opts.on("--no-symlinks", "Do not traverse symbolic links to find test files") { options[:symlinks] = false }
- opts.on('--ignore-tags [PATTERN]', 'When counting steps ignore scenarios with tags that match this pattern') { |arg| options[:ignore_tag_pattern] = arg }
+ opts.on('--ignore-tags [PATTERN]', 'When counting steps ignore scenarios with tags that match this pattern') { |arg| options[:ignore_tag_pattern] = arg }
  opts.on("--nice", "execute test commands with low priority.") { options[:nice] = true }
  opts.on("--runtime-log [PATH]", "Location of previously recorded test runtimes") { |path| options[:runtime_log] = path }
  opts.on("--allowed-missing [INT]", Integer, "Allowed percentage of missing runtimes (default = 50)") { |percent| options[:allowed_missing_percent] = percent }
@@ -229,16 +282,19 @@ module ParallelTests
  opts.on("--first-is-1", "Use \"1\" as TEST_ENV_NUMBER to not reuse the default test environment") { options[:first_is_1] = true }
  opts.on("--fail-fast", "Stop all groups when one group fails (best used with --test-options '--fail-fast' if supported") { options[:fail_fast] = true }
  opts.on("--verbose", "Print debug output") { options[:verbose] = true }
- opts.on("--verbose-process-command", "Displays only the command that will be executed by each process") { options[:verbose_process_command] = true }
- opts.on("--verbose-rerun-command", "When there are failures, displays the command executed by each process that failed") { options[:verbose_rerun_command] = true }
+ opts.on("--verbose-command", "Displays the command that will be executed by each process and when there are failures displays the command executed by each process that failed") { options[:verbose_command] = true }
  opts.on("--quiet", "Print only tests output") { options[:quiet] = true }
- opts.on("-v", "--version", "Show Version") { puts ParallelTests::VERSION; exit }
- opts.on("-h", "--help", "Show this.") { puts opts; exit }
+ opts.on("-v", "--version", "Show Version") do
+ puts ParallelTests::VERSION
+ exit 0
+ end
+ opts.on("-h", "--help", "Show this.") do
+ puts opts
+ exit 0
+ end
  end.parse!(argv)

- if options[:verbose] && options[:quiet]
- raise "Both options are mutually exclusive: verbose & quiet"
- end
+ raise "Both options are mutually exclusive: verbose & quiet" if options[:verbose] && options[:quiet]

  if options[:count] == 0
  options.delete(:count)
@@ -247,7 +303,14 @@ module ParallelTests

  files, remaining = extract_file_paths(argv)
  unless options[:execute]
- abort "Pass files or folders to run" unless files.any?
+ if files.empty?
+ default_test_folder = @runner.default_test_folder
+ if File.directory?(default_test_folder)
+ files = [default_test_folder]
+ else
+ abort "Pass files or folders to run"
+ end
+ end
  options[:files] = files.map { |file_path| Pathname.new(file_path).cleanpath.to_s }
  end

@@ -255,32 +318,38 @@ module ParallelTests

  options[:group_by] ||= :filesize if options[:only_group]

- raise "--group-by found and --single-process are not supported" if options[:group_by] == :found and options[:single_process]
+ if options[:group_by] == :found && options[:single_process]
+ raise "--group-by found and --single-process are not supported"
+ end
  allowed = [:filesize, :runtime, :found]
  if !allowed.include?(options[:group_by]) && options[:only_group]
  raise "--group-by #{allowed.join(" or ")} is required for --only-group"
  end

+ if options[:specify_groups] && (options.keys & [:single_process, :isolate, :isolate_count]).any?
+ raise "Can't pass --specify-groups with any of these keys: --single, --isolate, or --isolate-n"
+ end
+
  options
  end

  def extract_file_paths(argv)
  dash_index = argv.rindex("--")
  file_args_at = (dash_index || -1) + 1
- [argv[file_args_at..-1], argv[0...(dash_index || 0)]]
+ [argv[file_args_at..], argv[0...(dash_index || 0)]]
  end

  def extract_test_options(argv)
  dash_index = argv.index("--") || -1
- argv[dash_index+1..-1]
+ argv[dash_index + 1..]
  end

  def append_test_options(options, argv)
  new_opts = extract_test_options(argv)
  return if new_opts.empty?

- prev_and_new = [options[:test_options], new_opts.shelljoin]
- options[:test_options] = prev_and_new.compact.join(' ')
+ options[:test_options] ||= []
+ options[:test_options] += new_opts
  end

  def load_runner(type)
@@ -290,9 +359,9 @@ module ParallelTests
  klass_name.split('::').inject(Object) { |x, y| x.const_get(y) }
  end

- def execute_shell_command_in_parallel(command, num_processes, options)
+ def execute_command_in_parallel(command, num_processes, options)
  runs = if options[:only_group]
- options[:only_group].map{|g| g - 1}
+ options[:only_group].map { |g| g - 1 }
  else
  (0...num_processes).to_a
  end
@@ -311,13 +380,13 @@ module ParallelTests
  abort if results.any? { |r| r[:exit_status] != 0 }
  end

- def report_time_taken
- seconds = ParallelTests.delta { yield }.to_i
+ def report_time_taken(&block)
+ seconds = ParallelTests.delta(&block).to_i
  puts "\nTook #{seconds} seconds#{detailed_duration(seconds)}"
  end

  def detailed_duration(seconds)
- parts = [ seconds / 3600, seconds % 3600 / 60, seconds % 60 ].drop_while(&:zero?)
+ parts = [seconds / 3600, seconds % 3600 / 60, seconds % 60].drop_while(&:zero?)
  return if parts.size < 2
  parts = parts.map { |i| "%02d" % i }.join(':').sub(/^0/, '')
  " (#{parts})"
@@ -342,7 +411,7 @@ module ParallelTests
  def simulate_output_for_ci(simulate)
  if simulate
  progress_indicator = Thread.new do
- interval = Float(ENV.fetch('PARALLEL_TEST_HEARTBEAT_INTERVAL', 60))
+ interval = Float(ENV['PARALLEL_TEST_HEARTBEAT_INTERVAL'] || 60)
  loop do
  sleep interval
  print '.'
@@ -1,3 +1,4 @@
+ # frozen_string_literal: true
  require 'cucumber/formatter/rerun'
  require 'parallel_tests/gherkin/io'

@@ -21,7 +22,6 @@ module ParallelTests
  end
  end
  end
-
  end
  end
  end
@@ -1,3 +1,4 @@
+ # frozen_string_literal: true
  begin
  gem "cuke_modeler", "~> 3.0"
  require 'cuke_modeler'
@@ -12,7 +13,7 @@ module ParallelTests
  def all(tests, options)
  ignore_tag_pattern = options[:ignore_tag_pattern].nil? ? nil : Regexp.compile(options[:ignore_tag_pattern])
  # format of hash will be FILENAME => NUM_STEPS
- steps_per_file = tests.each_with_object({}) do |file,steps|
+ steps_per_file = tests.each_with_object({}) do |file, steps|
  feature = ::CukeModeler::FeatureFile.new(file).feature

  # skip feature if it matches tag regex
@@ -20,8 +21,8 @@ module ParallelTests

  # count the number of steps in the file
  # will only include a feature if the regex does not match
- all_steps = feature.scenarios.map{|a| a.steps.count if a.tags.grep(ignore_tag_pattern).empty? }.compact
- steps[file] = all_steps.inject(0,:+)
+ all_steps = feature.scenarios.map { |a| a.steps.count if a.tags.grep(ignore_tag_pattern).empty? }.compact
+ steps[file] = all_steps.sum
  end
  steps_per_file.sort_by { |_, value| -value }
  end
@@ -1,16 +1,21 @@
+ # frozen_string_literal: true
  require "parallel_tests/gherkin/runner"

  module ParallelTests
  module Cucumber
  class Runner < ParallelTests::Gherkin::Runner
- SCENARIOS_RESULTS_BOUNDARY_REGEX = /^(Failing|Flaky) Scenarios:$/
- SCENARIO_REGEX = /^cucumber features\/.+:\d+/
+ SCENARIOS_RESULTS_BOUNDARY_REGEX = /^(Failing|Flaky) Scenarios:$/.freeze
+ SCENARIO_REGEX = %r{^cucumber features/.+:\d+}.freeze

  class << self
  def name
  'cucumber'
  end

+ def default_test_folder
+ 'features'
+ end
+
  def line_is_result?(line)
  super || line =~ SCENARIO_REGEX || line =~ SCENARIOS_RESULTS_BOUNDARY_REGEX
  end
@@ -21,9 +26,7 @@ module ParallelTests
  scenario_groups = results.slice_before(SCENARIOS_RESULTS_BOUNDARY_REGEX).group_by(&:first)
  scenario_groups.each do |header, group|
  scenarios = group.flatten.grep(SCENARIO_REGEX)
- if scenarios.any?
- output << ([header] + scenarios).join("\n")
- end
+ output << ([header] + scenarios).join("\n") if scenarios.any?
  end

  output << super
@@ -32,8 +35,8 @@ module ParallelTests
  end

  def command_with_seed(cmd, seed)
- clean = cmd.sub(/\s--order\s+random(:\d+)?\b/, '')
- "#{clean} --order random:#{seed}"
+ clean = remove_command_arguments(cmd, '--order')
+ [*clean, '--order', "random:#{seed}"]
  end
  end
  end
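command_with_seed now takes and returns a command array instead of a command string, in line with the array-based commands used throughout 4.x; a hedged before/after sketch with an illustrative command (remove_command_arguments is assumed to drop the flag and its value when present):

    # 3.4 (string in, string out):
    #   command_with_seed("cucumber features", 555)
    #   #=> "cucumber features --order random:555"
    # 4.3 (array in, array out):
    #   command_with_seed(["cucumber", "features"], 555)
    #   #=> ["cucumber", "features", "--order", "random:555"]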
@@ -1,3 +1,4 @@
+ # frozen_string_literal: true
  module ParallelTests
  module Cucumber
  module Formatters
@@ -10,7 +11,7 @@ module ParallelTests
  end

  def visit_feature_element(uri, feature_element, feature_tags, line_numbers: [])
- scenario_tags = feature_element.tags.map { |tag| tag.name }
+ scenario_tags = feature_element.tags.map(&:name)
  scenario_tags = feature_tags + scenario_tags
  if feature_element.is_a?(CukeModeler::Scenario) # :Scenario
  test_line = feature_element.source_line
@@ -26,7 +27,7 @@ module ParallelTests
  example_tags = example.tags.map(&:name)
  example_tags = scenario_tags + example_tags
  next unless matches_tags?(example_tags)
- example.rows[1..-1].each do |row|
+ example.rows[1..].each do |row|
  test_line = row.source_line
  next if line_numbers.any? && !line_numbers.include?(test_line)

@@ -36,8 +37,7 @@ module ParallelTests
  end
  end

- def method_missing(*args)
- end
+ def method_missing(*); end # rubocop:disable Style/MissingRespondToMissing

  private

@@ -1,9 +1,9 @@
+ # frozen_string_literal: true
  require 'cucumber/tag_expressions/parser'
  require 'cucumber/runtime'
  require 'cucumber'
  require 'parallel_tests/cucumber/scenario_line_logger'
  require 'parallel_tests/gherkin/listener'
- require 'shellwords'

  begin
  gem "cuke_modeler", "~> 3.0"
@@ -16,11 +16,11 @@ module ParallelTests
  module Cucumber
  class Scenarios
  class << self
- def all(files, options={})
+ def all(files, options = {})
  # Parse tag expression from given test options and ignore tag pattern. Refer here to understand how new tag expression syntax works - https://github.com/cucumber/cucumber/tree/master/tag-expressions
  tags = []
- words = options[:test_options].to_s.shellsplit
- words.each_with_index { |w,i| tags << words[i+1] if ["-t", "--tags"].include?(w) }
+ words = options[:test_options] || []
+ words.each_with_index { |w, i| tags << words[i + 1] if ["-t", "--tags"].include?(w) }
  if ignore = options[:ignore_tag_pattern]
  tags << "not (#{ignore})"
  end
@@ -31,8 +31,7 @@ module ParallelTests

  private

- def split_into_scenarios(files, tags='')
-
+ def split_into_scenarios(files, tags = '')
  # Create the tag expression instance from cucumber tag expressions parser, this is needed to know if the scenario matches with the tags invoked by the request
  # Create the ScenarioLineLogger which will filter the scenario we want
  args = []
@@ -40,7 +39,7 @@ module ParallelTests
  scenario_line_logger = ParallelTests::Cucumber::Formatters::ScenarioLineLogger.new(*args)

  # here we loop on the files map, each file will contain one or more scenario
- features ||= files.map do |path|
+ files.each do |path|
  # Gather up any line numbers attached to the file path
  path, *test_lines = path.split(/:(?=\d+)/)
  test_lines.map!(&:to_i)
@@ -53,7 +52,9 @@ module ParallelTests
  feature_tags = feature.tags.map(&:name)

  # We loop on each children of the feature
- feature.tests.each do |test|
+ test_models = feature.tests
+ test_models += feature.rules.flat_map(&:tests) if feature.respond_to?(:rules) # cuke_modeler >= 3.2 supports rules
+ test_models.each do |test|
  # It's a scenario, we add it to the scenario_line_logger
  scenario_line_logger.visit_feature_element(document.path, test, feature_tags, line_numbers: test_lines)
  end
@@ -1,9 +1,9 @@
+ # frozen_string_literal: true
  require 'parallel_tests'

  module ParallelTests
  module Gherkin
  module Io
-
  def prepare_io(path_or_io)
  if path_or_io.respond_to?(:write)
  path_or_io
@@ -24,7 +24,7 @@ module ParallelTests

  # do not let multiple processes get in each others way
  def lock_output
- if File === @io
+ if @io.is_a?(File)
  begin
  @io.flock File::LOCK_EX
  yield
@@ -35,7 +35,6 @@ module ParallelTests
  yield
  end
  end
-
  end
  end
  end