parallel_tests 3.11.1 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: e052a26b97662bef05987559eda2c84f64b950a1117a4fcfd2cc7708274a497b
- data.tar.gz: 95c02ca904bc927db9b7c747c48de76d7d73eb4adb328a163f6d9e97669c7f80
+ metadata.gz: fcf9040fbe779832d8f04c064321136d832b115d493353ba3e5efa473a02a774
+ data.tar.gz: f9179c53e375db68d53d2c9136b1cc36567ebeb143ad85a44f185d3ccf91a90c
  SHA512:
- metadata.gz: ae8f340c1213e5fd51bf761d7e3f98ae249607808f7c8b375311a719f1c8f14c86c8502a357ecefcfbd79207f791fb336e633da4f369eb2da770320dd9070380
- data.tar.gz: ef54bdfc2a8c6d1e3aa41338b130a9a36f33b1aa615ff6de471bea30217f0282229bfe837ce579c7ea96b6ba7c160a5ddaf1023253482dfab8169246771859a3
+ metadata.gz: 80a68d9b21660e5f0294730cc42a591f13a1f9b75b0e1d6649ab94be5a1dacb48c3fef1dac8e0733abc08a01b063d917a05f6dd791b2887a20921139dc87bde3
+ data.tar.gz: 0b0f70fdff83e0fd14594d4a6eaa99d7576bafb746d2c5a6913d3bfca6d7c6cf9d4a11098e7593102a007bddf8345a3ddf6ac712eeef2647e7a6f699cfb2d37f
data/Readme.md CHANGED
@@ -403,6 +403,7 @@ inspired by [pivotal labs](https://blog.pivotal.io/labs/labs/parallelize-your-rs
  - [Joshua Pinter](https://github.com/joshuapinter)
  - [Zach Dennis](https://github.com/zdennis)
  - [Jon Dufresne](https://github.com/jdufresne)
+ - [Eric Kessler](https://github.com/enkessler)

  [Michael Grosser](http://grosser.it)<br/>
  michael@grosser.it<br/>
@@ -131,12 +131,12 @@ module ParallelTests
  failing_sets = test_results.reject { |r| r[:exit_status] == 0 }
  return if failing_sets.none?

- if options[:verbose] || options[:verbose_rerun_command]
+ if options[:verbose] || options[:verbose_command]
  puts "\n\nTests have failed for a parallel_test group. Use the following command to run the group again:\n\n"
  failing_sets.each do |failing_set|
  command = failing_set[:command]
  command = @runner.command_with_seed(command, failing_set[:seed]) if failing_set[:seed]
- puts Shellwords.shelljoin(command)
+ @runner.print_command(command, failing_set[:env] || {})
  end
  end
  end
@@ -261,8 +261,7 @@ module ParallelTests
  opts.on("--first-is-1", "Use \"1\" as TEST_ENV_NUMBER to not reuse the default test environment") { options[:first_is_1] = true }
  opts.on("--fail-fast", "Stop all groups when one group fails (best used with --test-options '--fail-fast' if supported") { options[:fail_fast] = true }
  opts.on("--verbose", "Print debug output") { options[:verbose] = true }
- opts.on("--verbose-process-command", "Displays only the command that will be executed by each process") { options[:verbose_process_command] = true }
- opts.on("--verbose-rerun-command", "When there are failures, displays the command executed by each process that failed") { options[:verbose_rerun_command] = true }
+ opts.on("--verbose-command", "Displays the command that will be executed by each process and when there are failures displays the command executed by each process that failed") { options[:verbose_command] = true }
  opts.on("--quiet", "Print only tests output") { options[:quiet] = true }
  opts.on("-v", "--version", "Show Version") do
  puts ParallelTests::VERSION
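
Note: the two 3.x verbosity flags (`--verbose-process-command` and `--verbose-rerun-command`) are merged into a single `--verbose-command` flag that covers both the per-process command printed before each group runs and the rerun command printed for failed groups. A minimal sketch of passing the renamed flag to the CLI entry point (the argv here is illustrative):

```ruby
require 'parallel_tests/cli'

# Illustrative invocation; 'spec' stands in for whatever test directory you run.
# Both behaviors that used to have their own flags now hang off :verbose_command.
ParallelTests::CLI.new.run(['--verbose-command', 'spec'])
```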
@@ -316,12 +315,12 @@ module ParallelTests
  def extract_file_paths(argv)
  dash_index = argv.rindex("--")
  file_args_at = (dash_index || -1) + 1
- [argv[file_args_at..-1], argv[0...(dash_index || 0)]]
+ [argv[file_args_at..], argv[0...(dash_index || 0)]]
  end

  def extract_test_options(argv)
  dash_index = argv.index("--") || -1
- argv[dash_index + 1..-1]
+ argv[dash_index + 1..]
  end

  def append_test_options(options, argv)
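
Note: several hunks in this release replace `ary[n..-1]` with the endless-range form `ary[n..]`. The two are equivalent; the endless-range literal requires Ruby >= 2.6, which lines up with the `required_ruby_version` bump to 2.7.0 in the metadata diff below. A quick illustration with an illustrative argv:

```ruby
argv = ['-n', '4', '--', 'spec/a_spec.rb', 'spec/b_spec.rb']
dash_index = argv.index('--') || -1

argv[dash_index + 1..-1]  # => ["spec/a_spec.rb", "spec/b_spec.rb"]
argv[dash_index + 1..]    # same result, endless-range form (Ruby 2.6+)
```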
@@ -391,7 +390,7 @@ module ParallelTests
  def simulate_output_for_ci(simulate)
  if simulate
  progress_indicator = Thread.new do
- interval = Float(ENV.fetch('PARALLEL_TEST_HEARTBEAT_INTERVAL', 60))
+ interval = Float(ENV['PARALLEL_TEST_HEARTBEAT_INTERVAL'] || 60)
  loop do
  sleep interval
  print '.'
@@ -27,7 +27,7 @@ module ParallelTests
  example_tags = example.tags.map(&:name)
  example_tags = scenario_tags + example_tags
  next unless matches_tags?(example_tags)
- example.rows[1..-1].each do |row|
+ example.rows[1..].each do |row|
  test_line = row.source_line
  next if line_numbers.any? && !line_numbers.include?(test_line)

@@ -52,7 +52,9 @@ module ParallelTests
  feature_tags = feature.tags.map(&:name)

  # We loop on each children of the feature
- feature.tests.each do |test|
+ test_models = feature.tests
+ test_models += feature.rules.flat_map(&:tests) if feature.respond_to?(:rules) # cuke_modeler >= 3.2 supports rules
+ test_models.each do |test|
  # It's a scenario, we add it to the scenario_line_logger
  scenario_line_logger.visit_feature_element(document.path, test, feature_tags, line_numbers: test_lines)
  end
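
Note: scenario collection now also walks `Rule:` blocks when the installed cuke_modeler exposes them; the `respond_to?(:rules)` guard keeps older cuke_modeler versions working. A sketch of the same traversal outside the gem, assuming cuke_modeler >= 3.2 and an illustrative feature path:

```ruby
require 'cuke_modeler'

feature = CukeModeler::FeatureFile.new('features/checkout.feature').feature

test_models = feature.tests                                                    # scenarios/outlines directly under Feature:
test_models += feature.rules.flat_map(&:tests) if feature.respond_to?(:rules)  # plus those nested under Rule:
test_models.each { |test| puts test.name }
```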
@@ -57,7 +57,7 @@ module ParallelTests
  plural = "s" if (word == group) && (number != 1)
  "#{number} #{word}#{plural}"
  end
- "#{sums[0]} (#{sums[1..-1].join(", ")})"
+ "#{sums[0]} (#{sums[1..].join(", ")})"
  end.compact.join("\n")
  end

@@ -38,7 +38,7 @@ module ParallelTests
  # add all files that should run in a multiple isolated processes to their own groups
  group_features_by_size(items_to_group(single_items), groups[0..(isolate_count - 1)])
  # group the non-isolated by size
- group_features_by_size(items_to_group(items), groups[isolate_count..-1])
+ group_features_by_size(items_to_group(items), groups[isolate_count..])
  else
  # add all files that should run in a single non-isolated process to first group
  single_items.each { |item, size| add_to_group(groups.first, item, size) }
@@ -73,7 +73,7 @@ module ParallelTests
  []
  end
  if runtimes.size * 1.5 > tests.size
- puts "Using recorded test runtime"
+ puts "Using recorded test runtime" unless options[:quiet]
  sort_by_runtime(tests, runtimes)
  else
  sort_by_filesize(tests)
@@ -97,11 +97,16 @@ module ParallelTests
  # being able to run with for example `-output foo-$TEST_ENV_NUMBER` worked originally and is convenient
  cmd.map! { |c| c.gsub("$TEST_ENV_NUMBER", number).gsub("${TEST_ENV_NUMBER}", number) }

- puts Shellwords.shelljoin(cmd) if report_process_command?(options) && !options[:serialize_stdout]
+ print_command(cmd, env) if report_process_command?(options) && !options[:serialize_stdout]

  execute_command_and_capture_output(env, cmd, options)
  end

+ def print_command(command, env)
+ env_str = ['TEST_ENV_NUMBER', 'PARALLEL_TEST_GROUPS'].map { |e| "#{e}=#{env[e]}" }.join(' ')
+ puts [env_str, Shellwords.shelljoin(command)].compact.join(' ')
+ end
+
  def execute_command_and_capture_output(env, cmd, options)
  pid = nil

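
Note: the new `print_command` helper prefixes the joined command with the group's `TEST_ENV_NUMBER` and `PARALLEL_TEST_GROUPS` values, so the printed line can be copied to reproduce a single group's run. A rough sketch of what it emits (env and command values are illustrative):

```ruby
require 'shellwords'

env = { 'TEST_ENV_NUMBER' => '2', 'PARALLEL_TEST_GROUPS' => '4' }
cmd = ['bundle', 'exec', 'rspec', 'spec/models/user_spec.rb']

env_str = ['TEST_ENV_NUMBER', 'PARALLEL_TEST_GROUPS'].map { |e| "#{e}=#{env[e]}" }.join(' ')
puts [env_str, Shellwords.shelljoin(cmd)].compact.join(' ')
# => TEST_ENV_NUMBER=2 PARALLEL_TEST_GROUPS=4 bundle exec rspec spec/models/user_spec.rb
```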
@@ -119,7 +124,7 @@ module ParallelTests

  output = "#{Shellwords.shelljoin(cmd)}\n#{output}" if report_process_command?(options) && options[:serialize_stdout]

- { stdout: output, exit_status: exitstatus, command: cmd, seed: seed }
+ { env: env, stdout: output, exit_status: exitstatus, command: cmd, seed: seed }
  end

  def find_results(test_output)
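
Note: each group's result hash now carries the env it ran with, which is what lets the CLI's rerun output (the `failing_set[:env]` lookup in the first hunk above) include `TEST_ENV_NUMBER`. A sketch of the shape of one result after this change, with illustrative values:

```ruby
{
  env: { 'TEST_ENV_NUMBER' => '2', 'PARALLEL_TEST_GROUPS' => '4' },
  stdout: "....F\n",
  exit_status: 1,
  command: ['rspec', 'spec/models/user_spec.rb'],
  seed: nil
}
```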
@@ -153,8 +158,8 @@ module ParallelTests
  protected

  def executable
- if ENV.include?('PARALLEL_TESTS_EXECUTABLE')
- [ENV['PARALLEL_TESTS_EXECUTABLE']]
+ if (executable = ENV['PARALLEL_TESTS_EXECUTABLE'])
+ [executable]
  else
  determine_executable
  end
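
Note: the executable override still comes from the `PARALLEL_TESTS_EXECUTABLE` environment variable; the refactor just reads and binds the value in one step instead of checking `ENV.include?` first. Roughly, assuming an illustrative override:

```ruby
ENV['PARALLEL_TESTS_EXECUTABLE'] = 'bin/rspec'  # illustrative override

# Mirrors the refactored method: wrap the override in an array when present;
# without it the runner falls back to its own executable detection.
executable = [ENV['PARALLEL_TESTS_EXECUTABLE']] if ENV['PARALLEL_TESTS_EXECUTABLE']
executable # => ["bin/rspec"]
```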
@@ -286,7 +291,7 @@ module ParallelTests
  end

  def report_process_command?(options)
- options[:verbose] || options[:verbose_process_command]
+ options[:verbose] || options[:verbose_command]
  end
  end
  end
@@ -1,4 +1,4 @@
  # frozen_string_literal: true
  module ParallelTests
- VERSION = '3.11.1'
+ VERSION = '4.0.0'
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: parallel_tests
  version: !ruby/object:Gem::Version
- version: 3.11.1
+ version: 4.0.0
  platform: ruby
  authors:
  - Michael Grosser
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-06-25 00:00:00.000000000 Z
+ date: 2022-11-05 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: parallel
@@ -68,8 +68,8 @@ licenses:
  - MIT
  metadata:
  bug_tracker_uri: https://github.com/grosser/parallel_tests/issues
- documentation_uri: https://github.com/grosser/parallel_tests/blob/v3.11.1/Readme.md
- source_code_uri: https://github.com/grosser/parallel_tests/tree/v3.11.1
+ documentation_uri: https://github.com/grosser/parallel_tests/blob/v4.0.0/Readme.md
+ source_code_uri: https://github.com/grosser/parallel_tests/tree/v4.0.0
  wiki_uri: https://github.com/grosser/parallel_tests/wiki
  post_install_message:
  rdoc_options: []
@@ -79,7 +79,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 2.5.0
+ version: 2.7.0
  required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="