parallel_cucumber 0.2.0.pre.36 → 0.2.3

Sign up to get free protection for your applications and to get access to all the features.
@@ -9,27 +9,10 @@ module ParallelCucumber
9
9
  module Helper
10
10
  module Cucumber
11
11
  class << self
12
# Returns the identifiers (JSON-report keys, e.g. "features/x.feature:12")
# of the tests cucumber selects for the given options and argument string,
# determined via a dry run.
def selected_tests(options, args_string)
  puts "selected_tests (#{options.inspect} #{args_string.inspect})"
  # Use a distinct local name: `dry_run_report = dry_run_report(...)` would
  # shadow the private helper method and read as a self-assignment.
  report = dry_run_report(options, args_string)
  parse_json_report(report).keys
end
34
17
 
35
18
  def batch_mapped_files(options, batch, env)
@@ -52,7 +35,7 @@ module ParallelCucumber
52
35
  next
53
36
  end
54
37
  steps = [background['steps'], scenario['steps']].flatten.compact
55
- status = case
38
+ status = case # rubocop:disable Style/EmptyCaseCondition
56
39
  when steps.map { |step| step['result'] }.all? { |result| result['status'] == 'skipped' }
57
40
  Status::SKIPPED
58
41
  when steps.map { |step| step['result'] }.any? { |result| result['status'] == 'failed' }
@@ -71,6 +54,29 @@ module ParallelCucumber
71
54
 
72
55
  private
73
56
 
57
+ def dry_run_report(options, args_string)
58
+ options = options.dup
59
+ options = expand_profiles(options) unless config_file.nil?
60
+ options = remove_formatters(options)
61
+ content = nil
62
+
63
+ Tempfile.open(%w(dry-run .json)) do |f|
64
+ dry_run_options = "--dry-run --format json --out #{f.path}"
65
+
66
+ cmd = "cucumber #{options} #{dry_run_options} #{args_string}"
67
+ _stdout, stderr, status = Open3.capture3(cmd)
68
+ f.close
69
+
70
+ unless status == 0
71
+ cmd = "bundle exec #{cmd}" if ENV['BUNDLE_BIN_PATH']
72
+ raise("Can't generate dry run report: #{status}:\n\t#{cmd}\n\t#{stderr}")
73
+ end
74
+
75
+ content = File.read(f.path)
76
+ end
77
+ content
78
+ end
79
+
74
80
  def expand_profiles(options, env = {})
75
81
  e = ENV.to_h
76
82
  ENV.replace(e.merge(env))
@@ -2,39 +2,75 @@ module ParallelCucumber
2
2
  module Helper
3
3
  module Processes
4
4
  class << self
5
# Truthy (match index) on Windows rubies: native mswin, MinGW (incl.
# mingw32), or Cygwin; nil elsewhere.
# The original pattern also listed 'migw32' — a typo for 'mingw32', which
# /mingw/ already matches, so the dead alternation is dropped.
def ms_windows?
  RUBY_PLATFORM =~ /mswin|mingw|cygwin/
end
8
+
9
# Recursively copies +source+ to +dest+ via the platform shell, logging the
# command's combined stdout/stderr through +logger+ (debug level) if given.
# Paths are double-quoted so names containing spaces survive the shell.
def cp_rv(source, dest, logger = nil)
  cp_out = if ms_windows?
             %x(powershell cp "#{source}" "#{dest}" -recurse -force 2>&1)
           else
             # Use system cp -R because Ruby's FileUtils has crap diagnostics in weird situations.
             %x(cp -Rv "#{source}" "#{dest}" 2>&1)
           end
  logger.debug("Copy of #{source} to #{dest} said: #{cp_out}") if logger
end
18
+
5
19
# Builds a snapshot of the process table.
# On POSIX: returns a Hash keyed by pid (String) where each value is
# { signature: "<lstart> <command>", children: [child pids] }; the
# signature (start time + command line) lets callers detect pid reuse.
# NOTE(review): on Windows `system` returns true/false/nil — not a tree —
# so callers expecting the Hash (kill_tree/descendants) would break unless
# the script's output were captured and parsed. Confirm intent.
def ps_tree
  if ms_windows?
    system('powershell scripts/process_tree.ps1')
  else
    # Each ps line is "ppid pid lstart command"; split into at most 3 fields
    # so lstart+command stay together as the signature.
    %x(ps -ax -o ppid= -o pid= -o lstart= -o command=)
      .each_line.map { |l| l.strip.split(/ +/, 3) }.to_a
      .each_with_object({}) do |(ppid, pid, signature), tree|
        (tree[pid] ||= { children: [] })[:signature] = signature
        (tree[ppid] ||= { children: [] })[:children] << pid
      end
  end
end
13
31
 
14
# Sends +sig+ to every recorded descendant of +root+ (leaves first, via
# descendants' post-order walk), then to +root+ itself unconditionally.
# On Windows, taskkill /T kills the whole subtree instead.
def kill_tree(sig, root, logger, tree = nil, old_tree = nil)
  if ms_windows?
    system("taskkill /pid #{root} /T")
  else
    descendants(root, logger, tree, old_tree, 'kill') do |pid, node|
      begin
        logger.warn "Killing #{node}"
        Process.kill(sig, pid.to_i)
      rescue Errno::ESRCH
        nil # It's gone already? Hurrah!
      end
    end
  end
  # Let's kill pid unconditionally: descendants will go astray once reparented.
  begin
    logger.warn "Killing #{root} just in case"
    Process.kill(sig, root.to_i)
  rescue Errno::ESRCH
    nil # It's gone already? Hurrah!
  end
end
23
53
 
24
# True when neither +root+ nor any of its recorded descendants is still
# alive with a matching signature (descendants yields root itself last).
def all_pids_dead?(root, logger, tree = nil, old_tree = nil)
  # Note: returns from THIS function as well as descendants: short-circuit evaluation if any descendants remain.
  descendants(root, logger, tree, old_tree, 'dead?') { return false }
  true
end
29
59
 
30
60
# Walks old_tree, and yields all processes (alive or dead) that match the pid, start time, and command in
# the new tree. Note that this will fumble children created since old_tree was created, but this thing is
# riddled with race conditions anyway.
# Traversal is depth-first with children yielded before their parent, so
# kill_tree signals leaves first. +why+ and +level+ only decorate the
# warning logged when a pid has vanished from old_tree.
def descendants(pid, logger, tree = nil, old_tree = nil, why = '-', # rubocop:disable Metrics/ParameterLists
                level = 0, &block)
  tree ||= ps_tree
  old_tree ||= tree
  old_tree_node = old_tree[pid]
  unless old_tree_node
    logger.warn "== old tree node went missing - #{why} - skipping subtree level=#{level}: #{pid}"
    return
  end
  old_tree_node.fetch(:children, []).each { |c| descendants(c, logger, tree, old_tree, why, level + 1, &block) }
  # Only yield while the pid still exists AND its signature (start time +
  # command) matches — guards against the pid having been reused.
  yield(pid, old_tree_node) if tree[pid] && (tree[pid][:signature] == old_tree_node[:signature])
end
39
75
  end
40
76
  end
@@ -5,7 +5,7 @@ module ParallelCucumber
5
5
  class Queue
6
6
  attr_reader :name
7
7
 
8
- def initialize(queue_connection_params)
8
+ def initialize(queue_connection_params, append = '')
9
9
  # queue_connection_params:
10
10
  # `url--[name]`
11
11
  # url:
@@ -15,11 +15,11 @@ module ParallelCucumber
15
15
  # queue name, default is `queue`
16
16
  url, name = queue_connection_params
17
17
  @redis = Redis.new(url: url)
18
- @name = name
18
+ @name = name + append
19
19
  end
20
20
 
21
21
# Pushes a batch of test identifiers onto the Redis list backing this queue.
# A no-op for an empty batch, since LPUSH with no values would error.
def enqueue(elements)
  return if elements.empty?
  @redis.lpush(@name, elements)
end
24
24
 
25
25
  def dequeue
@@ -0,0 +1,18 @@
1
module ParallelCucumber
  # Registry of callbacks to run after each batch of tests completes.
  class Hooks
    # Class-level (class instance variable) list of registered callbacks.
    @after_batch_hooks ||= []

    class << self
      # Registers a callable (proc, lambda, or method object) to be fired
      # after each batch. Raises ArgumentError unless it responds to #call.
      # (Param renamed from `proc`, which shadowed Kernel#proc.)
      def register_after_batch(callback)
        raise(ArgumentError, 'Please provide a valid callback') unless callback.respond_to?(:call)
        @after_batch_hooks << callback
      end

      # Invokes every registered hook with the given arguments, in
      # registration order.
      def fire_after_batch_hooks(*args)
        @after_batch_hooks.each do |hook|
          hook.call(*args)
        end
      end
    end
  end
end
@@ -2,8 +2,46 @@ require 'logger'
2
2
 
3
3
  module ParallelCucumber
4
4
  class CustomLogger < Logger
5
# Forwards all arguments to Logger#initialize and sets up the state used by
# update_into to copy this log incrementally into another logger.
def initialize(*)
  super
  # Byte offset in the log device up to which update_into has already copied.
  @mark = 0
  # Don't want to log half-lines.
  @incomplete_line = nil
end
11
+
12
# Serialises access to this logger: yields self while holding the logger's
# mutex, so concurrent writers cannot interleave partial output.
def synch
  mutex.synchronize { yield self }
end
15
+
16
# Appends to +other_logger+ everything written to this logger's device since
# the previous call, under other_logger's lock. Only whole lines are copied:
# a trailing partial line is stashed in @incomplete_line and prepended to the
# next batch, so a line is never split across two updates.
def update_into(other_logger)
  # TODO: This should write the #teamcity block wrapper: update(other_logger, 'qa-w12> precheck') etc.
  @logdev.dev.fsync # Helpful, but inadequate: a child process might still have buffered stuff.
  other_logger.synch do |l|
    l << File.open(@logdev.filename || @logdev.dev.path) do |f|
      begin
        f.seek(@mark) # resume where the previous update_into left off
        lines = f.readlines
        if @incomplete_line && lines.count > 0
          lines[0] = @incomplete_line + lines[0]
          @incomplete_line = nil
        end
        # Hold back a final half-line until its terminator arrives.
        unless lines.last && lines.last.end_with?("\n", "\r")
          @incomplete_line = lines.pop
        end
        lines.join
      ensure
        @mark = f.tell # remember the copy point even if reading raised
      end
    end
  end
end
38
+
5
39
  private
6
40
 
41
# Lazily-created lock guarding synch.
# NOTE(review): `||=` is not atomic — two threads racing the very first call
# could briefly see different mutexes; create it eagerly in initialize if
# that ever matters.
def mutex
  @mutex ||= Mutex.new
end
44
+
7
45
  def format_message(severity, datetime, progname, msg)
8
46
  if @level == DEBUG
9
47
  "[#{datetime.strftime('%Y-%m-%d %H:%M:%S')}]\t#{progname}\t#{severity}\t#{msg.gsub(/\s+/, ' ').strip}\n"
@@ -8,93 +8,90 @@ module ParallelCucumber
8
8
  @options = options
9
9
 
10
10
  @logger = ParallelCucumber::CustomLogger.new(STDOUT)
11
+ load_external_files
11
12
  @logger.progname = 'Primary' # Longer than 'Main', to make the log file pretty.
12
13
  @logger.level = options[:debug] ? ParallelCucumber::CustomLogger::DEBUG : ParallelCucumber::CustomLogger::INFO
13
14
  end
14
15
 
16
# Loads every Ruby file listed in the :load_files option (hooks, patches,
# etc.), logging each path at debug level. No-op when the option is unset.
def load_external_files
  files = @options[:load_files]
  return if files.nil?
  files.each do |file|
    @logger.debug("Loading File: #{file}")
    load file
  end
end
+
15
24
  def run
16
- queue = Helper::Queue.new(@options[:queue_connection_params])
17
25
  @logger.debug("Connecting to Queue: #{@options[:queue_connection_params]}")
26
+ queue = Helper::Queue.new(@options[:queue_connection_params])
18
27
 
19
28
  unless queue.empty?
20
29
  @logger.error("Queue '#{queue.name}' is not empty")
21
30
  exit(1)
22
31
  end
23
32
 
24
- tests = []
25
- mm, ss = time_it do
26
- dry_run_report = Helper::Cucumber.dry_run_report(@options[:cucumber_options], @options[:cucumber_args])
27
- tests = Helper::Cucumber.parse_json_report(dry_run_report).keys
28
- end
29
- tests.shuffle!
30
- @logger.debug("Generating all tests took #{mm} minutes #{ss} seconds")
33
+ all_tests = Helper::Cucumber.selected_tests(@options[:cucumber_options], @options[:cucumber_args])
31
34
 
32
- if tests.empty?
35
+ if all_tests.empty?
33
36
  @logger.error('There are no tests to run')
34
37
  exit(1)
35
38
  end
36
39
 
37
- @logger.info("Adding #{tests.count} tests to Queue")
38
- queue.enqueue(tests)
40
+ count = all_tests.count
39
41
 
40
- if @options[:n] == 0
41
- @options[:n] = [1, @options[:env_variables].map { |_k, v| v.is_a?(Array) ? v.count : 0 }].flatten.max
42
- @logger.info("Inferred worker count #{@options[:n]} from env_variables option")
42
+ long_running_tests = if @options[:long_running_tests]
43
+ Helper::Cucumber.selected_tests(@options[:cucumber_options], @options[:long_running_tests])
44
+ else
45
+ []
46
+ end
47
+ first_tests = long_running_tests & all_tests
48
+ if !long_running_tests.empty? && first_tests.empty?
49
+ @logger.info("No long running tests found in common with main options: #{long_running_tests}")
43
50
  end
51
+ tests = first_tests + (all_tests - first_tests).shuffle
44
52
 
45
- number_of_workers = [@options[:n], tests.count].min
46
- unless number_of_workers == @options[:n]
47
- @logger.info(<<-LOG)
48
- Number of workers was overridden to #{number_of_workers}.
49
- Was requested more workers (#{@options[:n]}) than tests (#{tests.count})".
50
- LOG
53
+ @options[:directed_tests].each do |k, v|
54
+ directed_tests = Helper::Cucumber.selected_tests(@options[:cucumber_options], v)
55
+ if directed_tests.empty?
56
+ @logger.warn("Queue for #{k} is empty - nothing selected by #{v}")
57
+ else
58
+ directed_tests = (directed_tests & long_running_tests) + (directed_tests - long_running_tests).shuffle
59
+ @logger.debug("Connecting to Queue: _#{k}")
60
+ directed_queue = Helper::Queue.new(@options[:queue_connection_params], "_#{k}")
61
+ @logger.info("Adding #{directed_tests.count} tests to queue _#{k}")
62
+ directed_queue.enqueue(directed_tests)
63
+ tests -= directed_tests
64
+ end
51
65
  end
52
66
 
53
- if (@options[:batch_size] - 1) * number_of_workers >= tests.count
54
- original_batch_size = @options[:batch_size]
55
- @options[:batch_size] = [(tests.count.to_f / number_of_workers).floor, 1].max
56
- @logger.info(<<-LOG)
57
- Batch size was overridden to #{@options[:batch_size]}.
58
- Presumably it will be more optimal for #{tests.count} tests and #{number_of_workers} workers
59
- than #{original_batch_size}
60
- LOG
61
- end
67
+ @logger.info("Adding #{tests.count} tests to Queue")
68
+ queue.enqueue(tests) unless tests.empty?
69
+
70
+ number_of_workers = determine_work_and_batch_size(count)
62
71
 
63
- diff = []
64
- info = {}
72
+ unrun = []
73
+ status_totals = {}
65
74
  total_mm, total_ss = time_it do
66
- results = Helper::Command.wrap_block(@options[:log_decoration],
67
- @options[:log_decoration]['worker_block'] || 'workers',
68
- @logger) do
69
- finished = []
70
- Parallel.map(
71
- 0...number_of_workers,
72
- in_processes: number_of_workers,
73
- finish: -> (_, index, _) { @logger.info("Finished: #{finished[index] = index} #{finished - [nil]}") }
74
- ) do |index|
75
- Worker.new(@options, index).start(env_for_worker(@options[:env_variables], index))
76
- end.inject(:merge) # Returns hash of file:line to statuses + :worker-index to summary.
77
- end
78
- results ||= {}
75
+ results = run_parallel_workers(number_of_workers) || {}
79
76
  unrun = tests - results.keys
80
- @logger.error("Tests #{unrun.join(' ')} were not run") unless diff.empty?
77
+ @logger.error("Tests #{unrun.join(' ')} were not run") unless unrun.empty?
81
78
  @logger.error("Queue #{queue.name} is not empty") unless queue.empty?
82
79
 
83
- Helper::Command.wrap_block(
84
- @options[:log_decoration],
85
- 'Worker summary',
86
- @logger
87
- ) { results.find_all { |w| @logger.info("#{w.first} #{w.last.sort}") if w.first =~ /^:worker-/ } }
88
-
89
- info = Status.constants.map do |status|
80
+ status_totals = Status.constants.map do |status|
90
81
  status = Status.const_get(status)
91
82
  tests_with_status = results.select { |_t, s| s == status }.keys
92
83
  [status, tests_with_status]
93
84
  end.to_h
85
+
86
+ Helper::Command.wrap_block(@options[:log_decoration], 'Worker summary', @logger) do
87
+ results.find_all { |w| w.first =~ /^:worker-/ }.each { |w| @logger.info("#{w.first} #{w.last.sort}") }
88
+ end
89
+
90
+ report_by_group(results)
94
91
  end
95
92
 
96
93
  @logger.debug("SUMMARY=#{@options[:summary]}") if @options[:summary]
97
- info.each do |s, tt|
94
+ status_totals.each do |s, tt|
98
95
  next if tt.empty?
99
96
  @logger.info("Total: #{s.to_s.upcase} tests (#{tt.count}): #{tt.join(' ')}")
100
97
  filename = @options[:summary] && @options[:summary][s.to_s.downcase]
@@ -103,7 +100,78 @@ module ParallelCucumber
103
100
 
104
101
  @logger.info("\nTook #{total_mm} minutes #{total_ss} seconds")
105
102
 
106
- exit((diff + info[Status::FAILED] + info[Status::UNKNOWN]).empty? ? 0 : 1)
103
+ exit((unrun + status_totals[Status::FAILED] + status_totals[Status::UNKNOWN]).empty? ? 0 : 1)
104
+ end
105
+
106
# Rolls worker summaries up by :group (e.g. host), logging the per-group
# totals when more than one group took part in the run.
def report_by_group(results)
  totals = Hash.new { |h, k| h[k] = Hash.new(0) } # per-group counters default to 0

  Helper::Command.wrap_block(@options[:log_decoration], 'Worker summary', @logger) do
    worker_rows = results.find_all { |row| row.first =~ /^:worker-/ }
    worker_rows.each do |row|
      # row = [:worker-0, [[:batches, 7], [:group, "localhost2"], [:skipped, 7]]]
      summary = row.last
      group_name = summary[:group]
      next unless group_name
      summary.each { |(k, v)| totals[group_name][k] += v if v && k != :group }
      totals[group_name][:group] = {} unless totals[group_name].key?(:group)
      totals[group_name][:group][row.first] = 1
    end
  end

  @logger.info "== Groups key count #{totals.keys.count}"

  return unless totals.keys.count > 1

  Helper::Command.wrap_block(@options[:log_decoration], 'Group summary', @logger) do
    totals.each { |(k, v)| @logger.info("#{k} #{v.sort}") }
  end
end
128
+
129
# Launches +number_of_workers+ Worker instances on Parallel threads and
# merges their result hashes (test-id => status, plus :worker-N => summary).
# Returns nil when the map yields nothing to merge.
def run_parallel_workers(number_of_workers)
  Helper::Command.wrap_block(@options[:log_decoration],
                             @options[:log_decoration]['worker_block'] || 'workers',
                             @logger) do
    remaining = (0...number_of_workers).to_a
    map = Parallel.map(
      remaining.dup, # dup: the finish hook rebinds `remaining` while Parallel iterates
      in_threads: number_of_workers,
      finish: -> (_, ix, _) { @logger.synch { |l| l.info("Finished: #{ix} remaining: #{remaining -= [ix]}") } }
    ) do |index|
      ParallelCucumber::Worker
        .new(@options, index, @logger)
        .start(env_for_worker(@options[:env_variables], index))
    end
    map.inject(:merge) # Returns hash of file:line to statuses + :worker-index to summary.
  end
end
146
+
147
# Works out how many workers to launch for +count+ tests. Side effects:
# may rewrite @options[:n] (inferred from array-valued env_variables when 0)
# and @options[:batch_size] (shrunk so no worker sits idle).
# Returns the number of workers.
def determine_work_and_batch_size(count)
  if @options[:n] == 0
    # Infer worker count from the longest array among env_variables values.
    @options[:n] = [1, @options[:env_variables].map { |_k, v| v.is_a?(Array) ? v.count : 0 }].flatten.max
    @logger.info("Inferred worker count #{@options[:n]} from env_variables option")
  end

  number_of_workers = [@options[:n], count].min
  unless number_of_workers == @options[:n]
    # (Stray `".` typo removed from the end of this message.)
    @logger.info(<<-LOG)
      Number of workers was overridden to #{number_of_workers}.
      More workers (#{@options[:n]}) requested than tests (#{count}).
    LOG
  end

  @logger.info(<<-LOG)
    Number of workers is #{number_of_workers}.
  LOG

  if (@options[:batch_size] - 1) * number_of_workers >= count
    original_batch_size = @options[:batch_size]
    # Shrink batches so every worker gets at least one batch of work.
    @options[:batch_size] = [(count.to_f / number_of_workers).floor, 1].max
    @logger.info(<<-LOG)
      Batch size was overridden to #{@options[:batch_size]}.
      Presumably it will be more optimal for #{count} tests and #{number_of_workers} workers
      than #{original_batch_size}
    LOG
  end
  number_of_workers
end
108
176
 
109
177
  private