forking_test_runner 0.1.2 → 0.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: c7dc622ff1b3350704d3ccf31cf25c1d51b13ee4
-  data.tar.gz: 5cdb2b8430b13547d2cafcb9cbaa752565fad7db
+  metadata.gz: 691b5e04b0f5d69105cfd566e00c7c080dcedba2
+  data.tar.gz: fac4175373591f336a5d15aa946c8333032aab49
 SHA512:
-  metadata.gz: 1e45ea05cbbb26bebb567c1462026648a980a0f4dde7bc0b5bad0093c85459a01e28d7ab5dcf7dd5edd72597e25ae742e455fc8973f2d55aef750c96e4f02a26
-  data.tar.gz: 9f27f7a7a11484da1548c838553e8d5b08249e405491d7d30f8f96a91d4e2351efa40a21e091d1395253deb6ccb8c29322d4f0f9c4ab09e653d17bdb94636369
+  metadata.gz: 169eaf23e55d0ef7930cd04dbab2d3017940c51fc4fafb0efe81fb1e27ae4954229d87f9cb3ea3fa9a901a2ed364ae78e72d886963bb0777333dff474d6f98b2
+  data.tar.gz: 2a3e69f9865c50d8cc46e26c9075a2a9169c5a3b80c1bc49a7a37b54c89ea03382b8a541fe3354389e6cf1bce0c697dfb9c22db3011d7f0ccace44b997d08d98
@@ -5,51 +5,4 @@ root = File.expand_path("../..", __FILE__)
 $LOAD_PATH << "#{root}/lib" if File.exist?("#{root}/Gemfile")
 
 require 'forking_test_runner'
-
-ForkingTestRunner.disable_minitest_autorun
-
-runtime_log = ForkingTestRunner.delete_argv("--runtime-log", ARGV)
-helper = ForkingTestRunner.delete_argv("--helper", ARGV) || "test/test_helper"
-
-require "./#{helper}"
-
-tests = ForkingTestRunner.find_tests_for_group(ARGV, runtime_log)
-puts "Running tests #{tests.map(&:first).join(" ")}"
-
-show_time = tests[0][1]
-
-clear = "------"
-results = tests.map do |file, expected|
-  puts "#{clear} >>> #{file}"
-  success = false
-  time = Benchmark.realtime do
-    success = ForkingTestRunner.run_test(file)
-  end
-  puts "Time: expected #{expected.round(2)}, actual #{time.round(2)}" if show_time
-  puts "#{clear} <<< #{file} ---- #{success ? "OK" : "Failed"}"
-  [file, time, expected, success]
-end
-
-puts "\nResults:"
-puts results.map { |f,_,_,r| "#{f}: #{r ? "OK" : "Fail"}"}
-
-if show_time
-  puts "Time: #{results.map { |_,time,expected,_| time - expected }.inject(:+).to_f.round(2)} diff to expected"
-end
-
-# log runtime and then curl it into the runtime log location
-if ENV["RECORD_RUNTIME"]
-  require 'tempfile'
-  slug = ENV.fetch("TRAVIS_REPO_SLUG").sub("/", "-")
-  id = ENV.fetch("TRAVIS_BUILD_NUMBER")
-  url = "https://amend.herokuapp.com/amend/#{slug}-#{id}"
-  data = results.map { |f,time,_,_| "#{f}:#{time.round(2)}" }.join("\n") << "\n"
-  Tempfile.open("runtime.log") do |f|
-    f.write(data)
-    f.close
-    result = `curl -X POST --data-binary @#{f.path} #{url}`
-    puts "amended runtime log\ncurl #{url} | sort > #{runtime_log}\nStatus: #{$?.success?}\nResponse: #{result}"
-  end
-end
-
-exit(results.map(&:last).all? ? 0 : 1)
+exit ForkingTestRunner.cli(ARGV)
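
With 0.2.0 the executable shrinks to a single delegation: option parsing and test running now live in ForkingTestRunner.cli, which returns the exit status (see results.map(&:last).all? ? 0 : 1 further down). A minimal sketch of driving that entry point directly; the test paths and values below are invented, and only the flags visible in this diff (--helper, --runtime-log, --record-runtime, --group, --groups) are assumed:

# Hypothetical invocation sketch, not part of the gem's diff.
require 'forking_test_runner'

# Run two explicit test files; everything after "--" stays in argv for minitest.
# cli loads test/test_helper by default (override with --helper path/to/helper).
status = ForkingTestRunner.cli(%w[test/a_test.rb test/b_test.rb -- -v])

# Or run slice 1 of 4 of a test directory, balanced by a recorded runtime log:
# status = ForkingTestRunner.cli(%w[--group 1 --groups 4 test --runtime-log runtime.log])

exit status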
@@ -1,8 +1,109 @@
 module ForkingTestRunner
+  CLEAR = "------"
+
   class << self
-    # This forces Rails to load all fixtures, then prevents it from deleting and then
-    # re-inserting all fixtures when a test is run.
-    # Saves us a couple of seconds when the test includes a call to fixtures :all.
+    def cli(argv)
+      disable_minitest_autorun
+      load_test_env(delete_argv("--helper", argv))
+
+      # figure out what we need to run
+      record_runtime = delete_argv("--record-runtime", argv)
+      runtime_log = delete_argv("--runtime-log", argv)
+      group, group_count, tests = extract_group_args(argv)
+      tests = find_tests_for_group(group, group_count, tests, runtime_log)
+      puts "Running tests #{tests.map(&:first).join(" ")}"
+
+      # run all the tests
+      results = tests.map do |file, expected|
+        puts "#{CLEAR} >>> #{file}"
+        time, success = benchmark { run_test(file) }
+        puts "Time: expected #{expected.round(2)}, actual #{time.round(2)}" if runtime_log
+        puts "#{CLEAR} <<< #{file} ---- #{success ? "OK" : "Failed"}"
+        [file, time, expected, success]
+      end
+
+      # pretty print the results
+      puts "\nResults:"
+      puts results.map { |f,_,_,r| "#{f}: #{r ? "OK" : "Fail"}"}
+
+      if runtime_log
+        # show how long they ran vs expected
+        diff = results.map { |_,time,expected,_| time - expected }.inject(:+).to_f
+        puts "Time: #{diff.round(2)} diff to expected"
+      end
+
+      if record_runtime
+        # store runtime log
+        log = runtime_log || 'runtime.log'
+        record_test_runtime(record_runtime, results, log)
+      end
+
+      # exit with success or failure
+      results.map(&:last).all? ? 0 : 1
+    end
+
+    private
+
+    def benchmark
+      result = false
+      time = Benchmark.realtime do
+        result = yield
+      end
+      return time, result
+    end
+
+    # log runtime via dumping or curling it into the runtime log location
+    def record_test_runtime(mode, results, log)
+      data = results.map { |test, time| "#{test}:#{time.round(2)}" }.join("\n") << "\n"
+
+      case mode
+      when 'simple'
+        File.write(log, data)
+      when 'amend'
+        slug = ENV.fetch("TRAVIS_REPO_SLUG").sub("/", "-")
+        id = ENV.fetch("TRAVIS_BUILD_NUMBER")
+        url = "https://amend.herokuapp.com/amend/#{slug}-#{id}"
+
+        require 'tempfile'
+        Tempfile.open("runtime.log") do |f|
+          f.write(data)
+          f.close
+          result = `curl -X POST --data-binary @#{f.path} #{url}`
+          puts "amended runtime log\ncurl #{url} | sort > #{log}\nStatus: #{$?.success?}\nResponse: #{result}"
+        end
+      else
+        raise "Unsupported record-runtime flag: #{mode}"
+      end
+    end
+
+    def extract_group_args(argv)
+      if argv.include?("--group")
+        # delete options we want while leaving others as they are (-v / --seed etc)
+        group, group_count = ['--group', '--groups'].map do |arg|
+          value = delete_argv(arg, argv) || raise("Did not find option #{arg}")
+          value.to_i
+        end
+        dir = argv.shift
+        raise "Unable to find directory #{dir.inspect}" unless File.exist?(dir.to_s)
+        tests = [dir]
+      else
+        group = 1
+        group_count = 1
+        size = argv.index("--") || argv.size
+        tests = argv.slice!(0, size)
+        argv.shift # remove --
+      end
+
+      [group, group_count, tests]
+    end
+
+    def load_test_env(helper=nil)
+      helper = helper || "test/test_helper"
+      require "./#{helper}"
+    end
+
+    # This forces Rails to load all fixtures, then prevents it from
+    # "deleting and re-inserting all fixtures" when a new connection is used (forked).
     def preload_fixtures
       return if @preloaded
       @preloaded = true
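
To make the new argument handling concrete, here is an illustrative sketch of what extract_group_args yields for two sample argv arrays. The paths are invented, and send is used only because the method sits below private in 0.2.0:

# Illustrative only; behavior read off extract_group_args above.
argv = ["--group", "2", "--groups", "4", "test"]
ForkingTestRunner.send(:extract_group_args, argv)  # => [2, 4, ["test"]]
# the "test" directory must exist, otherwise "Unable to find directory" is raised

argv = ["test/a_test.rb", "test/b_test.rb", "--", "--seed", "1234"]
ForkingTestRunner.send(:extract_group_args, argv)  # => [1, 1, ["test/a_test.rb", "test/b_test.rb"]]
argv                                               # => ["--seed", "1234"], left for minitest in the forked child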
@@ -28,7 +129,7 @@ module ForkingTestRunner
       toggle_minitest_autorun false
     end
 
-    def enabled_minitest_autorun
+    def enable_minitest_autorun
       toggle_minitest_autorun true
     end
 
@@ -38,36 +139,24 @@ module ForkingTestRunner
       child = fork do
         key = (ActiveRecord::VERSION::STRING >= "4.1.0" ? :test : "test")
         ActiveRecord::Base.establish_connection key
-        enabled_minitest_autorun
+        enable_minitest_autorun
         require "./#{file}"
       end
       Process.wait(child)
       $?.success?
     end
 
-    def find_tests_for_group(argv, runtime_log)
+    def find_tests_for_group(group, group_count, tests, runtime_log)
       require 'parallel_tests/test/runner'
 
-      if argv.include?("--group")
-        # delete options we want while leaving others as they are (-v / --seed etc)
-        group, number_of_groups = ['--group', '--groups'].map do |arg|
-          value = delete_argv(arg, argv) || raise("Did not find option #{arg}")
-          value.to_i
-        end
-        dir = ARGV.shift
-        raise "Unable to find directory #{dir.inspect}" unless File.exist?(dir.to_s)
-        tests = [dir]
-      else
-        group = 1
-        number_of_groups = 1
-        size = argv.index("--") || argv.size
-        tests = argv.slice!(0, size)
-        argv.shift # remove --
-      end
-
       group_by = (runtime_log ? :runtime : :filesize)
-      tests = ParallelTests::Test::Runner.send(:tests_with_size, tests, runtime_log: runtime_log, group_by: group_by)
-      groups = ParallelTests::Grouper.in_even_groups_by_size(tests, number_of_groups, {})
+      tests = ParallelTests::Test::Runner.send(
+        :tests_with_size,
+        tests,
+        runtime_log: runtime_log,
+        group_by: group_by
+      )
+      groups = ParallelTests::Grouper.in_even_groups_by_size(tests, group_count, {})
       group = groups[group - 1] || raise("Group #{group} not found")
 
       # return tests with runtime
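
find_tests_for_group no longer digs through argv itself; it receives the group, the group count, and the test list from extract_group_args and returns [test, expected_runtime] pairs, with nil runtimes when no runtime log is given. A sketch of the shape it returns, with invented file names and timings:

# Illustrative only; the return shape comes from the last line of find_tests_for_group.
ForkingTestRunner.send(:find_tests_for_group, 1, 2, ["test"], "runtime.log")
# => [["test/a_test.rb", 1.23], ["test/b_test.rb", 0.45]]
# without a runtime log the tests are grouped by file size and the runtimes are nil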
@@ -75,14 +164,12 @@ module ForkingTestRunner
       group.map { |test| [test, (tests[test] if group_by == :runtime)] }
     end
 
-    def delete_argv(arg, argv)
-      return unless index = argv.index(arg)
-      argv.delete_at(index)
+    def delete_argv(name, argv)
+      return unless index = argv.index(name)
       argv.delete_at(index)
+      argv.delete_at(index) || raise("Missing argument for #{name}")
     end
 
-    private
-
     def toggle_minitest_autorun(value)
       klass = begin
         require 'minitest/unit' # only exists on 4
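
delete_argv keeps its contract of returning the flag's value while removing both the flag and its value from argv, but it now raises instead of silently returning nil when the value is missing. It also sits below private in 0.2.0, so the calls in this sketch use send purely for illustration:

# Illustrative only.
argv = ["--runtime-log", "runtime.log", "test"]
ForkingTestRunner.send(:delete_argv, "--runtime-log", argv)  # => "runtime.log"
argv                                                         # => ["test"]

ForkingTestRunner.send(:delete_argv, "--helper", argv)       # => nil, flag not present
ForkingTestRunner.send(:delete_argv, "--group", ["--group"]) # raises "Missing argument for --group"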
@@ -1,3 +1,3 @@
 module ForkingTestRunner
-  VERSION = "0.1.2"
+  VERSION = "0.2.0"
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: forking_test_runner
 version: !ruby/object:Gem::Version
-  version: 0.1.2
+  version: 0.2.0
 platform: ruby
 authors:
 - Michael Grosser
@@ -145,7 +145,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: '0'
+      version: 2.0.0
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="