minitest-distributed 0.2.2 → 0.2.5

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 6a8b0499822e88334b7ce7d06210c43d3054485d4dbbc3170f97e485c5d78c3d
- data.tar.gz: 2e4c720af2bf722152ace13867b0cf66c58aafbf712ac1a5d4b9e308f0c2fd8f
+ metadata.gz: '0208858e32026a813e488ae30c72ae1ab9e274574aa55f5eb873f50db170a9d4'
+ data.tar.gz: 2e12cb8be3eb47009eb6aa30a5d58b9e7ef796a4e6851b02754fed3d3e261fdb
  SHA512:
- metadata.gz: 8c974a610c8770a9a0ff8c944780182a178fbe1d22737a53a474b9f8f2abcd90765aac1f6a0ee9134cddb1e19dca235bb124b7950585337579b7dba5d4ebf956
- data.tar.gz: 8337bf3b5c849d386ccbdb13e4cba6d725a274ead9893c60f3d60f9599e5ba7cde0e12ceea035219483ebd9aee0fc06ee9cc9e6a4747c08663f133dab30fd4d0
+ metadata.gz: e18ffe94425ae0726468df99f69d748a4c4d6744960d7f731079bd9dd2a927a924108d6a26e9e6383abe3d91bf311ae7619c666ae563605cacfd7f41c1b7de02
+ data.tar.gz: 83ad27805dd3285a398e9266f7ed9d3a0f7e67468d3a9b8020300c7cc81b89b94222b424aa0ec2e5cb3dd231563d75d1c342f55c9c4918435a490697a326318e
.github/dependabot.yml ADDED
@@ -0,0 +1,20 @@
+ version: 2
+ registries:
+   rubygems-server-pkgs-shopify-io:
+     type: rubygems-server
+     url: https://pkgs.shopify.io
+     username: ${{secrets.RUBYGEMS_SERVER_PKGS_SHOPIFY_IO_USERNAME}}
+     password: ${{secrets.RUBYGEMS_SERVER_PKGS_SHOPIFY_IO_PASSWORD}}
+   github-com:
+     type: git
+     url: https://github.com
+     username: ${{secrets.DEPENDENCIES_GITHUB_USER}}
+     password: ${{secrets.DEPENDENCIES_GITHUB_TOKEN}}
+ updates:
+ - package-ecosystem: bundler
+   directory: "/"
+   schedule:
+     interval: daily
+   open-pull-requests-limit: 100
+   insecure-external-code-execution: allow
+   registries: "*"
.github/workflows/ruby.yml CHANGED
@@ -10,7 +10,24 @@ name: Ruby
  on: push
 
  jobs:
- test:
+ test_30:
+ runs-on: ubuntu-latest
+ container: ruby:3.0
+
+ services:
+ redis:
+ image: redis
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Install dependencies
+ run: gem install bundler && bundle install
+ - name: Run tests
+ run: bin/rake test
+ env:
+ REDIS_URL: redis://redis:6379
+
+ test_26:
  runs-on: ubuntu-latest
  container: ruby:2.6
 
data/.rubocop.yml CHANGED
@@ -3,6 +3,8 @@ inherit_gem:
 
  require:
  - rubocop-sorbet
+ - rubocop-rake
+ - rubocop-minitest
 
  AllCops:
  TargetRubyVersion: 2.6
@@ -34,6 +36,8 @@ Sorbet/FalseSigil:
 
  Sorbet/TrueSigil:
  Enabled: true
+ Exclude:
+ - 'test/fixtures/*'
 
  Sorbet/EnforceSigilOrder:
  Enabled: true
data/Gemfile CHANGED
@@ -1,4 +1,5 @@
  # frozen_string_literal: true
+
  source "https://rubygems.org"
 
  # Specify your gem's dependencies in minitest-distributed.gemspec
@@ -10,3 +11,5 @@ gem "sorbet"
  gem "rubocop"
  gem "rubocop-shopify"
  gem "rubocop-sorbet"
+ gem "rubocop-minitest"
+ gem "rubocop-rake"
data/README.md CHANGED
@@ -1,5 +1,12 @@
  # minitest-distributed
 
+ [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](LICENSE.md)
+
+ [About this repo](#about-this-repo) | [Commands](#commands) | [How to use this repo](#how-to-use-this-repo) | [Contribute to this repo](#contribute-to-this-repo) | [License](#license)
+
+ ## About this repo
+ **Introduction:**
+
  `minitest-distributed` is a plugin for [minitest](https://github.com/seattlerb/minitest)
  for executing tests on a distributed set of unreliable workers.
 
@@ -16,14 +23,14 @@ flakiness. To combat flakiness, minitest-distributed implements resiliency
  patterns, like re-running a test on a different worker on failure, and a
  circuit breaker for misbehaving workers.
 
- ## Usage
-
- Add `minitest-distributed` to your `Gemfile`, and run `bundle install`. The
- plugin will be loaded by minitest automatically. The plugin exposes some
- command line arguments that you can use to influence its behavior. They can
- also be set using environment variables.
+ | | |
+ |----------------|--------------------------------------------------------------------------------------------------------------------------------------|
+ | Current status | Ongoing |
+ | Owner | [@Shopify/test-infra](https://github.com/orgs/Shopify/teams/test-infra) |
+ | Help | [#team-test-infra](https://shopify.slack.com/archives/team-test-infra) |
 
- ### Distributed invocation
+ ## Commands
+ **Distributed invocation**
 
  To actually run tests with multiple workers, you have to point every worker to
  a Redis coordinator, and use the same run identifier.
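
A minimal sketch of what that can look like in a Rakefile, using the `--coordinator` and `--run-id` options that `configuration.rb` defines further down this diff; the Redis URL and the `CI_BUILD_ID` environment variable below are placeholders, not part of the gem:

```ruby
# Rakefile sketch: every CI worker runs the same task, pointing at a shared
# Redis coordinator and reusing one run identifier so they join the same run.
# Setting MINITEST_COORDINATOR and MINITEST_RUN_ID in the environment works too.
require "rake/testtask"

Rake::TestTask.new(:test) do |t|
  t.libs << "test"
  t.test_files = FileList["test/**/*_test.rb"]
  t.options = "--coordinator=redis://redis.example.com:6379 " \
    "--run-id=#{ENV.fetch("CI_BUILD_ID", "local")}"
end
```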
@@ -52,7 +59,7 @@ Rake::TestTask.new(:test) do |t|
  end
  ```
 
- ### Worker retries
+ **Worker retries**
 
  Many CI systems offer the options to retry jobs that fail. When jobs are
  retried that were previously part of a worker cluster, all the retried jobs
@@ -61,7 +68,7 @@ during the previous run attempt. This is to make it faster to re-run tests
  that failed due to flakiness, or confirm that it was not flakiness that caused
  them to fail.
 
- ### Other optional command line arguments
+ **Other optional command line arguments**
 
  - `--test-timeout=SECONDS` or `ENV[MINITEST_TEST_TIMEOUT_SECONDS]` (default: 30s):
  the maximum amount a test is allowed to run before it times out. In a distributed
@@ -87,7 +94,7 @@ them to fail.
  the test run. The file should include test identifiers seperated by
  newlines.
 
- ## Limitations
+ **Limitations**
 
  **Parallel tests not supported:** Minitest comes bundled with a parallel test
  executor, which will run tests that are specifically tagged as such in
@@ -96,7 +103,20 @@ in parallel using separate processes, generally on different VMs. For this
  reason, tests marked as `parallel` will not be treated any differently than
  other tests.
 
- ## Development
+ ## How to use this repo
+ Add `minitest-distributed` to your `Gemfile`, and run `bundle install`. The
+ plugin will be loaded by minitest automatically. The plugin exposes some
+ command line arguments that you can use to influence its behavior. They can
+ also be set using environment variables.
+
+ ## Contribute to this repo
+ Bug reports and pull requests are welcome on GitHub at
+ https://github.com/Shopify/minitest-distributed. This project is intended to
+ be a safe, welcoming space for collaboration, and contributors are expected to
+ adhere to the [code of
+ conduct](https://github.com/Shopify/minitest-distributed/blob/master/CODE_OF_CONDUCT.md).
+
+ **Development**
 
  To bootstrap a local development environment:
 
@@ -109,7 +129,7 @@ To bootstrap a local development environment:
  - You can also run `bin/console` for an interactive prompt that will allow you
  to experiment.
 
- ### Releasing a new version
+ **Releasing a new version**
 
  - To install this gem onto your local machine, run `bin/rake install`.
  - Only people at Shopify can release a new version to
@@ -117,21 +137,6 @@ To bootstrap a local development environment:
  in `version.rb`, and merge to master. Shipit will take care of building the
  `.gem` bundle, and pushing it to rubygems.org.
 
- ## Contributing
-
- Bug reports and pull requests are welcome on GitHub at
- https://github.com/Shopify/minitest-distributed. This project is intended to
- be a safe, welcoming space for collaboration, and contributors are expected to
- adhere to the [code of
- conduct](https://github.com/Shopify/minitest-distributed/blob/master/CODE_OF_CONDUCT.md).
-
  ## License
-
  The gem is available as open source under the terms of the [MIT
  License](https://opensource.org/licenses/MIT).
-
- ## Code of Conduct
-
- Everyone interacting in the `minitest-distributed` project's codebases, issue
- trackers, chat rooms and mailing lists is expected to follow the [code of
- conduct](https://github.com/Shopify/minitest-distributed/blob/master/CODE_OF_CONDUCT.md).
data/Rakefile CHANGED
@@ -1,4 +1,5 @@
  # frozen_string_literal: true
+
  require "bundler/gem_tasks"
  require "rake/testtask"
 
@@ -1,8 +1,8 @@
  # typed: strict
  # frozen_string_literal: true
 
- require 'uri'
- require 'securerandom'
+ require "uri"
+ require "securerandom"
 
  module Minitest
  module Distributed
@@ -17,13 +17,13 @@ module Minitest
  sig { params(env: T::Hash[String, T.nilable(String)]).returns(T.attached_class) }
  def from_env(env = ENV.to_h)
  new(
- coordinator_uri: URI(env['MINITEST_COORDINATOR'] || 'memory:'),
- run_id: env['MINITEST_RUN_ID'] || SecureRandom.uuid,
- worker_id: env['MINITEST_WORKER_ID'] || SecureRandom.uuid,
- test_timeout_seconds: Float(env['MINITEST_TEST_TIMEOUT_SECONDS'] || DEFAULT_TEST_TIMEOUT_SECONDS),
- test_batch_size: Integer(env['MINITEST_TEST_BATCH_SIZE'] || DEFAULT_BATCH_SIZE),
- max_attempts: Integer(env['MINITEST_MAX_ATTEMPTS'] || DEFAULT_MAX_ATTEMPTS),
- max_failures: (max_failures_env = env['MINITEST_MAX_FAILURES']) ? Integer(max_failures_env) : nil,
+ coordinator_uri: URI(env["MINITEST_COORDINATOR"] || "memory:"),
+ run_id: env["MINITEST_RUN_ID"] || SecureRandom.uuid,
+ worker_id: env["MINITEST_WORKER_ID"] || SecureRandom.uuid,
+ test_timeout_seconds: Float(env["MINITEST_TEST_TIMEOUT_SECONDS"] || DEFAULT_TEST_TIMEOUT_SECONDS),
+ test_batch_size: Integer(env["MINITEST_TEST_BATCH_SIZE"] || DEFAULT_BATCH_SIZE),
+ max_attempts: Integer(env["MINITEST_MAX_ATTEMPTS"] || DEFAULT_MAX_ATTEMPTS),
+ max_failures: (max_failures_env = env["MINITEST_MAX_FAILURES"]) ? Integer(max_failures_env) : nil,
  )
  end
 
@@ -32,50 +32,50 @@ module Minitest
  configuration = from_env
  configuration.progress = options[:io].tty?
 
- opts.on('--coordinator=URI', "The URI pointing to the coordinator") do |uri|
+ opts.on("--coordinator=URI", "The URI pointing to the coordinator") do |uri|
  configuration.coordinator_uri = URI.parse(uri)
  end
 
- opts.on('--test-timeout=TIMEOUT', "The maximum run time for a single test in seconds") do |timeout|
+ opts.on("--test-timeout=TIMEOUT", "The maximum run time for a single test in seconds") do |timeout|
  configuration.test_timeout_seconds = Float(timeout)
  end
 
- opts.on('--max-attempts=ATTEMPTS', "The maximum number of attempts to run a test") do |attempts|
+ opts.on("--max-attempts=ATTEMPTS", "The maximum number of attempts to run a test") do |attempts|
  configuration.max_attempts = Integer(attempts)
  end
 
- opts.on('--test-batch-size=NUMBER', "The number of tests to process per batch") do |batch_size|
+ opts.on("--test-batch-size=NUMBER", "The number of tests to process per batch") do |batch_size|
  configuration.test_batch_size = Integer(batch_size)
  end
 
- opts.on('--max-failures=FAILURES', "The maximum allowed failure before aborting a run") do |failures|
+ opts.on("--max-failures=FAILURES", "The maximum allowed failure before aborting a run") do |failures|
  configuration.max_failures = Integer(failures)
  end
 
- opts.on('--run-id=ID', "The ID for this run shared between coordinated workers") do |id|
+ opts.on("--run-id=ID", "The ID for this run shared between coordinated workers") do |id|
  configuration.run_id = id
  end
 
- opts.on('--worker-id=ID', "The unique ID for this worker") do |id|
+ opts.on("--worker-id=ID", "The unique ID for this worker") do |id|
  configuration.worker_id = id
  end
 
  opts.on(
- '--[no-]retry-failures', "Retry failed and errored tests from a previous run attempt " \
- "with the same run ID (default: enabled)"
+ "--[no-]retry-failures", "Retry failed and errored tests from a previous run attempt " \
+ "with the same run ID (default: enabled)"
  ) do |enabled|
  configuration.retry_failures = enabled
  end
 
- opts.on('--[no-]progress', "Show progress during the test run") do |enabled|
+ opts.on("--[no-]progress", "Show progress during the test run") do |enabled|
  configuration.progress = enabled
  end
 
- opts.on('--exclude-file=FILE_PATH', "Specify a file of tests to be excluded from running") do |file_path|
+ opts.on("--exclude-file=FILE_PATH", "Specify a file of tests to be excluded from running") do |file_path|
  configuration.exclude_file = file_path
  end
 
- opts.on('--include-file=FILE_PATH', "Specify a file of tests to be included in the test run") do |file_path|
+ opts.on("--include-file=FILE_PATH", "Specify a file of tests to be included in the test run") do |file_path|
  configuration.include_file = file_path
  end
 
@@ -86,7 +86,7 @@ module Minitest
  extend T::Sig
 
  # standard minitest options don't need to be specified
- prop :coordinator_uri, URI::Generic, default: URI('memory:')
+ prop :coordinator_uri, URI::Generic, default: URI("memory:")
  prop :run_id, String, factory: -> { SecureRandom.uuid }
  prop :worker_id, String, factory: -> { SecureRandom.uuid }
  prop :test_timeout_seconds, Float, default: DEFAULT_TEST_TIMEOUT_SECONDS
@@ -102,9 +102,9 @@ module Minitest
  def coordinator
  @coordinator = T.let(@coordinator, T.nilable(Coordinators::CoordinatorInterface))
  @coordinator ||= case coordinator_uri.scheme
- when 'redis'
+ when "redis"
  Coordinators::RedisCoordinator.new(configuration: self)
- when 'memory'
+ when "memory"
  Coordinators::MemoryCoordinator.new(configuration: self)
  else
  raise NotImplementedError, "Unknown coordinator implementation: #{coordinator_uri.scheme}"
@@ -1,7 +1,8 @@
  # typed: strict
  # frozen_string_literal: true
 
- require 'redis'
+ require "redis"
+ require "set"
 
  module Minitest
  module Distributed
@@ -65,8 +66,8 @@ module Minitest
  @configuration = configuration
 
  @redis = T.let(nil, T.nilable(Redis))
- @stream_key = T.let(key('queue'), String)
- @group_name = T.let('minitest-distributed', String)
+ @stream_key = T.let(key("queue"), String)
+ @group_name = T.let("minitest-distributed", String)
  @local_results = T.let(ResultAggregate.new, ResultAggregate)
  @combined_results = T.let(nil, T.nilable(ResultAggregate))
  @reclaimed_timeout_tests = T.let(Set.new, T::Set[EnqueuedRunnable])
@@ -82,9 +83,9 @@ module Minitest
  sig { override.returns(ResultAggregate) }
  def combined_results
  @combined_results ||= begin
- stats_as_string = redis.mget(key('runs'), key('assertions'), key('passes'),
- key('failures'), key('errors'), key('skips'), key('requeues'), key('discards'),
- key('acks'), key('size'))
+ stats_as_string = redis.mget(key("runs"), key("assertions"), key("passes"),
+ key("failures"), key("errors"), key("skips"), key("requeues"), key("discards"),
+ key("acks"), key("size"))
 
  ResultAggregate.new(
  max_failures: configuration.max_failures,
@@ -124,13 +125,12 @@ module Minitest
  # fails, and the DEL never gets executed for followers.
  keys_deleted = redis.evalsha(
  register_consumergroup_script,
- keys: [stream_key, key('size'), key('acks')],
+ keys: [stream_key, key("size"), key("acks")],
  argv: [group_name],
  )
  keys_deleted == 0
-
  rescue Redis::CommandError => ce
- if ce.message.include?('BUSYGROUP')
+ if ce.message.include?("BUSYGROUP")
  # If Redis returns a BUSYGROUP error, it means that the consumer group already
  # exists. In our case, it means that another worker managed to successfully
  # run the XGROUP command, and will act as leader and publish the tests.
@@ -165,10 +165,10 @@ module Minitest
  adjust_combined_results(ResultAggregate.new(size: 0))
  T.let([], T::Array[Minitest::Runnable])
  else
- previous_failures, previous_errors, _deleted = redis.multi do
- redis.lrange(list_key(ResultType::Failed.serialize), 0, -1)
- redis.lrange(list_key(ResultType::Error.serialize), 0, -1)
- redis.del(list_key(ResultType::Failed.serialize), list_key(ResultType::Error.serialize))
+ previous_failures, previous_errors, _deleted = redis.multi do |pipeline|
+ pipeline.lrange(list_key(ResultType::Failed.serialize), 0, -1)
+ pipeline.lrange(list_key(ResultType::Error.serialize), 0, -1)
+ pipeline.del(list_key(ResultType::Failed.serialize), list_key(ResultType::Error.serialize))
  end
 
  # We set the `size` key to the number of tests we are planning to schedule.
@@ -199,8 +199,10 @@ module Minitest
  T.let([], T::Array[Minitest::Runnable])
  end
 
- redis.pipelined do
- tests.each { |test| redis.xadd(stream_key, class_name: T.must(test.class.name), method_name: test.name) }
+ redis.pipelined do |pipeline|
+ tests.each do |test|
+ pipeline.xadd(stream_key, { class_name: T.must(test.class.name), method_name: test.name })
+ end
  end
  end
 
@@ -238,7 +240,7 @@ module Minitest
 
  cleanup
  rescue Redis::CommandError => ce
- if ce.message.start_with?('NOGROUP')
+ if ce.message.start_with?("NOGROUP")
  # When a redis conumer group commands fails with a NOGROUP error, we assume the
  # consumer group was deleted by the first worker that detected the run is complete.
  # So this worker can exit its loop as well.
@@ -277,7 +279,7 @@ module Minitest
 
  sig { params(block: Integer).returns(T::Array[EnqueuedRunnable]) }
  def claim_fresh_runnables(block:)
- result = redis.xreadgroup(group_name, configuration.worker_id, stream_key, '>',
+ result = redis.xreadgroup(group_name, configuration.worker_id, stream_key, ">",
  block: block, count: configuration.test_batch_size)
  EnqueuedRunnable.from_redis_stream_claim(result.fetch(stream_key, []), configuration: configuration)
  end
@@ -290,6 +292,7 @@ module Minitest
  end
  def xclaim_messages(pending_messages, max_idle_time_ms:)
  return [] if pending_messages.empty?
+
  claimed = redis.xclaim(stream_key, group_name, configuration.worker_id,
  max_idle_time_ms, pending_messages.keys)
 
@@ -305,7 +308,7 @@ module Minitest
  max_idle_time_ms_with_jitter = max_idle_time_ms * rand(1.0...1.2)
 
  # Find all the pending messages to see if we want to attenpt to claim some.
- pending = redis.xpending(stream_key, group_name, '-', '+', configuration.test_batch_size)
+ pending = redis.xpending(stream_key, group_name, "-", "+", configuration.test_batch_size)
  return [] if pending.empty?
 
  active_consumers = Set[configuration.worker_id]
@@ -327,9 +330,9 @@ module Minitest
  # We can skip this if we already know that there is more than one active one.
  if active_consumers.size == 1
  begin
- redis.xinfo('consumers', stream_key, group_name).each do |consumer|
- if consumer.fetch('idle') < max_idle_time_ms
- active_consumers << consumer.fetch('name')
+ redis.xinfo("consumers", stream_key, group_name).each do |consumer|
+ if consumer.fetch("idle") < max_idle_time_ms
+ active_consumers << consumer.fetch("name")
  end
  end
  rescue Redis::CommandError
@@ -381,9 +384,9 @@ module Minitest
  # timeout. If the worker crashes between removing an item from the retry setm the test
  # will eventually be picked up by another worker.
  messages_in_retry_set = {}
- redis.multi do
+ redis.multi do |pipeline|
  active_messages.each do |key, message|
- messages_in_retry_set[key] = redis.srem(key('retry_set'), message.attempt_id)
+ messages_in_retry_set[key] = pipeline.srem(key("retry_set"), message.attempt_id)
  end
  end
 
@@ -405,17 +408,17 @@ module Minitest
 
  sig { params(results: ResultAggregate).void }
  def adjust_combined_results(results)
- updated = redis.multi do
- redis.incrby(key('runs'), results.runs)
- redis.incrby(key('assertions'), results.assertions)
- redis.incrby(key('passes'), results.passes)
- redis.incrby(key('failures'), results.failures)
- redis.incrby(key('errors'), results.errors)
- redis.incrby(key('skips'), results.skips)
- redis.incrby(key('requeues'), results.requeues)
- redis.incrby(key('discards'), results.discards)
- redis.incrby(key('acks'), results.acks)
- redis.incrby(key('size'), results.size)
+ updated = redis.multi do |pipeline|
+ pipeline.incrby(key("runs"), results.runs)
+ pipeline.incrby(key("assertions"), results.assertions)
+ pipeline.incrby(key("passes"), results.passes)
+ pipeline.incrby(key("failures"), results.failures)
+ pipeline.incrby(key("errors"), results.errors)
+ pipeline.incrby(key("skips"), results.skips)
+ pipeline.incrby(key("requeues"), results.requeues)
+ pipeline.incrby(key("discards"), results.discards)
+ pipeline.incrby(key("acks"), results.acks)
+ pipeline.incrby(key("size"), results.size)
  end
 
  @combined_results = ResultAggregate.new(max_failures: configuration.max_failures,
@@ -448,14 +451,14 @@ module Minitest
 
  # Try to commit all the results of this batch to Redis
  runnable_results = []
- redis.multi do
+ redis.multi do |pipeline|
  results.each do |enqueued_runnable, initial_result|
  runnable_results << enqueued_runnable.commit_result(initial_result) do |result_to_commit|
  if ResultType.of(result_to_commit) == ResultType::Requeued
- sadd_future = redis.sadd(key('retry_set'), enqueued_runnable.attempt_id)
+ sadd_future = pipeline.sadd(key("retry_set"), enqueued_runnable.attempt_id)
  EnqueuedRunnable::Result::Commit.new { sadd_future.value }
  else
- xack_future = redis.xack(stream_key, group_name, enqueued_runnable.entry_id)
+ xack_future = pipeline.xack(stream_key, group_name, enqueued_runnable.entry_id)
  EnqueuedRunnable::Result::Commit.new { xack_future.value == 1 }
  end
  end
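
The `redis.multi` and `redis.pipelined` hunks above stop calling commands on the client inside the block and call them on the yielded pipeline object instead, the block-argument style that newer redis-rb versions expect (the implicit style was deprecated in redis-rb 4.6 and removed in 5.x). A minimal standalone sketch of the new style, independent of this gem:

```ruby
require "redis"

redis = Redis.new(url: ENV.fetch("REDIS_URL", "redis://localhost:6379"))

# Old style (pre redis-rb 4.6): commands were issued on `redis` itself inside
# the block:
#
#   redis.multi do
#     redis.incrby("runs", 1)
#     redis.incrby("acks", 1)
#   end
#
# New style: commands go through the yielded transaction/pipeline object, and
# the block's replies come back as an array once EXEC (or the pipeline) runs.
runs, acks = redis.multi do |transaction|
  transaction.incrby("runs", 1)
  transaction.incrby("acks", 1)
end

puts "runs=#{runs} acks=#{acks}"
```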
@@ -19,10 +19,10 @@ module Minitest
  sig { params(xpending_result: T::Hash[String, T.untyped]).returns(T.attached_class) }
  def self.from_xpending(xpending_result)
  new(
- worker_id: xpending_result.fetch('consumer'),
- entry_id: xpending_result.fetch('entry_id'),
- elapsed_time_ms: xpending_result.fetch('elapsed'),
- attempt: xpending_result.fetch('count'),
+ worker_id: xpending_result.fetch("consumer"),
+ entry_id: xpending_result.fetch("entry_id"),
+ elapsed_time_ms: xpending_result.fetch("elapsed"),
+ attempt: xpending_result.fetch("count"),
  )
  end
  end
@@ -33,7 +33,7 @@ module Minitest
 
  sig { params(name: String).returns(T.class_of(Minitest::Runnable)) }
  def self.find_class(name)
- name.split('::')
+ name.split("::")
  .reduce(Object) { |ns, const| ns.const_get(const) } # rubocop:disable Sorbet/ConstantsFromStrings
  end
 
@@ -44,7 +44,7 @@ module Minitest
 
  sig { params(identifier: String).returns(Minitest::Runnable) }
  def self.from_identifier(identifier)
- class_name, method_name = identifier.split('#', 2)
+ class_name, method_name = identifier.split("#", 2)
  find_class(T.must(class_name)).new(T.must(method_name))
  end
  end
@@ -133,8 +133,8 @@ module Minitest
  attempt = pending_messages.key?(entry_id) ? pending_messages.fetch(entry_id).attempt + 1 : 1
 
  new(
- class_name: runnable_method_info.fetch('class_name'),
- method_name: runnable_method_info.fetch('method_name'),
+ class_name: runnable_method_info.fetch("class_name"),
+ method_name: runnable_method_info.fetch("method_name"),
  entry_id: entry_id,
  attempt: attempt,
  max_attempts: configuration.max_attempts,
@@ -1,7 +1,7 @@
  # typed: strict
  # frozen_string_literal: true
 
- require 'io/console'
+ require "io/console"
 
  module Minitest
  module Distributed
@@ -25,11 +25,11 @@ module Minitest
 
  sig { override.void }
  def start
- Signal.trap('WINCH') { @window_line_width = nil }
+ Signal.trap("WINCH") { @window_line_width = nil }
  super
  end
 
- # Note: due to batching and parallel tests, we have no guarantee that `prerecord`
+ # NOTE: due to batching and parallel tests, we have no guarantee that `prerecord`
  # and `record` will be called in succession for the same test without calls to
  # either method being interjected for other tests.
  #
@@ -74,7 +74,7 @@ module Minitest
 
  sig { void }
  def clear_current_line
- io.print("\r" + (' ' * window_line_width) + "\r")
+ io.print("\r" + (" " * window_line_width) + "\r")
  end
 
  sig { returns(Integer) }
@@ -1,8 +1,8 @@
  # typed: strict
  # frozen_string_literal: true
 
- require 'rexml/document'
- require 'fileutils'
+ require "rexml/document"
+ require "fileutils"
 
  module Minitest
  module Distributed
@@ -27,7 +27,7 @@ module Minitest
 
  sig { params(io: IO, options: T::Hash[Symbol, T.untyped]).void }
  def initialize(io, options)
- @io = io
+ super
  @report_path = T.let(options.fetch(:junitxml), String)
  @results = T.let(Hash.new { |hash, key| hash[key] = [] }, T::Hash[String, T::Array[Minitest::Result]])
  end
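
The `@io = io` → `super` change above forwards both arguments to `Minitest::Reporter#initialize`, which stores them as `@io` and `@options` (a bare `super` in Ruby re-passes the current method's arguments). A rough sketch of the effect, using a hypothetical subclass rather than the gem's own reporter:

```ruby
require "minitest"

# Hypothetical reporter mirroring the change above: with `super`, both io and
# options are stored by Minitest::Reporter#initialize; the old `@io = io`
# assignment left the options unset on the instance.
class ExampleReporter < Minitest::Reporter
  def initialize(io, options)
    super
    @report_path = options.fetch(:junitxml, "report.xml")
  end
end

reporter = ExampleReporter.new($stdout, { junitxml: "tmp/report.xml" })
p reporter.io       # the IO passed in ($stdout)
p reporter.options  # the options hash stored by the parent initializer
```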
@@ -48,7 +48,7 @@ module Minitest
  sig { override.void }
  def report
  FileUtils.mkdir_p(File.dirname(@report_path))
- File.open(@report_path, 'w+') do |file|
+ File.open(@report_path, "w+") do |file|
  format_document(generate_document, file)
  end
  end
@@ -56,9 +56,9 @@ module Minitest
  sig { returns(REXML::Document) }
  def generate_document
  doc = REXML::Document.new(nil, prologue_quote: :quote, attribute_quote: :quote)
- doc << REXML::XMLDecl.new('1.1', 'utf-8')
+ doc << REXML::XMLDecl.new("1.1", "utf-8")
 
- testsuites = doc.add_element('testsuites')
+ testsuites = doc.add_element("testsuites")
  results.each do |suite, tests|
  add_tests_to(testsuites, suite, tests)
  end
@@ -81,21 +81,21 @@ module Minitest
  lineno = T.must(results.first).source_location.last
 
  testsuite = testsuites.add_element(
- 'testsuite',
- { 'name' => suite, 'filepath' => relative_path }.merge(aggregate_suite_results(results))
+ "testsuite",
+ { "name" => suite, "filepath" => relative_path }.merge(aggregate_suite_results(results))
  )
 
  results.each do |test|
  attributes = {
- 'name' => test.name,
- 'classname' => suite,
- 'assertions' => test.assertions,
- 'time' => test.time,
+ "name" => test.name,
+ "classname" => suite,
+ "assertions" => test.assertions,
+ "time" => test.time,
  # 'run-command' => ... # TODO
  }
- attributes['lineno'] = lineno if lineno != -1
+ attributes["lineno"] = lineno if lineno != -1
 
- testcase_tag = testsuite.add_element('testcase', attributes)
+ testcase_tag = testsuite.add_element("testcase", attributes)
  add_failure_tag_if_needed(testcase_tag, test)
  end
  end
@@ -107,9 +107,9 @@ module Minitest
  # noop
  when ResultType::Error, ResultType::Failed
  failure = T.must(result.failure)
- failure_tag = testcase.add_element('failure',
- 'type' => result_type.serialize,
- 'message' => truncate_message(failure.message))
+ failure_tag = testcase.add_element("failure",
+ "type" => result_type.serialize,
+ "message" => truncate_message(failure.message))
  failure_tag.add_text(REXML::CData.new(result.to_s))
  else
  T.absurd(result_type)
@@ -118,17 +118,17 @@ module Minitest
 
  sig { params(message: String).returns(String) }
  def truncate_message(message)
- T.must(message.lines.first).chomp.gsub(/\e\[[^m]+m/, '')
+ T.must(message.lines.first).chomp.gsub(/\e\[[^m]+m/, "")
  end
 
  sig { params(results: T::Array[Minitest::Result]).returns(T::Hash[String, Numeric]) }
  def aggregate_suite_results(results)
  aggregate = Hash.new(0)
  results.each do |result|
- aggregate['assertions'] += result.assertions
- aggregate['failures'] += 1 if failure?(ResultType.of(result))
- aggregate['tests'] += 1
- aggregate['time'] += result.time
+ aggregate["assertions"] += result.assertions
+ aggregate["failures"] += 1 if failure?(ResultType.of(result))
+ aggregate["tests"] += 1
+ aggregate["time"] += result.time
  end
  aggregate
  end
@@ -25,8 +25,8 @@ module Minitest
  if result.time > test_timeout_seconds
  message << format(
  "\n\nThe test took %0.3fs to run, longer than the test timeout which is configured to be %0.1fs.\n" \
- "Another worker likely claimed ownership of this test, and will commit the result instead.\n" \
- "For best results, make sure that all your tests finish within %0.1fs.",
+ "Another worker likely claimed ownership of this test, and will commit the result instead.\n" \
+ "For best results, make sure that all your tests finish within %0.1fs.",
  result.time, test_timeout_seconds, test_timeout_seconds
  )
  end
@@ -1,7 +1,7 @@
  # typed: strict
  # frozen_string_literal: true
 
- require 'pathname'
+ require "pathname"
 
  module Minitest
  module Distributed
@@ -44,6 +44,7 @@ module Minitest
  sig { params(tests: T::Array[Minitest::Runnable]).returns(T::Array[Minitest::Runnable]) }
  def select_tests(tests)
  return tests if filters.empty?
+
  tests.flat_map do |runnable_method|
  filters.flat_map do |filter|
  filter.call(runnable_method)
@@ -3,6 +3,6 @@
 
  module Minitest
  module Distributed
- VERSION = "0.2.2"
+ VERSION = "0.2.5"
  end
  end
@@ -1,8 +1,8 @@
  # typed: strict
  # frozen_string_literal: true
 
- require 'minitest'
- require 'sorbet-runtime'
+ require "minitest"
+ require "sorbet-runtime"
 
  require "minitest/distributed/configuration"
  require "minitest/distributed/test_runner"
@@ -1,14 +1,14 @@
  # typed: true
  # frozen_string_literal: true
 
- require_relative './distributed'
+ require_relative "./distributed"
 
  module Minitest
  class << self
  extend T::Sig
 
  def plugin_distributed_options(opts, options)
- opts.on('--disable-distributed', "Disable the distributed plugin") do
+ opts.on("--disable-distributed", "Disable the distributed plugin") do
  options[:disable_distributed] = true
  end
 
@@ -4,9 +4,9 @@
  module Minitest
  class << self
  def plugin_junitxml_options(opts, options)
- options[:junitxml] = ENV['MINITEST_JUNITXML']
+ options[:junitxml] = ENV["MINITEST_JUNITXML"]
 
- opts.on('--junitxml=PATH', "Generate a JUnitXML report at the specified path") do |path|
+ opts.on("--junitxml=PATH", "Generate a JUnitXML report at the specified path") do |path|
  options[:junitxml] = path
  end
  end
@@ -14,7 +14,7 @@ module Minitest
  def plugin_junitxml_init(options)
  return if options[:junitxml].nil?
 
- require 'minitest/distributed/reporters/junitxml_reporter'
+ require "minitest/distributed/reporters/junitxml_reporter"
  reporter << Minitest::Distributed::Reporters::JUnitXMLReporter.new(options[:io], options)
  end
  end
data/sorbet/rbi/redis.rbi CHANGED
@@ -1,4 +1,5 @@
  # typed: true
+ # frozen_string_literal: true
 
  class Redis
  class Error < StandardError
@@ -21,10 +22,10 @@ class Redis
  sig { void }
  def flushdb; end
 
- sig { params(block: T.proc.void).returns(T::Array[T.untyped]) }
+ sig { params(block: T.proc.params(arg0: Redis::PipelinedConnection).void).returns(T::Array[T.untyped]) }
  def pipelined(&block); end
 
- sig { params(block: T.proc.void).returns(T::Array[T.untyped]) }
+ sig { params(block: T.proc.params(arg0: Redis::PipelinedConnection).void).returns(T::Array[T.untyped]) }
  def multi(&block); end
 
  sig { params(script: String, keys: T::Array[String], argv: T::Array[String]).returns(T.untyped) }
@@ -83,3 +84,23 @@ class Redis
  def xclaim(*); end
  def xinfo(*); end
  end
+
+ class Redis::PipelinedConnection
+ sig { params(key: String, value: T.untyped).returns(T.untyped) }
+ def sadd(key, value); end
+
+ sig { params(key: String, amount: Integer).returns(Integer) }
+ def incrby(key, amount); end
+
+ sig { params(key: String, value: T.untyped).returns(T::Boolean) }
+ def srem(key, value); end
+
+ sig { params(keys: String).void }
+ def del(*keys); end
+
+ sig { params(key: String, start: Integer, stop: Integer).void }
+ def lrange(key, start, stop); end
+
+ def xack(stream_key, group_name, *entry_ids); end
+ def xadd(key, value); end
+ end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: minitest-distributed
  version: !ruby/object:Gem::Version
- version: 0.2.2
+ version: 0.2.5
  platform: ruby
  authors:
  - Willem van Bergen
@@ -74,6 +74,7 @@ executables: []
  extensions: []
  extra_rdoc_files: []
  files:
+ - ".github/dependabot.yml"
  - ".github/workflows/ruby.yml"
  - ".gitignore"
  - ".rubocop.yml"