polyrun 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CODE_OF_CONDUCT.md +31 -0
- data/CONTRIBUTING.md +84 -0
- data/LICENSE +21 -0
- data/README.md +140 -0
- data/SECURITY.md +27 -0
- data/bin/polyrun +6 -0
- data/docs/SETUP_PROFILE.md +106 -0
- data/lib/polyrun/cli/coverage_commands.rb +150 -0
- data/lib/polyrun/cli/coverage_merge_io.rb +124 -0
- data/lib/polyrun/cli/database_commands.rb +149 -0
- data/lib/polyrun/cli/env_commands.rb +43 -0
- data/lib/polyrun/cli/helpers.rb +113 -0
- data/lib/polyrun/cli/init_command.rb +99 -0
- data/lib/polyrun/cli/plan_command.rb +134 -0
- data/lib/polyrun/cli/prepare_command.rb +71 -0
- data/lib/polyrun/cli/prepare_recipe.rb +77 -0
- data/lib/polyrun/cli/queue_command.rb +101 -0
- data/lib/polyrun/cli/quick_command.rb +13 -0
- data/lib/polyrun/cli/report_commands.rb +94 -0
- data/lib/polyrun/cli/run_shards_command.rb +88 -0
- data/lib/polyrun/cli/run_shards_plan_boot_phases.rb +91 -0
- data/lib/polyrun/cli/run_shards_plan_options.rb +45 -0
- data/lib/polyrun/cli/run_shards_planning.rb +124 -0
- data/lib/polyrun/cli/run_shards_run.rb +168 -0
- data/lib/polyrun/cli/start_bootstrap.rb +99 -0
- data/lib/polyrun/cli/timing_command.rb +31 -0
- data/lib/polyrun/cli.rb +184 -0
- data/lib/polyrun/config.rb +61 -0
- data/lib/polyrun/coverage/cobertura_zero_lines.rb +32 -0
- data/lib/polyrun/coverage/collector.rb +184 -0
- data/lib/polyrun/coverage/collector_finish.rb +95 -0
- data/lib/polyrun/coverage/filter.rb +22 -0
- data/lib/polyrun/coverage/formatter.rb +115 -0
- data/lib/polyrun/coverage/merge/formatters.rb +181 -0
- data/lib/polyrun/coverage/merge/formatters_html.rb +55 -0
- data/lib/polyrun/coverage/merge.rb +127 -0
- data/lib/polyrun/coverage/merge_fragment_meta.rb +47 -0
- data/lib/polyrun/coverage/merge_merge_two.rb +117 -0
- data/lib/polyrun/coverage/rails.rb +128 -0
- data/lib/polyrun/coverage/reporting.rb +41 -0
- data/lib/polyrun/coverage/result.rb +18 -0
- data/lib/polyrun/coverage/track_files.rb +141 -0
- data/lib/polyrun/data/cached_fixtures.rb +122 -0
- data/lib/polyrun/data/factory_counts.rb +35 -0
- data/lib/polyrun/data/factory_instrumentation.rb +50 -0
- data/lib/polyrun/data/fixtures.rb +68 -0
- data/lib/polyrun/data/parallel_provisioning.rb +93 -0
- data/lib/polyrun/data/snapshot.rb +84 -0
- data/lib/polyrun/database/clone_shards.rb +81 -0
- data/lib/polyrun/database/provision.rb +72 -0
- data/lib/polyrun/database/shard.rb +63 -0
- data/lib/polyrun/database/url_builder/connection/infer.rb +49 -0
- data/lib/polyrun/database/url_builder/connection/url_builders.rb +43 -0
- data/lib/polyrun/database/url_builder/connection.rb +191 -0
- data/lib/polyrun/database/url_builder/template_prepare.rb +21 -0
- data/lib/polyrun/database/url_builder.rb +160 -0
- data/lib/polyrun/debug.rb +81 -0
- data/lib/polyrun/env/ci.rb +65 -0
- data/lib/polyrun/log.rb +70 -0
- data/lib/polyrun/minitest.rb +17 -0
- data/lib/polyrun/partition/constraints.rb +69 -0
- data/lib/polyrun/partition/hrw.rb +33 -0
- data/lib/polyrun/partition/min_heap.rb +64 -0
- data/lib/polyrun/partition/paths.rb +28 -0
- data/lib/polyrun/partition/paths_build.rb +128 -0
- data/lib/polyrun/partition/plan.rb +189 -0
- data/lib/polyrun/partition/plan_lpt.rb +49 -0
- data/lib/polyrun/partition/plan_sharding.rb +48 -0
- data/lib/polyrun/partition/stable_shuffle.rb +18 -0
- data/lib/polyrun/prepare/artifacts.rb +40 -0
- data/lib/polyrun/prepare/assets.rb +57 -0
- data/lib/polyrun/queue/file_store.rb +199 -0
- data/lib/polyrun/queue/file_store_pending.rb +48 -0
- data/lib/polyrun/quick/assertions.rb +32 -0
- data/lib/polyrun/quick/errors.rb +6 -0
- data/lib/polyrun/quick/example_group.rb +66 -0
- data/lib/polyrun/quick/example_runner.rb +93 -0
- data/lib/polyrun/quick/matchers.rb +156 -0
- data/lib/polyrun/quick/reporter.rb +42 -0
- data/lib/polyrun/quick/runner.rb +180 -0
- data/lib/polyrun/quick.rb +1 -0
- data/lib/polyrun/railtie.rb +7 -0
- data/lib/polyrun/reporting/junit.rb +125 -0
- data/lib/polyrun/reporting/junit_emit.rb +58 -0
- data/lib/polyrun/reporting/rspec_junit.rb +39 -0
- data/lib/polyrun/rspec.rb +15 -0
- data/lib/polyrun/templates/POLYRUN.md +45 -0
- data/lib/polyrun/templates/ci_matrix.polyrun.yml +14 -0
- data/lib/polyrun/templates/minimal_gem.polyrun.yml +13 -0
- data/lib/polyrun/templates/rails_prepare.polyrun.yml +31 -0
- data/lib/polyrun/timing/merge.rb +35 -0
- data/lib/polyrun/timing/summary.rb +25 -0
- data/lib/polyrun/version.rb +3 -0
- data/lib/polyrun.rb +58 -0
- data/polyrun.gemspec +37 -0
- data/sig/polyrun/cli.rbs +6 -0
- data/sig/polyrun/config.rbs +20 -0
- data/sig/polyrun/debug.rbs +12 -0
- data/sig/polyrun/log.rbs +12 -0
- data/sig/polyrun/minitest.rbs +5 -0
- data/sig/polyrun/quick.rbs +19 -0
- data/sig/polyrun/rspec.rbs +5 -0
- data/sig/polyrun.rbs +11 -0
- metadata +288 -0
|
module Polyrun
  module Data
    # Process-local memoization for expensive fixture setup (+register+ / +cached+).
    # Intended for +before(:suite)+ or a support file so each parallel **process**
    # builds a fixture exactly once; NOT safe for concurrent threads without
    # external locking (see {ParallelProvisioning}).
    #
    # Example:
    #
    #   Polyrun::Data::CachedFixtures.register(:admin) { User.create!(email: "a@example.com") }
    #   Polyrun::Data::CachedFixtures.fetch(:admin) # => same object
    #
    module CachedFixtures
      # :nodoc:
      class Cache
        attr_reader :store

        def initialize
          @store = {}
          @stats = {}
        end

        # Memoizing fetch: builds (and times) the value on first access,
        # bumps the hit counter on every later access.
        def fetch(key)
          id = key.to_s
          if store.key?(id)
            @stats[id][:hits] += 1
            return store[id]
          end

          started_at = Process.clock_gettime(Process::CLOCK_MONOTONIC)
          value = yield
          elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - started_at
          store[id] = value
          @stats[id] = {build_time: elapsed, hits: 0}
          value
        end

        # Drops all cached values and their stats.
        def clear
          store.clear
          @stats.clear
        end

        # Copy of the per-key stats; each entry is duplicated so callers
        # cannot mutate the live counters.
        def stats_snapshot
          @stats.transform_values(&:dup)
        end
      end

      # Mutable process-local state lives on a plain object so the singleton class
      # avoids class instance variables (ThreadSafety/ClassInstanceVariable);
      # still one cache per process, not thread-safe for concurrent threads.
      class Registry
        attr_accessor :disabled
        attr_reader :cache, :callbacks

        def initialize
          @disabled = false
          @cache = Cache.new
          @callbacks = {before_reset: [], after_reset: []}
        end
      end
      private_constant :Registry

      REGISTRY = Registry.new
      private_constant :REGISTRY

      class << self
        # Yields once per +id+ per process; when disabled, always yields.
        def fetch(id, &block)
          return yield if disabled?

          REGISTRY.cache.fetch(id.to_s, &block)
        end

        alias_method :register, :fetch

        # Cached value for +id+, or nil when nothing is stored under that key.
        def cached(id)
          key = id.to_s
          REGISTRY.cache.store[key] if REGISTRY.cache.store.key?(key)
        end

        # Clears the cache; fires +before_reset+ hooks first and +after_reset+
        # hooks afterwards, then discards both hook lists (hooks run once).
        def reset!
          REGISTRY.callbacks[:before_reset].each(&:call)
          REGISTRY.cache.clear
          REGISTRY.callbacks[:after_reset].each(&:call)
          REGISTRY.callbacks[:before_reset].clear
          REGISTRY.callbacks[:after_reset].clear
        end

        def before_reset(&block)
          REGISTRY.callbacks[:before_reset] << block unless block.nil?
        end

        def after_reset(&block)
          REGISTRY.callbacks[:after_reset] << block unless block.nil?
        end

        def disable!
          REGISTRY.disabled = true
        end

        def enable!
          REGISTRY.disabled = false
        end

        def disabled?
          REGISTRY.disabled == true
        end

        def stats
          REGISTRY.cache.stats_snapshot
        end

        # Human-readable build-time / hit-count report, one line per cached key.
        def format_stats_report(title: "Polyrun cached fixtures")
          report = [title]
          REGISTRY.cache.stats_snapshot.each do |key, entry|
            report << format("  %-40s build: %0.4fs hits: %d", key, entry[:build_time], entry[:hits])
          end
          report.join("\n") + "\n"
        end
      end
    end
  end
end
module Polyrun
  module Data
    # Lightweight per-example factory/build counters with zero dependencies.
    # Call +reset!+ in +before(:suite)+ or +setup+, +record+ inside factory
    # helpers, and +format_summary+ in +after(:suite)+.
    module FactoryCounts
      class << self
        # Drops all recorded counts.
        def reset!
          @counts = Hash.new(0)
        end

        # Increments the counter for +factory_name+ (stringified).
        def record(factory_name)
          storage[factory_name.to_s] += 1
        end

        # Defensive copy of the raw name => count map.
        def counts
          storage.dup
        end

        # Top +top+ factories by descending count, formatted as
        # two-space-indented "name: n" lines.
        def summary_lines(top: 20)
          storage.sort_by { |_, n| -n }.first(top).map { |name, n| "  #{name}: #{n}" }
        end

        # Full report: +title+ line followed by +summary_lines+, newline-terminated.
        def format_summary(title: "Polyrun factory counts")
          ([title] + summary_lines).join("\n") + "\n"
        end

        private

        # Lazily-initialized counter hash; the original duplicated this
        # `@counts ||= Hash.new(0)` guard in three public methods.
        # Default 0 lets += work on unseen keys.
        def storage
          @counts ||= Hash.new(0)
        end
      end
    end
  end
end
module Polyrun
  module Data
    # Opt-in FactoryBot hook so {#FactoryCounts} sees every factory run (minimal patch).
    # Requires the +factory_bot+ gem and must run after FactoryBot is loaded.
    #
    #   require "factory_bot"
    #   Polyrun::Data::FactoryInstrumentation.instrument_factory_bot!
    module FactoryInstrumentation
      class << self
        # Prepends a counting hook onto the FactoryBot runner class.
        # Returns true when instrumentation is (or already was) in place,
        # false when FactoryBot is absent or no runner class was found.
        # Idempotent: a marker ivar on the runner class prevents double-patching.
        def instrument_factory_bot!
          return false unless defined?(FactoryBot)

          runner = resolve_factory_runner_class
          return false if runner.nil?
          return true if patched?(runner)

          hook = Module.new do
            def run(...)
              Polyrun::Data::FactoryCounts.record(name)
              super
            end
          end
          runner.prepend(hook)
          runner.instance_variable_set(:@polyrun_factory_instrumented, true)
          true
        end

        # True once +instrument_factory_bot!+ has patched the runner class.
        def instrumented?
          return false unless defined?(FactoryBot)

          runner = resolve_factory_runner_class
          !runner.nil? && patched?(runner)
        end

        private

        # Checks the idempotency marker set by +instrument_factory_bot!+.
        def patched?(klass)
          klass.instance_variable_defined?(:@polyrun_factory_instrumented) &&
            klass.instance_variable_get(:@polyrun_factory_instrumented)
        end

        def resolve_factory_runner_class
          defined?(FactoryBot::Factory) ? FactoryBot::Factory : nil
        end
      end
    end
  end
end
require "yaml"

module Polyrun
  module Data
    # Declarative YAML fixture batches (**YAML → table → rows**).
    # Polyrun does **not** ship a seed/register loader DSL—only **stdlib YAML** + iteration helpers.
    # Typical layout: +spec/fixtures/polyrun/*.yml+ with top-level keys = table names.
    #
    #   users:
    #     - name: Ada
    #       email: ada@example.com
    module Fixtures
      module_function

      # Parses one YAML file; blank/empty documents come back as an empty Hash.
      def load_yaml(path)
        YAML.safe_load_file(path, permitted_classes: [Symbol], aliases: true) || {}
      end

      # Returns { "batch_name" => { "table" => [rows] } } for every +.yml+ under +dir+ (recursive).
      def load_directory(dir)
        batches = {}
        Dir.glob(File.join(dir, "**", "*.yml")).sort.each do |path|
          batches[File.basename(path, ".*")] = load_yaml(path)
        end
        batches
      end

      # Iterates each table in a single batch hash. Skips keys starting with "_".
      # Without a block, returns an Enumerator over the same pairs.
      def each_table(batch)
        return enum_for(:each_table, batch) unless block_given?

        batch.each do |name, rows|
          table = name.to_s
          next if table.start_with?("_")

          raise Polyrun::Error, "fixtures: #{table} must be an Array of rows" unless rows.is_a?(Array)

          yield(table, rows)
        end
      end

      # Loads all batches from +dir+ and yields (batch_name, table, rows).
      def each_table_in_directory(dir)
        return enum_for(:each_table_in_directory, dir) unless block_given?

        load_directory(dir).each do |batch_name, batch|
          each_table(batch) { |table, rows| yield(batch_name, table, rows) }
        end
      end

      # Bulk insert YAML rows via ActiveRecord (batch load optimization). Requires ActiveRecord
      # and a +connection+ that responds to +insert_all(table_name, records)+ (Rails 6+).
      def apply_insert_all!(batch, connection: nil)
        unless defined?(ActiveRecord::Base)
          raise Polyrun::Error, "Fixtures.apply_insert_all! requires ActiveRecord"
        end

        conn = connection || ActiveRecord::Base.connection
        each_table(batch) do |table, rows|
          conn.insert_all(table, rows) unless rows.empty?
        end
      end
    end
  end
end
module Polyrun
  module Data
    # Branching helpers for **serial** vs **parallel worker** test DB setup (seeds, truncate).
    # Polyrun does not call Rails +truncate+ or +load_seed+ for you — wire those in the callbacks you assign.
    #
    # Typical split (empty parallel DBs get seeds only; serial run truncates then seeds):
    #
    #   Polyrun::Data::ParallelProvisioning.configure do |c|
    #     c.serial { replant_and_load_seed }
    #     c.parallel_worker { load_seed_only }
    #   end
    #   # In spec_helper after configure:
    #   Polyrun::Data::ParallelProvisioning.run_suite_hooks!
    #
    # Or use {Polyrun::RSpec.install_parallel_provisioning!} (+before(:suite)+) or {Polyrun::Minitest.install_parallel_provisioning!}
    # (+require+ +polyrun/minitest+ from +test/test_helper.rb+).
    module ParallelProvisioning
      # Holds the two user-supplied hooks assigned via {ParallelProvisioning.configure}.
      class Configuration
        attr_accessor :serial_hook, :parallel_worker_hook

        # Registers the hook for single-process (serial) runs.
        def serial(&block)
          self.serial_hook = block unless block.nil?
        end

        # Registers the hook for runs that are one of several shards.
        def parallel_worker(&block)
          self.parallel_worker_hook = block unless block.nil?
        end
      end

      # Plain holder object so the singleton class carries no class ivars.
      class Storage
        attr_accessor :configuration

        def initialize
          @configuration = Configuration.new
        end
      end
      private_constant :Storage

      STORAGE = Storage.new
      private_constant :STORAGE

      class << self
        def configure
          yield configuration
        end

        def configuration
          STORAGE.configuration
        end

        def reset_configuration!
          STORAGE.configuration = Configuration.new
        end

        # True when multiple shards are in use ({Database::Shard} sets +POLYRUN_SHARD_TOTAL+).
        def parallel_workers?
          shard_total > 1
        end

        # 0-based worker index; prefers +POLYRUN_SHARD_INDEX+, else derives from
        # +TEST_ENV_NUMBER+ (parallel_tests). Unparseable values fall back to 0.
        def shard_index
          explicit = ENV["POLYRUN_SHARD_INDEX"]
          return Integer(explicit) if explicit && !explicit.to_s.empty?

          ten = ENV["TEST_ENV_NUMBER"]
          return 0 if ten.to_s.empty? || ten == "0"

          Integer(ten) - 1
        rescue ArgumentError
          0
        end

        # Total shard count from +POLYRUN_SHARD_TOTAL+; defaults to 1 when
        # unset, empty, or unparseable.
        def shard_total
          raw = ENV["POLYRUN_SHARD_TOTAL"]
          return 1 if raw.nil? || raw.to_s.empty?

          Integer(raw)
        rescue ArgumentError
          1
        end

        # Runs +parallel_worker_hook+ when {#parallel_workers?}, else +serial_hook+. No-op if the chosen hook is nil.
        def run_suite_hooks!
          hook = parallel_workers? ? configuration.parallel_worker_hook : configuration.serial_hook
          hook&.call
        end
      end
    end
  end
end
require "fileutils"
require "open3"

module Polyrun
  module Data
    # PostgreSQL data snapshots via +pg_dump+ / +psql+ (no +pg+ gem). Configure with ENV or explicit args.
    # Non-Postgres adapters: use native backup/export tools; not covered here.
    module SqlSnapshot
      module_function

      # Connection defaults pulled from the conventional libpq ENV variables.
      def default_connection
        {
          host: ENV["PGHOST"],
          port: ENV["PGPORT"],
          username: ENV["PGUSER"] || ENV["USER"],
          database: ENV["PGDATABASE"]
        }
      end

      # Writes data-only SQL to +root+/spec/fixtures/sql_snapshots/<name>.sql
      # and returns that path. Raises Polyrun::Error when pg_dump fails or no
      # database is configured.
      def create!(name, root:, database: nil, username: nil, host: nil, port: nil)
        database ||= default_connection[:database]
        raise Polyrun::Error, "SqlSnapshot: set database: or PGDATABASE" unless database

        username ||= default_connection[:username]
        target = File.join(root, "spec", "fixtures", "sql_snapshots", "#{name}.sql")
        FileUtils.mkdir_p(File.dirname(target))

        dump_cmd = ["pg_dump", "--data-only", "-U", username]
        dump_cmd.push("-h", host) if host && !host.to_s.empty?
        dump_cmd.push("-p", port.to_s) if port && !port.to_s.empty?
        dump_cmd << database

        sql, stderr, status = Open3.capture3(*dump_cmd)
        raise Polyrun::Error, "pg_dump failed: #{stderr}" unless status.success?

        File.write(target, sql)
        target
      end

      # Truncates listed tables (if any), then loads snapshot SQL. +tables+ optional; if nil and ActiveRecord
      # is loaded, uses +connection.tables+.
      def load!(name, root:, database: nil, username: nil, host: nil, port: nil, tables: nil)
        database ||= default_connection[:database]
        raise Polyrun::Error, "SqlSnapshot: set database: or PGDATABASE" unless database

        username ||= default_connection[:username]
        snapshot = File.join(root, "spec", "fixtures", "sql_snapshots", "#{name}.sql")
        raise Polyrun::Error, "SqlSnapshot: missing #{snapshot}" unless File.file?(snapshot)

        if tables.nil? && defined?(ActiveRecord::Base) && ActiveRecord::Base.connected?
          tables = ActiveRecord::Base.connection.tables
        end
        tables ||= []

        psql = sql_snapshot_psql_base(username, database, host, port)
        sql_snapshot_truncate_tables!(psql, tables) if tables.any?
        sql_snapshot_load_file!(psql, snapshot)
        true
      end

      # Base psql argv (user + database, optional host/port).
      def sql_snapshot_psql_base(username, database, host, port)
        argv = ["psql", "-U", username, "-d", database]
        argv.push("-h", host) if host && !host.to_s.empty?
        argv.push("-p", port.to_s) if port && !port.to_s.empty?
        argv
      end

      # TRUNCATE … CASCADE across all +tables+ in one statement; identifiers
      # are double-quoted with embedded quotes doubled.
      def sql_snapshot_truncate_tables!(psql, tables)
        quoted = tables.map { |t| %("#{t.gsub('"', '""')}") }.join(", ")
        statement = "TRUNCATE TABLE #{quoted} CASCADE;"
        _out, stderr, status = Open3.capture3(*psql, "-v", "ON_ERROR_STOP=1", "-c", statement)
        raise Polyrun::Error, "psql truncate failed: #{stderr}" unless status.success?
      end

      # Replays the snapshot file with triggers/FK checks suppressed via
      # session_replication_role = 'replica', restoring 'origin' afterwards.
      def sql_snapshot_load_file!(psql, path)
        _out, stderr, status = Open3.capture3(
          *psql,
          "-v", "ON_ERROR_STOP=1",
          "-c", "SET session_replication_role = 'replica';",
          "-f", path,
          "-c", "SET session_replication_role = 'origin';"
        )
        raise Polyrun::Error, "psql load failed: #{stderr}" unless status.success?
      end
    end
  end
end
module Polyrun
  module Database
    # Prepare canonical template DBs with one +bin/rails db:prepare+ (all +DATABASE_URL*+ keys in one process for multi-DB apps), then create per-shard databases in parallel (PostgreSQL +CREATE DATABASE … TEMPLATE …+).
    # Other ActiveRecord adapters (MySQL, SQL Server, SQLite, …) are not automated here—use +polyrun env+ URLs with your own +db:*+ scripts.
    # Replaces shell loops like +dropdb+ / +createdb -T+ when +polyrun.yml databases:+ lists primary + +connections+.
    module CloneShards
      module_function

      # See +provision!+ on the singleton class for options.
      def provision!(databases_hash, workers:, rails_root:, migrate: true, replace: true, force_drop: false, dry_run: false, silent: true)
        config = databases_hash.is_a?(Hash) ? databases_hash : {}
        workers = Integer(workers)
        raise Polyrun::Error, "workers must be >= 1" if workers < 1

        rails_root = File.expand_path(rails_root)

        migrate_canonical_databases!(config, rails_root, dry_run, silent) if migrate
        create_shards_from_plan!(config, workers, replace, force_drop, dry_run)
        true
      end

      # Runs +db:prepare+ once against the template databases (or logs the
      # command when +dry_run+). Requires +databases.template_db+.
      def migrate_canonical_databases!(dh, rails_root, dry_run, silent)
        template = (dh["template_db"] || dh[:template_db]).to_s
        if template.empty?
          raise Polyrun::Error, "CloneShards: set databases.template_db (and optional connections[].template_db)"
        end

        if dry_run
          shell_log = UrlBuilder.template_prepare_env_shell_log(dh)
          Polyrun::Log.warn "would: RAILS_ENV=test #{shell_log} bin/rails db:prepare"
          return
        end

        env = ENV.to_h.merge(UrlBuilder.template_prepare_env(dh))
        env["RAILS_ENV"] ||= ENV["RAILS_ENV"] || "test"
        Provision.prepare_template!(rails_root: rails_root, env: env, silent: silent)
      end
      private_class_method :migrate_canonical_databases!

      # Builds each shard's database plan and executes it — sequentially when
      # +dry_run+ (clean log ordering), otherwise one thread per shard.
      def create_shards_from_plan!(dh, workers, replace, force_drop, dry_run)
        provision_shard = lambda do |shard_index|
          plan = UrlBuilder.shard_database_plan(dh, shard_index: shard_index)
          if plan.empty?
            raise Polyrun::Error, "CloneShards: empty shard plan for shard_index=#{shard_index}"
          end

          plan.each { |row| create_one_shard!(row, replace, force_drop, dry_run) }
        end

        if dry_run
          workers.times { |i| provision_shard.call(i) }
        else
          workers.times.map { |i| Thread.new { provision_shard.call(i) } }.each(&:join)
        end
      end
      private_class_method :create_shards_from_plan!

      # Drops (when +replace+) and recreates one shard DB from its template;
      # only logs intended SQL when +dry_run+.
      def create_one_shard!(row, replace, force_drop, dry_run)
        new_db = row[:new_db].to_s
        tmpl = row[:template_db].to_s
        if dry_run
          Polyrun::Log.warn "would: DROP DATABASE IF EXISTS #{new_db}" if replace
          Polyrun::Log.warn "would: CREATE DATABASE #{new_db} TEMPLATE #{tmpl}"
          return
        end

        Provision.drop_database_if_exists!(database: new_db, force: force_drop) if replace
        Provision.create_database_from_template!(new_db: new_db, template_db: tmpl)
      end
      private_class_method :create_one_shard!
    end
  end
end
require "open3"
require "shellwords"

module Polyrun
  module Database
    # PostgreSQL-only provisioning via +psql+ / +createdb+ (spec2 §5.3). No +pg+ gem.
    # For other adapters, use Rails tasks or vendor CLIs; +Polyrun::Database::UrlBuilder+ still emits +DATABASE_URL+ for supported schemes.
    module Provision
      module_function

      # Double-quotes a SQL identifier, doubling embedded quotes.
      def quote_ident(name)
        %("#{name.to_s.gsub('"', '""')}")
      end

      # +DROP DATABASE IF EXISTS name;+ — maintenance DB +postgres+ (or +maintenance_db+).
      # +force+ appends PostgreSQL 13+ WITH (FORCE) to evict live connections.
      def drop_database_if_exists!(database:, host: nil, port: nil, username: nil, maintenance_db: "postgres", force: false)
        host ||= ENV["PGHOST"] || "localhost"
        port ||= ENV["PGPORT"] || "5432"
        username ||= ENV["PGUSER"] || "postgres"

        statement = "DROP DATABASE IF EXISTS #{quote_ident(database)}"
        statement += " WITH (FORCE)" if force
        statement += ";"

        argv = ["psql", "-U", username, "-h", host, "-p", port.to_s, "-d", maintenance_db, "-v", "ON_ERROR_STOP=1", "-c", statement]
        _stdout, stderr, status = Open3.capture3(*argv)
        raise Polyrun::Error, "drop database failed: #{stderr}" unless status.success?

        true
      end

      # CREATE DATABASE new_db TEMPLATE template_db — connects to maintenance DB +postgres+.
      def create_database_from_template!(new_db:, template_db:, host: nil, port: nil, username: nil, maintenance_db: "postgres")
        host ||= ENV["PGHOST"] || "localhost"
        port ||= ENV["PGPORT"] || "5432"
        username ||= ENV["PGUSER"] || "postgres"

        statement = "CREATE DATABASE #{quote_ident(new_db)} TEMPLATE #{quote_ident(template_db)};"
        argv = ["psql", "-U", username, "-h", host, "-p", port.to_s, "-d", maintenance_db, "-v", "ON_ERROR_STOP=1", "-c", statement]
        _stdout, stderr, status = Open3.capture3(*argv)
        raise Polyrun::Error, "create database failed: #{stderr}" unless status.success?

        true
      end

      # Runs +bin/rails db:prepare+ with merged ENV (+DATABASE_URL+ for primary, +CACHE_DATABASE_URL+, etc.).
      # Multi-DB Rails apps must pass all template URLs in one invocation so each DB uses its own +migrations_paths+.
      # Uses +db:prepare+ (not +db:migrate+ alone) so empty template databases load +schema.rb+ first;
      # apps that squash or archive migrations and keep only incremental files need that path.
      def prepare_template!(rails_root:, env:, silent: true)
        rails_bin = File.join(rails_root, "bin", "rails")
        raise Polyrun::Error, "Provision: missing #{rails_bin}" unless File.executable?(rails_bin)

        child_env = ENV.to_h.merge(env)
        child_env["RAILS_ENV"] ||= ENV["RAILS_ENV"] || "test"
        stdout, stderr, status = Open3.capture3(child_env, rails_bin, "db:prepare", chdir: rails_root)
        Polyrun::Log.warn stderr if !silent && !stderr.to_s.empty?
        unless status.success?
          sections = ["db:prepare failed"]
          sections << "--- stderr ---\n#{stderr}" unless stderr.to_s.strip.empty?
          # Rails often prints the first migration/SQL error on stdout; stderr may only show InFailedSqlTransaction.
          sections << "--- stdout ---\n#{stdout}" unless stdout.to_s.strip.empty?
          raise Polyrun::Error, sections.join("\n")
        end

        true
      end
    end
  end
end
module Polyrun
  module Database
    # ENV helpers for sharded test databases (parallel_tests–style), stdlib only.
    module Shard
      module_function

      # Builds a hash of suggested ENV vars for this shard (strings).
      # +TEST_ENV_NUMBER+ is only emitted for multi-shard runs (1-based);
      # +POLYRUN_TEST_DATABASE+ only when +base_database+ is given.
      def env_map(shard_index:, shard_total:, base_database: nil)
        index = Integer(shard_index)
        total = Integer(shard_total)
        raise Polyrun::Error, "shard_index out of range" if index < 0 || index >= total

        env = {
          "POLYRUN_SHARD_INDEX" => index.to_s,
          "POLYRUN_SHARD_TOTAL" => total.to_s
        }
        env["TEST_ENV_NUMBER"] = (index + 1).to_s if total > 1
        name = base_database.to_s
        env["POLYRUN_TEST_DATABASE"] = expand_database_name(name, index) if base_database && !name.empty?
        env
      end

      # Substitutes "%{shard}" and "%<shard>d" placeholders with the index.
      def expand_database_name(template, shard_index)
        index = Integer(shard_index)
        template.gsub("%{shard}", index.to_s).gsub("%<shard>d", format("%d", index))
      end

      # Common URL transform: shard suffix on the database segment (+scheme://host/...+) or +sqlite3:+ path.
      # http/https/file URLs and anything unrecognizable pass through untouched.
      def database_url_with_shard(url, shard_index)
        return url if url.nil? || url.to_s.empty?

        raw = url.to_s
        return raw if raw.start_with?("http://", "https://", "file://")

        if raw.match?(/\Asqlite3:/i)
          file = raw.sub(/\Asqlite3:/i, "")
          return raw unless file.match?(%r{([^/]+?)(\.sqlite3)\z}i)

          index = Integer(shard_index)
          suffixed_file = file.sub(%r{([^/]+?)(\.sqlite3)\z}i) do
            "#{Regexp.last_match(1)}_#{index}#{Regexp.last_match(2)}"
          end
          return "sqlite3:#{suffixed_file}"
        end

        return raw unless raw.match?(%r{\A[a-z][a-z0-9+.-]*://}i)

        segment = raw.match(%r{/([^/?]+)(\?|$)})
        return raw unless segment

        db = segment[1]
        raw.sub(%r{/#{Regexp.escape(db)}(\?|$)}, "/#{db}_#{Integer(shard_index)}\\1")
      end

      # Prints shell `export` lines for every pair from +env_map+.
      def print_exports(shard_index:, shard_total:, base_database: nil)
        pairs = env_map(shard_index: shard_index, shard_total: shard_total, base_database: base_database)
        pairs.each { |key, value| Polyrun::Log.puts %(export #{key}=#{value}) }
      end
    end
  end
end