polyrun 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CODE_OF_CONDUCT.md +31 -0
- data/CONTRIBUTING.md +84 -0
- data/LICENSE +21 -0
- data/README.md +140 -0
- data/SECURITY.md +27 -0
- data/bin/polyrun +6 -0
- data/docs/SETUP_PROFILE.md +106 -0
- data/lib/polyrun/cli/coverage_commands.rb +150 -0
- data/lib/polyrun/cli/coverage_merge_io.rb +124 -0
- data/lib/polyrun/cli/database_commands.rb +149 -0
- data/lib/polyrun/cli/env_commands.rb +43 -0
- data/lib/polyrun/cli/helpers.rb +113 -0
- data/lib/polyrun/cli/init_command.rb +99 -0
- data/lib/polyrun/cli/plan_command.rb +134 -0
- data/lib/polyrun/cli/prepare_command.rb +71 -0
- data/lib/polyrun/cli/prepare_recipe.rb +77 -0
- data/lib/polyrun/cli/queue_command.rb +101 -0
- data/lib/polyrun/cli/quick_command.rb +13 -0
- data/lib/polyrun/cli/report_commands.rb +94 -0
- data/lib/polyrun/cli/run_shards_command.rb +88 -0
- data/lib/polyrun/cli/run_shards_plan_boot_phases.rb +91 -0
- data/lib/polyrun/cli/run_shards_plan_options.rb +45 -0
- data/lib/polyrun/cli/run_shards_planning.rb +124 -0
- data/lib/polyrun/cli/run_shards_run.rb +168 -0
- data/lib/polyrun/cli/start_bootstrap.rb +99 -0
- data/lib/polyrun/cli/timing_command.rb +31 -0
- data/lib/polyrun/cli.rb +184 -0
- data/lib/polyrun/config.rb +61 -0
- data/lib/polyrun/coverage/cobertura_zero_lines.rb +32 -0
- data/lib/polyrun/coverage/collector.rb +184 -0
- data/lib/polyrun/coverage/collector_finish.rb +95 -0
- data/lib/polyrun/coverage/filter.rb +22 -0
- data/lib/polyrun/coverage/formatter.rb +115 -0
- data/lib/polyrun/coverage/merge/formatters.rb +181 -0
- data/lib/polyrun/coverage/merge/formatters_html.rb +55 -0
- data/lib/polyrun/coverage/merge.rb +127 -0
- data/lib/polyrun/coverage/merge_fragment_meta.rb +47 -0
- data/lib/polyrun/coverage/merge_merge_two.rb +117 -0
- data/lib/polyrun/coverage/rails.rb +128 -0
- data/lib/polyrun/coverage/reporting.rb +41 -0
- data/lib/polyrun/coverage/result.rb +18 -0
- data/lib/polyrun/coverage/track_files.rb +141 -0
- data/lib/polyrun/data/cached_fixtures.rb +122 -0
- data/lib/polyrun/data/factory_counts.rb +35 -0
- data/lib/polyrun/data/factory_instrumentation.rb +50 -0
- data/lib/polyrun/data/fixtures.rb +68 -0
- data/lib/polyrun/data/parallel_provisioning.rb +93 -0
- data/lib/polyrun/data/snapshot.rb +84 -0
- data/lib/polyrun/database/clone_shards.rb +81 -0
- data/lib/polyrun/database/provision.rb +72 -0
- data/lib/polyrun/database/shard.rb +63 -0
- data/lib/polyrun/database/url_builder/connection/infer.rb +49 -0
- data/lib/polyrun/database/url_builder/connection/url_builders.rb +43 -0
- data/lib/polyrun/database/url_builder/connection.rb +191 -0
- data/lib/polyrun/database/url_builder/template_prepare.rb +21 -0
- data/lib/polyrun/database/url_builder.rb +160 -0
- data/lib/polyrun/debug.rb +81 -0
- data/lib/polyrun/env/ci.rb +65 -0
- data/lib/polyrun/log.rb +70 -0
- data/lib/polyrun/minitest.rb +17 -0
- data/lib/polyrun/partition/constraints.rb +69 -0
- data/lib/polyrun/partition/hrw.rb +33 -0
- data/lib/polyrun/partition/min_heap.rb +64 -0
- data/lib/polyrun/partition/paths.rb +28 -0
- data/lib/polyrun/partition/paths_build.rb +128 -0
- data/lib/polyrun/partition/plan.rb +189 -0
- data/lib/polyrun/partition/plan_lpt.rb +49 -0
- data/lib/polyrun/partition/plan_sharding.rb +48 -0
- data/lib/polyrun/partition/stable_shuffle.rb +18 -0
- data/lib/polyrun/prepare/artifacts.rb +40 -0
- data/lib/polyrun/prepare/assets.rb +57 -0
- data/lib/polyrun/queue/file_store.rb +199 -0
- data/lib/polyrun/queue/file_store_pending.rb +48 -0
- data/lib/polyrun/quick/assertions.rb +32 -0
- data/lib/polyrun/quick/errors.rb +6 -0
- data/lib/polyrun/quick/example_group.rb +66 -0
- data/lib/polyrun/quick/example_runner.rb +93 -0
- data/lib/polyrun/quick/matchers.rb +156 -0
- data/lib/polyrun/quick/reporter.rb +42 -0
- data/lib/polyrun/quick/runner.rb +180 -0
- data/lib/polyrun/quick.rb +1 -0
- data/lib/polyrun/railtie.rb +7 -0
- data/lib/polyrun/reporting/junit.rb +125 -0
- data/lib/polyrun/reporting/junit_emit.rb +58 -0
- data/lib/polyrun/reporting/rspec_junit.rb +39 -0
- data/lib/polyrun/rspec.rb +15 -0
- data/lib/polyrun/templates/POLYRUN.md +45 -0
- data/lib/polyrun/templates/ci_matrix.polyrun.yml +14 -0
- data/lib/polyrun/templates/minimal_gem.polyrun.yml +13 -0
- data/lib/polyrun/templates/rails_prepare.polyrun.yml +31 -0
- data/lib/polyrun/timing/merge.rb +35 -0
- data/lib/polyrun/timing/summary.rb +25 -0
- data/lib/polyrun/version.rb +3 -0
- data/lib/polyrun.rb +58 -0
- data/polyrun.gemspec +37 -0
- data/sig/polyrun/cli.rbs +6 -0
- data/sig/polyrun/config.rbs +20 -0
- data/sig/polyrun/debug.rbs +12 -0
- data/sig/polyrun/log.rbs +12 -0
- data/sig/polyrun/minitest.rbs +5 -0
- data/sig/polyrun/quick.rbs +19 -0
- data/sig/polyrun/rspec.rbs +5 -0
- data/sig/polyrun.rbs +11 -0
- metadata +288 -0
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
require_relative "run_shards_plan_options"
|
|
2
|
+
require_relative "run_shards_plan_boot_phases"
|
|
3
|
+
|
|
4
|
+
module Polyrun
  class CLI
    # Parses argv, loads config, builds {Partition::Plan} for run-shards.
    module RunShardsPlanning
      include RunShardsPlanOptions
      include RunShardsPlanBootPhases

      private

      # Drive the planning pipeline: boot, phase A (option parsing), phase B (plan build).
      # @return [Array(Integer, Hash, nil)] [exit_code, nil] on failure, or [nil, ctx] on success
      def run_shards_build_plan(argv, config_path)
        boot = run_shards_plan_boot(argv, config_path)
        # A two-element boot result is already an [exit_code, nil] failure pair.
        return boot if boot.size == 2

        run_t0, head, cmd, cfg, pc = boot
        phase = run_shards_plan_phase_a(head, cmd, pc)
        return [phase[1], nil] if phase[0] == :fail

        _tag, parsed_opts, cmd = phase
        run_shards_plan_phase_b(parsed_opts, cmd, cfg, pc, run_t0, config_path)
      end

      # Fall back to the config's timing_file only for strategies that consume timing data.
      def run_shards_default_timing_path(pc, timing_path, strategy)
        return timing_path if timing_path

        configured = pc["timing_file"] || pc[:timing_file]
        return nil unless configured

        wants_timing = Polyrun::Partition::Plan.cost_strategy?(strategy) ||
          Polyrun::Partition::Plan.hrw_strategy?(strategy)
        wants_timing ? configured : nil
      end

      # Reject workers < 1; silently-capped values are mutated in place on +o+.
      # @return [Integer, nil] exit code 2 on invalid input, nil when acceptable
      def run_shards_validate_workers!(o)
        requested = o[:workers]
        if requested < 1
          Polyrun::Log.warn "polyrun run-shards: --workers must be >= 1"
          return 2
        end

        limit = RunShardsCommand::MAX_PARALLEL_WORKERS
        if requested > limit
          Polyrun::Log.warn "polyrun run-shards: capping --workers / POLYRUN_WORKERS from #{requested} to #{limit}"
          o[:workers] = limit
        end
        nil
      end

      # @return [Integer, nil] 2 when nothing follows the "--" separator, nil otherwise
      def run_shards_validate_cmd(cmd)
        return nil unless cmd.empty?

        Polyrun::Log.warn "polyrun run-shards: empty command after --"
        2
      end

      # Resolve the spec file list that will be sharded.
      # @return [Array(Array<String>, String, nil)] [items, source, nil], or [nil, nil, 2] on error
      def run_shards_resolve_items(paths_file)
        resolved = Polyrun::Partition::Paths.resolve_run_shard_items(paths_file: paths_file)
        error = resolved[:error]
        if error
          Polyrun::Log.warn "polyrun run-shards: #{error}"
          return [nil, nil, 2]
        end

        items = resolved[:items]
        source = resolved[:source]
        Polyrun::Log.warn "polyrun run-shards: #{items.size} spec path(s) from #{source}"
        return [items, source, nil] unless items.empty?

        Polyrun::Log.warn "polyrun run-shards: no spec paths (spec/spec_paths.txt, partition.paths_file, or spec/**/*_spec.rb)"
        [nil, nil, 2]
      end

      # Load per-file costs; when timing data exists but a non-timing-aware strategy
      # was chosen, upgrade it to cost_binpack.
      # @return [Array(Hash, String, nil)] [costs, strategy, nil], or [nil, nil, 2] on error
      def run_shards_resolve_costs(timing_path, strategy)
        unless timing_path
          if Polyrun::Partition::Plan.cost_strategy?(strategy)
            Polyrun::Log.warn "polyrun run-shards: --timing or partition.timing_file required for strategy #{strategy}"
            return [nil, nil, 2]
          end
          return [nil, strategy, nil]
        end

        costs = Polyrun::Partition::Plan.load_timing_costs(File.expand_path(timing_path.to_s, Dir.pwd))
        if costs.empty?
          Polyrun::Log.warn "polyrun run-shards: timing file missing or empty: #{timing_path}"
          return [nil, nil, 2]
        end

        timing_aware = Polyrun::Partition::Plan.cost_strategy?(strategy) ||
          Polyrun::Partition::Plan.hrw_strategy?(strategy)
        unless timing_aware
          Polyrun::Log.warn "polyrun run-shards: using cost_binpack (timing data present)" if @verbose
          strategy = "cost_binpack"
        end
        [costs, strategy, nil]
      end

      # Build the partition plan, timing construction for debug output.
      def run_shards_make_plan(items, workers, strategy, seed, costs, constraints)
        label = "Partition::Plan.new (partition #{items.size} paths → #{workers} shards)"
        Polyrun::Debug.time(label) do
          Polyrun::Partition::Plan.new(
            items: items,
            total_shards: workers,
            strategy: strategy,
            seed: seed,
            costs: costs,
            constraints: constraints,
            root: Dir.pwd
          )
        end
      end

      # Log per-shard file counts when debug tracing is enabled.
      def run_shards_debug_shard_sizes(plan, workers)
        return unless Polyrun::Debug.enabled?

        (0...workers).each do |shard_index|
          count = plan.shard(shard_index).size
          Polyrun::Debug.log("run-shards: shard #{shard_index} → #{count} spec file(s)")
        end
      end

      # One-time banner clarifying that shards are OS processes, not threads.
      def run_shards_warn_parallel_banner(item_count, workers, strategy)
        Polyrun::Log.warn <<~MSG
          polyrun run-shards: #{item_count} spec path(s) -> #{workers} parallel worker processes (not Ruby threads); strategy=#{strategy}
          (plain `bundle exec rspec` is one process; this command fans out.)
        MSG
      end
    end
  end
end
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
require "shellwords"
|
|
2
|
+
require "rbconfig"
|
|
3
|
+
|
|
4
|
+
require_relative "run_shards_planning"
|
|
5
|
+
|
|
6
|
+
module Polyrun
  class CLI
    # Partition + spawn workers for `polyrun run-shards` (keeps {RunShardsCommand} file small).
    module RunShardsRun
      include RunShardsPlanning

      private

      # Entry point: build the plan, then spawn/wait/merge.
      # @return [Integer] exit code (planning failures short-circuit here)
      def run_shards_run!(argv, config_path)
        code, ctx = run_shards_build_plan(argv, config_path)
        # Planning returns [exit_code, nil] on failure, [nil, ctx] on success.
        return code if code

        run_shards_workers_and_merge(ctx)
      end

      # Spawn all workers, wait for them, report failures, then merge coverage.
      # @param ctx [Hash] planning context (:workers, :cmd, :plan, :parallel, :run_t0, ...)
      # @return [Integer] 0 on success, 1 when any shard fails or none spawned
      def run_shards_workers_and_merge(ctx)
        pids = run_shards_spawn_workers(ctx)
        return 1 if pids.empty?

        run_shards_warn_interleaved(ctx[:parallel], pids.size)

        shard_results = run_shards_wait_all_children(pids)
        failed = shard_results.reject { |r| r[:success] }.map { |r| r[:shard] }

        Polyrun::Debug.log(format(
          "run-shards: workers wall time since start: %.3fs",
          Process.clock_gettime(Process::CLOCK_MONOTONIC) - ctx[:run_t0]
        ))

        if ctx[:parallel]
          Polyrun::Log.warn "polyrun run-shards: finished #{pids.size} worker(s)" + (failed.any? ? " (some failed)" : " (exit 0)")
        end

        if failed.any?
          run_shards_log_failed_reruns(failed, shard_results, ctx[:plan], ctx[:parallel], ctx[:workers], ctx[:cmd])
          return 1
        end

        run_shards_merge_or_hint_coverage(ctx)
      end

      # Fork one child process per non-empty shard via Process.spawn.
      # @return [Array<Hash>] one {pid:, shard:} entry per spawned child
      def run_shards_spawn_workers(ctx)
        workers = ctx[:workers]
        cmd = ctx[:cmd]
        cfg = ctx[:cfg]
        plan = ctx[:plan]
        parallel = ctx[:parallel]

        pids = []
        workers.times do |shard|
          paths = plan.shard(shard)
          if paths.empty?
            Polyrun::Log.warn "polyrun run-shards: shard #{shard} skipped (no paths)" if @verbose || parallel
            next
          end

          # Per-shard environment (from Helpers; presumably POLYRUN_SHARD_* vars — not visible here).
          child_env = shard_child_env(cfg: cfg, workers: workers, shard: shard)

          Polyrun::Log.warn "polyrun run-shards: shard #{shard} → #{paths.size} file(s)" if @verbose
          pid = Process.spawn(child_env, *cmd, *paths)
          pids << {pid: pid, shard: shard}
          Polyrun::Debug.log("[parent pid=#{$$}] run-shards: Process.spawn shard=#{shard} child_pid=#{pid} spec_files=#{paths.size}")
          Polyrun::Log.warn "polyrun run-shards: started shard #{shard} pid=#{pid} (#{paths.size} file(s))" if parallel
        end
        pids
      end

      # Warn once that multiple children will interleave their stdout/stderr.
      def run_shards_warn_interleaved(parallel, pid_count)
        return unless parallel && pid_count > 1

        Polyrun::Log.warn "polyrun run-shards: #{pid_count} children running; RSpec output below may be interleaved."
        Polyrun::Log.warn "polyrun run-shards: each worker prints its own summary line; the last \"N examples\" line is not a total across shards."
      end

      # Reap every child, recording exit status per shard. Interrupt/SIGTERM
      # during the wait triggers best-effort teardown (which exits the process).
      # @return [Array<Hash>] one {shard:, exitstatus:, success:} entry per child
      def run_shards_wait_all_children(pids)
        shard_results = []
        Polyrun::Debug.time("Process.wait (#{pids.size} worker process(es))") do
          pids.each do |h|
            Process.wait(h[:pid])
            # $? is set per-thread by Process.wait for the reaped child.
            exitstatus = $?.exitstatus
            ok = $?.success?
            Polyrun::Debug.log("[parent pid=#{$$}] run-shards: Process.wait child_pid=#{h[:pid]} shard=#{h[:shard]} exit=#{exitstatus} success=#{ok}")
            shard_results << {shard: h[:shard], exitstatus: exitstatus, success: ok}
          end
        rescue Interrupt
          # Do not trap SIGINT: Process.wait raises Interrupt; a trap races and prints Interrupt + SystemExit traces.
          run_shards_shutdown_on_signal!(pids, 130)
        rescue SignalException => e
          raise unless e.signm == "SIGTERM"

          run_shards_shutdown_on_signal!(pids, 143)
        end
        shard_results
      end

      # Best-effort worker teardown then exit. Does not return.
      # +code+ follows the 128+signal convention (130 = SIGINT, 143 = SIGTERM).
      def run_shards_shutdown_on_signal!(pids, code)
        run_shards_terminate_children!(pids)
        exit(code)
      rescue Interrupt
        # Second Ctrl+C while tearing down: escalate to SIGKILL, then reap.
        pids.each do |h|
          Process.kill(:KILL, h[:pid])
        rescue Errno::ESRCH
          # already reaped
        end
        pids.each do |h|
          Process.wait(h[:pid])
        rescue Errno::ESRCH, Errno::ECHILD, Interrupt
          # already reaped or give up
        end
        exit(code)
      end

      # Send SIGTERM to each worker PID and wait so Ctrl+C / SIGTERM does not leave orphans.
      def run_shards_terminate_children!(pids)
        pids.each do |h|
          Process.kill(:TERM, h[:pid])
        rescue Errno::ESRCH
          # already reaped
        end
        pids.each do |h|
          Process.wait(h[:pid])
        rescue Errno::ESRCH, Errno::ECHILD
          # already reaped
        end
      end

      # After a fully-green run: merge coverage fragments when requested,
      # otherwise (parallel mode) print how to merge manually.
      # @return [Integer] exit code (merge result, or 0)
      def run_shards_merge_or_hint_coverage(ctx)
        if ctx[:merge_coverage]
          mo = ctx[:merge_output] || "coverage/merged.json"
          mf = ctx[:merge_format] || ENV["POLYRUN_MERGE_FORMATS"] || Polyrun::Coverage::Reporting::DEFAULT_MERGE_FORMAT_LIST
          Polyrun::Debug.log("run-shards: starting post-worker merge_coverage_after_shards → #{mo}")
          return merge_coverage_after_shards(output: mo, format_list: mf, config_path: ctx[:config_path])
        end

        if ctx[:parallel]
          Polyrun::Log.warn <<~MSG
            polyrun run-shards: coverage — each worker writes coverage/polyrun-fragment-<shard>.json when Polyrun coverage is enabled (POLYRUN_SHARD_INDEX per process).
            polyrun run-shards: next step — merge with: polyrun merge-coverage -i 'coverage/polyrun-fragment-*.json' -o coverage/merged.json --format json,cobertura,console
          MSG
        end
        0
      end

      # For each failed shard, log its exit code and a copy-pasteable single-shard
      # re-run command (same spec list, no interleaved output).
      def run_shards_log_failed_reruns(failed, shard_results, plan, parallel, workers, cmd)
        exit_by_shard = shard_results.each_with_object({}) { |r, h| h[r[:shard]] = r[:exitstatus] }
        failed_detail = failed.sort.map { |s| "#{s} (exit #{exit_by_shard[s]})" }.join(", ")
        Polyrun::Log.warn "polyrun run-shards: failed shard(s): #{failed_detail}"
        if parallel
          Polyrun::Log.warn "polyrun run-shards: search this log for the failed shard's output, or re-run one shard at a time (below) for a clean RSpec report."
        end
        failed.sort.each do |s|
          paths = plan.shard(s)
          next if paths.empty?

          rerun = "export POLYRUN_SHARD_INDEX=#{s} POLYRUN_SHARD_TOTAL=#{workers}; "
          rerun << Shellwords.join(cmd + paths)
          Polyrun::Log.warn "polyrun run-shards: shard #{s} re-run (same spec list, no interleave): #{rerun}"
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
module Polyrun
  class CLI
    # +polyrun start+ only: run +prepare+ and/or Postgres template+shard DBs before parallel RSpec.
    module StartBootstrap
      private

      # Keep in sync with {RunShardsCommand} worker defaults.
      START_ARG_WORKERS_DEFAULT = 5
      START_ARG_WORKERS_MAX = 10

      # Run the optional prepare recipe, then optional DB provisioning.
      # @param cfg [Polyrun::Config]
      # @param argv [Array<String>] the `start` argv (scanned for --workers)
      # @param config_path [String, nil]
      # @return [Integer] 0 on success; prepare's non-zero code, or 1 on DB failure
      def start_bootstrap!(cfg, argv, config_path)
        if start_run_prepare?(cfg) && !truthy_env?("POLYRUN_START_SKIP_PREPARE")
          recipe = cfg.prepare["recipe"] || cfg.prepare[:recipe] || "default"
          Polyrun::Log.warn "polyrun start: running prepare (recipe=#{recipe})" if @verbose
          code = cmd_prepare([], config_path)
          return code if code != 0
        end

        if start_run_database_provision?(cfg) && !truthy_env?("POLYRUN_START_SKIP_DATABASES")
          workers = parse_workers_from_start_argv(argv)
          Polyrun::Log.warn "polyrun start: provisioning test DBs (template + shards 0..#{workers - 1})" if @verbose
          begin
            Polyrun::Database::CloneShards.provision!(
              cfg.databases,
              workers: workers,
              rails_root: Dir.pwd,
              migrate: true,
              replace: true,
              force_drop: truthy_env?("POLYRUN_PG_DROP_FORCE"),
              dry_run: false,
              silent: !@verbose
            )
          rescue Polyrun::Error => e
            Polyrun::Log.warn "polyrun start: #{e.message}"
            return 1
          end
        end
        0
      end

      # Should `start` auto-run prepare? An explicit start.prepare true/false in
      # polyrun.yml wins; otherwise only run when the recipe would do real work.
      def start_run_prepare?(cfg)
        st = cfg.start_config
        prep = cfg.prepare
        return false unless prep.is_a?(Hash) && !prep.empty?

        return false if st["prepare"] == false || st[:prepare] == false
        return true if st["prepare"] == true || st[:prepare] == true

        prepare_recipe_has_side_effects?(prep)
      end

      # True when the configured prepare recipe performs work: the shell/assets
      # recipes, or any explicit command/commands entry.
      def prepare_recipe_has_side_effects?(prep)
        recipe = (prep["recipe"] || prep[:recipe] || "default").to_s
        return true if %w[shell assets].include?(recipe)
        return true if prep["command"] || prep[:command] || prep["commands"] || prep[:commands]

        false
      end

      # Should `start` auto-provision shard databases? Requires databases.template_db;
      # an explicit start.databases true/false overrides the default (true).
      def start_run_database_provision?(cfg)
        st = cfg.start_config
        dh = cfg.databases
        return false unless dh.is_a?(Hash)

        template = (dh["template_db"] || dh[:template_db]).to_s
        return false if template.empty?

        if st["databases"] == true || st[:databases] == true
          return true
        end
        return false if st["databases"] == false || st[:databases] == false

        true
      end

      # Scan the argv before "--" for a worker count. Accepts both `--workers N`
      # and `--workers=N` (the OptionParser-based run-shards command accepts both
      # spellings, so `start` must recognize both; previously `--workers=N` was
      # silently ignored). Falls back to POLYRUN_WORKERS, then
      # START_ARG_WORKERS_DEFAULT. Invalid or < 1 values are ignored; the result
      # is clamped to 1..START_ARG_WORKERS_MAX.
      def parse_workers_from_start_argv(argv)
        sep = argv.index("--")
        head = sep ? argv[0...sep] : argv
        workers = env_int("POLYRUN_WORKERS", START_ARG_WORKERS_DEFAULT)
        i = 0
        while i < head.size
          arg = head[i]
          if arg == "--workers" && head[i + 1]
            w = Integer(head[i + 1], exception: false)
            workers = w if w && w >= 1
            i += 2
          elsif arg.start_with?("--workers=")
            w = Integer(arg.split("=", 2)[1], exception: false)
            workers = w if w && w >= 1
            i += 1
          else
            i += 1
          end
        end
        workers.clamp(1, START_ARG_WORKERS_MAX)
      end

      # True when ENV[name] is "1", "true", or "yes" (case-insensitive).
      def truthy_env?(name)
        v = ENV[name].to_s.downcase
        %w[1 true yes].include?(v)
      end
    end
  end
end
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
require "optparse"
|
|
2
|
+
|
|
3
|
+
module Polyrun
  class CLI
    # `polyrun merge-timing`: combine per-shard timing JSON fragments into one file.
    module TimingCommand
      private

      # Merge timing fragments named via -i/--input (repeatable) or positional
      # arguments into a single JSON file, then print the absolute output path.
      # @param argv [Array<String>] command arguments (mutated by OptionParser)
      # @return [Integer] 0 on success, 2 on usage error
      def cmd_merge_timing(argv)
        fragment_paths = []
        destination = "polyrun_timing.json"

        OptionParser.new do |opts|
          opts.banner = "usage: polyrun merge-timing [-i FILE]... [-o OUT] [FILE...]"
          opts.on("-i", "--input FILE", "Timing JSON fragment (repeatable)") { |f| fragment_paths << f }
          opts.on("-o", "--output PATH", String) { |v| destination = v }
        end.parse!(argv)

        # Positional FILE arguments only count when no -i was given.
        fragment_paths = argv.dup if fragment_paths.empty?

        if fragment_paths.empty?
          Polyrun::Log.warn "merge-timing: need -i FILE or positional paths"
          return 2
        end

        resolved_output = File.expand_path(destination)
        absolute_inputs = fragment_paths.map { |p| File.expand_path(p) }
        Polyrun::Timing::Merge.merge_and_write(absolute_inputs, resolved_output)
        Polyrun::Log.puts resolved_output
        0
      end
    end
  end
end
|
data/lib/polyrun/cli.rb
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
require "optparse"
|
|
2
|
+
|
|
3
|
+
require_relative "cli/helpers"
|
|
4
|
+
require_relative "cli/plan_command"
|
|
5
|
+
require_relative "cli/prepare_command"
|
|
6
|
+
require_relative "cli/coverage_commands"
|
|
7
|
+
require_relative "cli/report_commands"
|
|
8
|
+
require_relative "cli/env_commands"
|
|
9
|
+
require_relative "cli/database_commands"
|
|
10
|
+
require_relative "cli/run_shards_command"
|
|
11
|
+
require_relative "cli/queue_command"
|
|
12
|
+
require_relative "cli/timing_command"
|
|
13
|
+
require_relative "cli/init_command"
|
|
14
|
+
require_relative "cli/quick_command"
|
|
15
|
+
|
|
16
|
+
module Polyrun
  # Command-line entry point: parses global flags, then dispatches to one of the
  # per-command mixins included below (each contributes cmd_* methods).
  class CLI
    include Helpers
    include PlanCommand
    include PrepareCommand
    include CoverageCommands
    include ReportCommands
    include EnvCommands
    include DatabaseCommands
    include RunShardsCommand
    include QueueCommand
    include TimingCommand
    include InitCommand
    include QuickCommand

    # Convenience wrapper so bin/polyrun can call Polyrun::CLI.run.
    # @return [Integer] process exit code
    def self.run(argv = ARGV)
      new.run(argv)
    end

    # Parse global options, log dispatch context, run the subcommand.
    # @param argv [Array<String>]
    # @return [Integer] exit code (0 for bare invocation / help)
    def run(argv)
      argv = argv.dup
      config_path = parse_global_cli!(argv)
      # parse_global_cli! returns an Integer exit code when -h/--help was handled.
      return config_path if config_path.is_a?(Integer)

      command = argv.shift
      if command.nil?
        print_help
        return 0
      end

      Polyrun::Debug.log_kv(
        command: command,
        cwd: Dir.pwd,
        polyrun_config: config_path,
        argv_rest: argv.dup,
        verbose: @verbose
      )

      dispatch_cli_command(command, argv, config_path)
    end

    private

    # Consume leading global flags from argv (mutates it), set @verbose.
    # @return [String, nil, Integer] config path (ENV fallback), or an Integer
    #   exit code when -h/--help printed help; unknown flags stop the scan.
    def parse_global_cli!(argv)
      config_path = ENV["POLYRUN_CONFIG"]
      @verbose = false
      while (a = argv.first) && a.start_with?("-") && a != "--"
        case a
        when "-c", "--config"
          argv.shift
          # A trailing -c with no value ends the scan rather than erroring.
          config_path = argv.shift or break
        when "-v", "--verbose"
          @verbose = true
          argv.shift
        when "-h", "--help"
          print_help
          return 0
        else
          break
        end
      end
      config_path
    end

    # First dispatch tier: help/version, else fall through to subcommands.
    # @return [Integer] exit code
    def dispatch_cli_command(command, argv, config_path)
      case command
      when "help"
        print_help
        0
      when "version"
        cmd_version
      else
        dispatch_cli_command_subcommands(command, argv, config_path)
      end
    end

    # Second dispatch tier: map each command name to its cmd_* implementation
    # (provided by the included *Command modules). Unknown commands exit 2.
    # @return [Integer] exit code
    def dispatch_cli_command_subcommands(command, argv, config_path)
      case command
      when "plan"
        cmd_plan(argv, config_path)
      when "prepare"
        cmd_prepare(argv, config_path)
      when "merge-coverage"
        cmd_merge_coverage(argv, config_path)
      when "report-coverage"
        cmd_report_coverage(argv)
      when "report-junit"
        cmd_report_junit(argv)
      when "report-timing"
        cmd_report_timing(argv)
      when "env"
        cmd_env(argv, config_path)
      when "merge-timing"
        cmd_merge_timing(argv)
      when "db:setup-template"
        cmd_db_setup_template(argv, config_path)
      when "db:setup-shard"
        cmd_db_setup_shard(argv, config_path)
      when "db:clone-shards"
        cmd_db_clone_shards(argv, config_path)
      when "run-shards"
        cmd_run_shards(argv, config_path)
      when "parallel-rspec"
        cmd_parallel_rspec(argv, config_path)
      when "start"
        cmd_start(argv, config_path)
      when "build-paths"
        cmd_build_paths(config_path)
      when "init"
        cmd_init(argv, config_path)
      when "queue"
        cmd_queue(argv)
      when "quick"
        cmd_quick(argv)
      else
        Polyrun::Log.warn "unknown command: #{command}"
        2
      end
    end

    # Print the full usage text (global flags, env toggles, command list).
    def print_help
      Polyrun::Log.puts <<~HELP
        usage: polyrun [global options] <command> [options]

        global:
          -c, --config PATH   polyrun.yml path (or POLYRUN_CONFIG)
          -v, --verbose
          -h, --help

        Trace timing (stderr): DEBUG=1 or POLYRUN_DEBUG=1
        Branch coverage in JSON fragments: POLYRUN_COVERAGE_BRANCHES=1 (stdlib Coverage; merge-coverage merges branches)
        polyrun quick coverage: POLYRUN_COVERAGE=1 or (config/polyrun_coverage.yml + POLYRUN_QUICK_COVERAGE=1); POLYRUN_COVERAGE_DISABLE=1 skips
        Merge wall time (stderr): POLYRUN_PROFILE_MERGE=1 (or verbose / DEBUG)
        Post-merge formats (run-shards): POLYRUN_MERGE_FORMATS (default: json,lcov,cobertura,console,html)
        Skip optional script/build_spec_paths.rb before start: POLYRUN_SKIP_BUILD_SPEC_PATHS=1
        Skip start auto-prepare / auto DB provision: POLYRUN_START_SKIP_PREPARE=1, POLYRUN_START_SKIP_DATABASES=1
        Skip writing paths_file from partition.paths_build: POLYRUN_SKIP_PATHS_BUILD=1
        Warn if merge-coverage wall time exceeds N seconds (default 10): POLYRUN_MERGE_SLOW_WARN_SECONDS (0 disables)
        Parallel RSpec workers: POLYRUN_WORKERS default 5, max 10 (run-shards / parallel-rspec / start)

        commands:
          version            print version
          plan               emit partition manifest JSON
          prepare            run prepare recipe: default | assets (optional prepare.command overrides bin/rails assets:precompile) | shell (prepare.command required)
          merge-coverage     merge SimpleCov JSON fragments (json/lcov/cobertura/console)
          run-shards         fan out N parallel OS processes (POLYRUN_SHARD_*; not Ruby threads); optional --merge-coverage
          parallel-rspec     run-shards + merge-coverage (defaults to: bundle exec rspec after --)
          start              parallel-rspec; auto-runs prepare (shell/assets) and db:setup-* when polyrun.yml configures them; legacy script/build_spec_paths.rb if paths_build absent
          build-paths        write partition.paths_file from partition.paths_build (same as auto step before plan/run-shards)
          init               write a starter polyrun.yml or POLYRUN.md from built-in templates (see docs/SETUP_PROFILE.md)
          queue              file-backed batch queue (init / claim / ack / status)
          quick              run Polyrun::Quick (describe/it, before/after, let, expect…to, assert_*; optional capybara!)
          report-coverage    write all coverage formats from one JSON file
          report-junit       RSpec JSON or Polyrun testcase JSON → JUnit XML (CI)
          report-timing      print slow-file summary from merged timing JSON
          merge-timing       merge polyrun_timing_*.json shards
          env                print shard + database env (see polyrun.yml databases)
          db:setup-template  migrate template DB (PostgreSQL)
          db:setup-shard     CREATE DATABASE shard FROM template (one POLYRUN_SHARD_INDEX)
          db:clone-shards    migrate templates + DROP/CREATE all shard DBs (replaces clone_shard shell scripts)
      HELP
    end

    # @return [Integer] 0
    def cmd_version
      Polyrun::Log.puts "polyrun #{Polyrun::VERSION}"
      0
    end
  end
end
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
require "yaml"
|
|
2
|
+
|
|
3
|
+
module Polyrun
  # Loads polyrun.yml (or path from POLYRUN_CONFIG / --config).
  class Config
    DEFAULT_FILENAMES = %w[polyrun.yml config/polyrun.yml].freeze

    attr_reader :path, :raw

    # Resolve the config path and parse it; a missing file yields an empty config.
    # @param path [String, nil] explicit config path; nil triggers the default search
    # @return [Polyrun::Config]
    def self.load(path: nil)
      resolved = resolve_path(path)
      data = {}
      if resolved && File.file?(resolved)
        data = YAML.safe_load_file(resolved, permitted_classes: [Symbol], aliases: true) || {}
      end
      new(path: resolved, raw: data)
    end

    # Expand an explicit path, or scan DEFAULT_FILENAMES under the current directory.
    # @return [String, nil] absolute path, or nil when nothing was found
    def self.resolve_path(explicit)
      return File.expand_path(explicit) if explicit && !explicit.empty?

      DEFAULT_FILENAMES
        .map { |candidate| File.expand_path(candidate, Dir.pwd) }
        .find { |full| File.file?(full) }
    end

    def initialize(path:, raw:)
      @path = path
      @raw = raw.freeze
    end

    def partition
      section("partition")
    end

    def prepare
      section("prepare")
    end

    def coverage
      section("coverage")
    end

    def databases
      section("databases")
    end

    # Optional +start:+ block: +prepare+ / +databases+ booleans override auto-detection for +polyrun start+.
    def start_config
      section("start")
    end

    def version
      raw["version"] || raw[:version]
    end

    private

    # Fetch a top-level mapping by string or symbol key, defaulting to {}.
    def section(name)
      raw[name] || raw[name.to_sym] || {}
    end
  end
end
|