specwrk 0.8.0 → 0.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +0 -2
- data/docker/Dockerfile.server +11 -7
- data/docker/entrypoint.server.sh +2 -2
- data/lib/specwrk/cli.rb +2 -2
- data/lib/specwrk/client.rb +17 -0
- data/lib/specwrk/store/file_adapter.rb +180 -0
- data/lib/specwrk/store.rb +241 -0
- data/lib/specwrk/version.rb +1 -1
- data/lib/specwrk/web/app.rb +2 -26
- data/lib/specwrk/web/auth.rb +1 -1
- data/lib/specwrk/web/endpoints.rb +196 -65
- data/lib/specwrk/web.rb +0 -16
- data/lib/specwrk/worker.rb +9 -3
- metadata +32 -4
- data/docker/pitchfork.conf +0 -5
- data/lib/specwrk/queue.rb +0 -224
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 656884cedc1e6851a42054d696fb0c8cbb3c7afffa48935340c973d824cc6cc0
+  data.tar.gz: e8b562b8070e5342d6343b299a80fe0bc98a17060632aa369f5ded91ffecb820
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a536a0bf7de4959ca820afa5e9249024181f198d073ca648f040f8f4276f63572ac74a1cdff481b9e050b1983f7af7eac5b2d31c3271dff911ae88aa75092f27
+  data.tar.gz: eddaa254e6254727f950006b30b653402a33acd827b1060baf583840d99ae2d41dec0ab4a15c11754c03db3f2664ea348efefc1b32b95b494c56287a0d3b88d0
data/README.md
CHANGED
@@ -58,7 +58,6 @@ Options:
   --port=VALUE, -p VALUE # Server port. Overrides SPECWRK_SRV_PORT, default: "5138"
   --bind=VALUE, -b VALUE # Server bind address. Overrides SPECWRK_SRV_BIND, default: "127.0.0.1"
   --group-by=VALUE # How examples will be grouped for workers; fallback to file if no timings are found. Overrides SPECWERK_SRV_GROUP_BY: (file/timings), default: "timings"
-  --[no-]single-seed-per-run # Only allow one seed per run. Useful for CI where many nodes may seed at the same time, default: false
   --[no-]verbose # Run in verbose mode. Default false., default: false
   --help, -h # Print this help
 ```
@@ -83,7 +82,6 @@ Options:
   --key=VALUE, -k VALUE # Authentication key clients must use for access. Overrides SPECWRK_SRV_KEY, default: ""
   --output=VALUE, -o VALUE # Directory where worker output is stored. Overrides SPECWRK_OUT, default: ".specwrk/"
   --group-by=VALUE # How examples will be grouped for workers; fallback to file if no timings are found. Overrides SPECWERK_SRV_GROUP_BY: (file/timings), default: "timings"
-  --[no-]single-seed-per-run # Only allow one seed per run. Useful for CI where many nodes may seed at the same time, default: false
   --[no-]verbose # Run in verbose mode. Default false., default: false
   --[no-]single-run # Act on shutdown requests from clients. Default: false., default: false
   --help, -h # Print this help
data/docker/Dockerfile.server
CHANGED
@@ -1,6 +1,11 @@
 FROM ruby:3.4-alpine
 
-RUN apk add --no-cache
+RUN apk add --no-cache \
+  build-base \
+  ruby-dev \
+  linux-headers \
+  zlib-dev \
+  libffi-dev
 
 WORKDIR /app
 
@@ -8,15 +13,14 @@ RUN mkdir .specwrk/
 
 ARG SPECWRK_SRV_PORT=5138
 ARG SPECWRK_VERSION=latest
-ARG
+ARG GEMFILE=specwrk-$SPECWRK_VERSION.gem
 
-COPY $
-RUN gem install ./$
-RUN rm ./$
+COPY $GEMFILE ./
+RUN gem install ./$GEMFILE --no-document
+RUN rm ./$GEMFILE
 
-RUN gem install
+RUN gem install puma thruster
 COPY config.ru ./
-COPY docker/pitchfork.conf ./
 
 COPY docker/entrypoint.server.sh /usr/local/bin/entrypoint
 RUN chmod +x /usr/local/bin/entrypoint
data/docker/entrypoint.server.sh
CHANGED
@@ -2,6 +2,6 @@
 
 export THRUSTER_HTTP_PORT=${PORT:-5138}
 export THRUSTER_TARGET_PORT=3000
-export THRUSTER_HTTP_IDLE_TIMEOUT=${IDLE_TIMEOUT:-
+export THRUSTER_HTTP_IDLE_TIMEOUT=${IDLE_TIMEOUT:-305}
 
-exec thrust
+exec thrust puma --workers 0 --bind tcp://127.0.0.1:3000 --threads ${PUMA_THREADS:-1} --workers ${PUMA_WORKERS:-0}
data/lib/specwrk/cli.rb
CHANGED
@@ -76,7 +76,6 @@ module Specwrk
       base.unique_option :key, type: :string, aliases: ["-k"], default: ENV.fetch("SPECWRK_SRV_KEY", ""), desc: "Authentication key clients must use for access. Overrides SPECWRK_SRV_KEY"
       base.unique_option :output, type: :string, default: ENV.fetch("SPECWRK_OUT", ".specwrk/"), aliases: ["-o"], desc: "Directory where worker output is stored. Overrides SPECWRK_OUT"
       base.unique_option :group_by, values: %w[file timings], default: ENV.fetch("SPECWERK_SRV_GROUP_BY", "timings"), desc: "How examples will be grouped for workers; fallback to file if no timings are found. Overrides SPECWERK_SRV_GROUP_BY"
-      base.unique_option :single_seed_per_run, type: :boolean, default: false, desc: "Only allow one seed per run. Useful for CI where many nodes may seed at the same time"
       base.unique_option :verbose, type: :boolean, default: false, desc: "Run in verbose mode. Default false."
     end
 
@@ -211,7 +210,8 @@ module Specwrk
       status "Server responding ✓"
       status "Seeding #{examples.length} examples..."
       Client.new.seed(examples)
-
+      file_count = examples.group_by { |e| e[:file_path] }.keys.size
+      status "🌱 Seeded #{examples.size} examples across #{file_count} files"
     end
 
     if Specwrk.wait_for_pids_exit([seed_pid]).value?(1)
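The two added lines compute the seed summary by grouping on `:file_path`. A quick illustration of that arithmetic, using hypothetical example hashes shaped like the seed payload:

```ruby
# Hypothetical seed payload entries; only :file_path matters for the count.
examples = [
  {id: "./spec/a_spec.rb[1:1]", file_path: "./spec/a_spec.rb"},
  {id: "./spec/a_spec.rb[1:2]", file_path: "./spec/a_spec.rb"},
  {id: "./spec/b_spec.rb[1:1]", file_path: "./spec/b_spec.rb"}
]

file_count = examples.group_by { |e| e[:file_path] }.keys.size
puts "🌱 Seeded #{examples.size} examples across #{file_count} files"
# => 🌱 Seeded 3 examples across 2 files
```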
data/lib/specwrk/client.rb
CHANGED
@@ -105,6 +105,23 @@ module Specwrk
       (response.code == "200") ? true : raise(UnhandledResponseError.new("#{response.code}: #{response.body}"))
     end
 
+    def complete_and_fetch_examples(examples)
+      response = post "/complete_and_pop", body: examples.to_json
+
+      case response.code
+      when "200"
+        JSON.parse(response.body, symbolize_names: true)
+      when "204"
+        raise WaitingForSeedError
+      when "404"
+        raise NoMoreExamplesError
+      when "410"
+        raise CompletedAllExamplesError
+      else
+        raise UnhandledResponseError.new("#{response.code}: #{response.body}")
+      end
+    end
+
     def seed(examples)
       response = post "/seed", body: examples.to_json
 
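The new `complete_and_fetch_examples` folds completion and the next pop into one round trip, mapping each non-200 status onto a distinct exception so callers can branch on queue state instead of parsing bodies. A sketch of how a caller might handle those (the error classes are the ones raised above, assumed to live under `Specwrk`; `client`, `finished_examples`, and the polling policy are illustrative):

```ruby
begin
  next_batch = client.complete_and_fetch_examples(finished_examples)
rescue Specwrk::WaitingForSeedError
  sleep 1 # 204: nothing has been seeded yet, poll again shortly
  retry
rescue Specwrk::NoMoreExamplesError
  next_batch = [] # 404: the queue is drained but peers may still be processing
rescue Specwrk::CompletedAllExamplesError
  next_batch = nil # 410: every example is accounted for; wind the worker down
end
```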
data/lib/specwrk/store/file_adapter.rb
ADDED
@@ -0,0 +1,180 @@
+# frozen_string_literal: true
+
+require "json"
+require "base64"
+
+require "specwrk/store"
+
+module Specwrk
+  class Store
+    class FileAdapter
+      EXT = ".wrk.json"
+
+      THREAD_POOL = Class.new do
+        @work_queue = Queue.new
+
+        @threads = Array.new(ENV.fetch("SPECWRK_SRV_FILE_ADAPTER_THREAD_COUNT", "4").to_i) do
+          Thread.new do
+            loop do
+              @work_queue.pop.call
+            end
+          end
+        end
+
+        class << self
+          def schedule(&blk)
+            @work_queue.push blk
+          end
+        end
+      end
+
+      def initialize(path)
+        @path = path
+        FileUtils.mkdir_p(@path)
+      end
+
+      def [](key)
+        content = read(key.to_s)
+        return unless content
+
+        JSON.parse(content, symbolize_names: true)
+      end
+
+      def []=(key, value)
+        key_string = key.to_s
+        if value.nil?
+          delete(key_string)
+        else
+          filename = filename_for_key(key_string)
+          write(filename, JSON.generate(value))
+          known_key_pairs[key_string] = filename
+        end
+      end
+
+      def keys
+        known_key_pairs.keys
+      end
+
+      def clear
+        FileUtils.rm_rf(@path)
+        FileUtils.mkdir_p(@path)
+
+        @known_key_pairs = nil
+      end
+
+      def delete(*keys)
+        filenames = keys.map { |key| known_key_pairs[key] }.compact
+
+        FileUtils.rm_f(filenames)
+
+        keys.each { |key| known_key_pairs.delete(key) }
+      end
+
+      def merge!(h2)
+        multi_write(h2)
+      end
+
+      def multi_read(*read_keys)
+        known_key_pairs # precache before each thread tries to look them up
+
+        result_queue = Queue.new
+
+        read_keys.each do |key|
+          THREAD_POOL.schedule do
+            result_queue.push([key.to_s, read(key)])
+          end
+        end
+
+        Thread.pass until result_queue.length == read_keys.length
+
+        results = {}
+        until result_queue.empty?
+          result = result_queue.pop
+          next if result.last.nil?
+
+          results[result.first] = JSON.parse(result.last, symbolize_names: true)
+        end
+
+        read_keys.map { |key| [key.to_s, results[key.to_s]] if results.key?(key.to_s) }.compact.to_h # respect order requested in the returned hash
+      end
+
+      def multi_write(hash)
+        known_key_pairs # precache before each thread tries to look them up
+
+        result_queue = Queue.new
+
+        hash_with_filenames = hash.map { |key, value| [key.to_s, [filename_for_key(key.to_s), value]] }.to_h
+        hash_with_filenames.each do |key, (filename, value)|
+          content = JSON.generate(value)
+
+          THREAD_POOL.schedule do
+            result_queue << write(filename, content)
+          end
+        end
+
+        Thread.pass until result_queue.length == hash.length
+        hash_with_filenames.each { |key, (filename, _value)| known_key_pairs[key] = filename }
+      end
+
+      def empty?
+        Dir.empty? @path
+      end
+
+      private
+
+      def write(filename, content)
+        tmp_filename = [filename, "tmp"].join(".")
+
+        File.binwrite(tmp_filename, content)
+
+        FileUtils.mv tmp_filename, filename
+        true
+      end
+
+      def read(key)
+        File.read(known_key_pairs[key]) if known_key_pairs.key? key
+      end
+
+      def filename_for_key(key)
+        File.join(
+          @path,
+          [
+            counter_prefix(key),
+            encode_key(key)
+          ].join("_")
+        ) + EXT
+      end
+
+      def counter_prefix(key)
+        count = keys.index(key) || counter.tap { @counter += 1 }
+
+        "%012d" % count
+      end
+
+      def counter
+        @counter ||= keys.length
+      end
+
+      def encode_key(key)
+        Base64.urlsafe_encode64(key).delete("=")
+      end
+
+      def decode_key(key)
+        encoded_key_part = File.basename(key).delete_suffix(EXT).split(/\A\d+_/).last
+        padding_count = (4 - encoded_key_part.length % 4) % 4
+
+        Base64.urlsafe_decode64(encoded_key_part + ("=" * padding_count))
+      end
+
+      def known_key_pairs
+        @known_key_pairs ||= Dir.entries(@path).sort.map do |filename|
+          next if filename.start_with? "."
+          next unless filename.end_with? EXT
+
+          file_path = File.join(@path, filename)
+          [decode_key(filename), file_path]
+        end.compact.to_h
+      end
+    end
+  end
+end
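FileAdapter persists each key as its own JSON file: a zero-padded insertion counter keeps directory order equal to insertion order, the key itself becomes URL-safe Base64 with padding stripped, and writes land in a `.tmp` file first so the rename is atomic. A minimal sketch of the resulting layout (the directory path is hypothetical; FileUtils is assumed to be required elsewhere in the gem):

```ruby
require "fileutils"
require "specwrk/store/file_adapter"

adapter = Specwrk::Store::FileAdapter.new("/tmp/wrk_demo")
adapter["spec/a_spec.rb[1:1]"] = {file_path: "spec/a_spec.rb"}

# One file per key: <12-digit counter>_<urlsafe base64 of key, no padding>.wrk.json
Dir.children("/tmp/wrk_demo")
# => ["000000000000_c3BlYy9hX3NwZWMucmJbMToxXQ.wrk.json"]

adapter["spec/a_spec.rb[1:1]"]
# => {file_path: "spec/a_spec.rb"}
```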
data/lib/specwrk/store.rb
ADDED
@@ -0,0 +1,241 @@
+# frozen_string_literal: true
+
+require "time"
+require "json"
+
+require "specwrk/store/file_adapter"
+
+module Specwrk
+  class Store
+    MUTEXES = {}
+    MUTEXES_MUTEX = Mutex.new # 🐢🐢🐢🐢
+
+    class << self
+      def mutex_for(path)
+        MUTEXES_MUTEX.synchronize do
+          MUTEXES[path] ||= Mutex.new
+        end
+      end
+    end
+
+    def initialize(path, thread_safe_reads: true)
+      @path = path
+      @thread_safe_reads = thread_safe_reads
+    end
+
+    def [](key)
+      sync(thread_safe: thread_safe_reads) { adapter[key.to_s] }
+    end
+
+    def multi_read(*keys)
+      sync(thread_safe: thread_safe_reads) { adapter.multi_read(*keys) }
+    end
+
+    def []=(key, value)
+      sync do
+        adapter[key.to_s] = value
+      end
+    end
+
+    def keys
+      all_keys = sync(thread_safe: thread_safe_reads) do
+        adapter.keys
+      end
+
+      all_keys.reject { |k| k.start_with? "____" }
+    end
+
+    def length
+      keys.length
+    end
+
+    def any?
+      !empty?
+    end
+
+    def empty?
+      sync(thread_safe: thread_safe_reads) do
+        adapter.empty?
+      end
+    end
+
+    def delete(*keys)
+      sync { adapter.delete(*keys) }
+    end
+
+    def merge!(h2)
+      h2.transform_keys!(&:to_s)
+      sync { adapter.merge!(h2) }
+    end
+
+    def clear
+      sync { adapter.clear }
+    end
+
+    def to_h
+      sync(thread_safe: thread_safe_reads) do
+        adapter.multi_read(*keys).transform_keys!(&:to_sym)
+      end
+    end
+
+    def inspect
+      reload.to_h.dup
+    end
+
+    # Bypass any cached values. Helpful when you have two instances
+    # of the same store where one mutates data and the other needs to check
+    # on the status of that data (i.e. endpoint tests)
+    def reload
+      @adapter = nil
+      self
+    end
+
+    private
+
+    attr_reader :thread_safe_reads
+
+    def sync(thread_safe: true)
+      if !thread_safe || mutex.owned?
+        yield
+      else
+        mutex.synchronize { yield }
+      end
+    end
+
+    def adapter
+      @adapter ||= FileAdapter.new(@path)
+    end
+
+    def mutex
+      @mutex ||= self.class.mutex_for(@path)
+    end
+  end
+
+  class PendingStore < Store
+    RUN_TIME_BUCKET_MAXIMUM_KEY = :____run_time_bucket_maximum
+
+    def run_time_bucket_maximum=(val)
+      @run_time_bucket_maximum = self[RUN_TIME_BUCKET_MAXIMUM_KEY] = val
+    end
+
+    def run_time_bucket_maximum
+      @run_time_bucket_maximum ||= self[RUN_TIME_BUCKET_MAXIMUM_KEY]
+    end
+
+    def shift_bucket
+      sync do
+        return bucket_by_file unless run_time_bucket_maximum&.positive?
+
+        case ENV["SPECWRK_SRV_GROUP_BY"]
+        when "file"
+          bucket_by_file
+        else
+          bucket_by_timings
+        end
+      end
+    end
+
+    private
+
+    # Take elements from the hash where the file_path is the same
+    # Expects that the examples were merged in order of filename
+    def bucket_by_file
+      bucket = []
+      consumed_keys = []
+
+      all_keys = keys
+      key = all_keys.first
+      return [] if key.nil?
+
+      file_path = self[key][:file_path]
+
+      catch(:full) do
+        all_keys.each_slice(24).each do |key_group|
+          examples = multi_read(*key_group)
+
+          examples.each do |key, example|
+            throw :full unless example[:file_path] == file_path
+
+            bucket << example
+            consumed_keys << key
+          end
+        end
+      end
+
+      delete(*consumed_keys)
+      bucket
+    end
+
+    # Take elements from the hash until the average runtime bucket has filled
+    def bucket_by_timings
+      bucket = []
+      consumed_keys = []
+
+      estimated_run_time_total = 0
+
+      catch(:full) do
+        keys.each_slice(25).each do |key_group|
+          examples = multi_read(*key_group)
+
+          examples.each do |key, example|
+            estimated_run_time_total += example[:expected_run_time] || run_time_bucket_maximum
+            throw :full if estimated_run_time_total > run_time_bucket_maximum && bucket.length.positive?
+
+            bucket << example
+            consumed_keys << key
+          end
+        end
+      end
+
+      delete(*consumed_keys)
+      bucket
+    end
+  end
+
+  class CompletedStore < Store
+    def dump
+      @run_times = []
+      @first_started_at = Time.new(2999, 1, 1, 0, 0, 0) # TODO: Make future proof /s
+      @last_finished_at = Time.new(1900, 1, 1, 0, 0, 0)
+
+      @output = {
+        file_totals: Hash.new { |h, filename| h[filename] = 0.0 },
+        meta: {failures: 0, passes: 0, pending: 0},
+        examples: {}
+      }
+
+      to_h.values.each { |example| calculate(example) }
+
+      @output[:meta][:total_run_time] = @run_times.sum
+      @output[:meta][:average_run_time] = @output[:meta][:total_run_time] / [@run_times.length, 1].max.to_f
+      @output[:meta][:first_started_at] = @first_started_at.iso8601(6)
+      @output[:meta][:last_finished_at] = @last_finished_at.iso8601(6)
+
+      @output
+    end
+
+    private
+
+    def calculate(example)
+      @run_times << example[:run_time]
+      @output[:file_totals][example[:file_path]] += example[:run_time]
+
+      started_at = Time.parse(example[:started_at])
+      finished_at = Time.parse(example[:finished_at])
+
+      @first_started_at = started_at if started_at < @first_started_at
+      @last_finished_at = finished_at if finished_at > @last_finished_at
+
+      case example[:status]
+      when "passed"
+        @output[:meta][:passes] += 1
+      when "failed"
+        @output[:meta][:failures] += 1
+      when "pending"
+        @output[:meta][:pending] += 1
+      end
+
+      @output[:examples][example[:id]] = example
+    end
+  end
+end
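Store wraps a FileAdapter behind one process-wide mutex per path (`Store.mutex_for`), so every thread touching the same directory serializes its mutations, while PendingStore layers the bucketing policy on top. A sketch of how the timings mode carves off work, with hypothetical paths and example hashes:

```ruby
require "specwrk/store"

pending = Specwrk::PendingStore.new("/tmp/wrk_demo/pending")
pending.run_time_bucket_maximum = 2.5 # seconds; seeded as mean + stddev

pending.merge!(
  "a[1:1]" => {id: "a[1:1]", file_path: "a_spec.rb", expected_run_time: 1.4},
  "a[1:2]" => {id: "a[1:2]", file_path: "a_spec.rb", expected_run_time: 1.3},
  "b[1:1]" => {id: "b[1:1]", file_path: "b_spec.rb", expected_run_time: 0.2}
)

# With SPECWRK_SRV_GROUP_BY unset, bucket_by_timings stops once the running
# estimate would exceed the 2.5s maximum and the bucket is already non-empty:
pending.shift_bucket.map { |e| e[:id] } # => ["a[1:1]"]           (1.4s; adding 1.3s would overflow)
pending.shift_bucket.map { |e| e[:id] } # => ["a[1:2]", "b[1:1]"] (1.5s total)
```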
data/lib/specwrk/version.rb
CHANGED
data/lib/specwrk/web/app.rb
CHANGED
@@ -21,8 +21,6 @@ require "specwrk/web/endpoints"
 module Specwrk
   class Web
     class App
-      REAP_INTERVAL = 330 # HTTP connection timeout + some buffer
-
       class << self
         def run!
           Process.setproctitle "specwrk-server"
@@ -74,10 +72,6 @@ module Specwrk
         end
       end
 
-      def initialize
-        @reaper_thread = Thread.new { reaper } unless ENV["SPECWRK_SRV_SINGLE_RUN"]
-      end
-
       def call(env)
         env[:request] ||= Rack::Request.new(env)
 
@@ -96,6 +90,8 @@ module Specwrk
           Endpoints::Pop
         when ["POST", "/complete"]
           Endpoints::Complete
+        when ["POST", "/complete_and_pop"]
+          Endpoints::CompleteAndPop
         when ["POST", "/seed"]
           Endpoints::Seed
         when ["GET", "/report"]
@@ -106,26 +102,6 @@ module Specwrk
           Endpoints::NotFound
         end
       end
-
-      def reaper
-        until Specwrk.force_quit
-          sleep REAP_INTERVAL
-
-          reap
-        end
-      end
-
-      def reap
-        Web::WORKERS.each do |run, workers|
-          most_recent_last_seen_at = workers.map { |id, worker| worker[:last_seen_at] }.max
-          next unless most_recent_last_seen_at
-
-          # Don't consider runs which aren't at least REAP_INTERVAL sec stale
-          if most_recent_last_seen_at < Time.now - REAP_INTERVAL
-            Web.clear_run_queues(run)
-          end
-        end
-      end
     end
   end
 end
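With the reaper and its in-memory worker table gone, App is reduced to the route table, and `POST /complete_and_pop` slots in beside `/complete`. A small sketch of the lookup that case table performs (Rack::MockRequest is standard Rack; the run and worker headers a real request also carries are not shown in this diff):

```ruby
require "rack/mock"

env = Rack::MockRequest.env_for("/complete_and_pop", method: "POST")
request = Rack::Request.new(env)

# App#call matches on the [method, path] pair:
[request.request_method, request.path]
# => ["POST", "/complete_and_pop"]  -> dispatched to Endpoints::CompleteAndPop
```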
data/lib/specwrk/web/auth.rb
CHANGED
data/lib/specwrk/web/endpoints.rb
CHANGED
@@ -2,18 +2,41 @@
 
 require "json"
 
+require "specwrk/store"
+
 module Specwrk
   class Web
     module Endpoints
       class Base
+        attr_reader :started_at
+
         def initialize(request)
           @request = request
-
-          worker[:first_seen_at] ||= Time.now
-          worker[:last_seen_at] = Time.now
         end
 
         def response
+          before_lock
+
+          return with_response unless run_id # No run_id, no datastore usage in the endpoint
+
+          payload # parse the payload before any locking
+
+          worker[:first_seen_at] ||= Time.now.iso8601
+          worker[:last_seen_at] = Time.now.iso8601
+
+          final_response = with_lock do
+            started_at = metadata[:started_at] ||= Time.now.iso8601
+            @started_at = Time.parse(started_at)
+
+            with_response
+          end
+
+          after_lock
+
+          final_response
+        end
+
+        def with_response
           not_found
         end
 
@@ -21,15 +44,25 @@
 
         attr_reader :request
 
+        def before_lock
+        end
+
+        def after_lock
+        end
+
         def not_found
-          [404, {"
+          [404, {"Content-Type" => "text/plain"}, ["This is not the path you're looking for, 'ol chap..."]]
         end
 
         def ok
-          [200, {"
+          [200, {"Content-Type" => "text/plain"}, ["OK, 'ol chap"]]
         end
 
         def payload
+          return unless request.content_type&.start_with?("application/json")
+          return unless request.post? || request.put? || request.delete?
+          return if body.empty?
+
           @payload ||= JSON.parse(body, symbolize_names: true)
         end
 
@@ -37,24 +70,28 @@
           @body ||= request.body.read
         end
 
-        def
-
+        def pending
+          @pending ||= PendingStore.new(File.join(datastore_path, "pending"))
+        end
+
+        def processing
+          @processing ||= Store.new(File.join(datastore_path, "processing"))
         end
 
-        def
-
+        def completed
+          @completed ||= CompletedStore.new(File.join(datastore_path, "completed"))
         end
 
-        def
-
+        def metadata
+          @metadata ||= Store.new(File.join(datastore_path, "metadata"), thread_safe_reads: false)
         end
 
-        def
-
+        def run_times
+          @run_times ||= Store.new(File.join(ENV["SPECWRK_OUT"], "run_times"), thread_safe_reads: false)
         end
 
         def worker
-          workers
+          @worker ||= Store.new(File.join(datastore_path, "workers", request.get_header("HTTP_X_SPECWRK_ID").to_s))
         end
 
         def run_id
@@ -62,7 +99,24 @@
         end
 
         def run_report_file_path
-          @run_report_file_path ||= File.join(
+          @run_report_file_path ||= File.join(datastore_path, "#{started_at.strftime("%Y%m%dT%H%M%S")}-report.json").to_s
+        end
+
+        def datastore_path
+          @datastore_path ||= File.join(ENV["SPECWRK_OUT"], run_id).to_s.tap do |path|
+            FileUtils.mkdir_p(path) unless File.directory?(path)
+          end
+        end
+
+        def with_lock
+          Thread.pass until lock_file.flock(File::LOCK_EX)
+          yield
+        ensure
+          lock_file.flock(File::LOCK_UN)
+        end
+
+        def lock_file
+          @lock_file ||= File.open(File.join(datastore_path, "lock"), "a")
         end
       end
 
@@ -70,105 +124,182 @@
       NotFound = Class.new(Base)
 
       class Health < Base
-        def
+        def with_response
           [200, {}, []]
         end
       end
 
       class Heartbeat < Base
-        def
+        def with_response
           ok
         end
       end
 
       class Seed < Base
-        def
-
-
-          examples = payload.map { |hash| [hash[:id], hash] }.to_h
+        def before_lock
+          examples_with_run_times
+        end
 
-
+        def with_response
+          pending.clear
+          new_run_time_bucket_maximums = [pending.run_time_bucket_maximum, @seeds_run_time_bucket_maximum.to_f].compact
+          pending.run_time_bucket_maximum = new_run_time_bucket_maximums.sum.to_f / new_run_time_bucket_maximums.length.to_f
 
-
-
-
+          pending.merge!(examples_with_run_times)
+          processing.clear
+          completed.clear
 
           ok
         end
-      end
 
-
-
-
+        def examples_with_run_times
+          @examples_with_run_times ||= begin
+            unsorted_examples_with_run_times = []
+            all_ids = payload.map { |example| example[:id] }
+            all_run_times = run_times.multi_read(*all_ids)
+
           payload.each do |example|
-
-
+              run_time = all_run_times[example[:id]]
+
+              unsorted_examples_with_run_times << [example[:id], example.merge(expected_run_time: run_time)]
+            end
+
+            sorted_examples_with_run_times = if sort_by == :timings
+              unsorted_examples_with_run_times.sort_by do |entry|
+                -(entry.last[:expected_run_time] || Float::INFINITY)
+              end
+            else
+              unsorted_examples_with_run_times.sort_by do |entry|
+                entry.last[:file_path]
+              end
           end
+
+            @seeds_run_time_bucket_maximum = run_time_bucket_maximum(all_run_times.values.compact)
+            @examples_with_run_times = sorted_examples_with_run_times.to_h
          end
+        end
+
+        private
 
-
-
+        # Average + standard deviation
+        def run_time_bucket_maximum(values)
+          return 0 if values.length.zero?
+
+          mean = values.sum.to_f / values.size
+          variance = values.map { |v| (v - mean)**2 }.sum / values.size
+          (mean + Math.sqrt(variance)).round(2)
+        end
+
+        def sort_by
+          if ENV["SPECWRK_SRV_GROUP_BY"] == "file" || run_times.empty?
+            :file
+          else
+            :timings
           end
+        end
+      end
+
+      class Complete < Base
+        def with_response
+          warn "[DEPRECATED] This endpoint will be retired in favor of CompleteAndPop. Upgrade your clients."
+          completed.merge!(completed_examples)
+          processing.delete(*completed_examples.keys)
 
           ok
         end
-      end
 
-
-        def response
-          processing_queue.synchronize do |processing_queue_hash|
-            @examples = pending_queue.shift_bucket
+        private
 
-
-
-
-          end
+        def completed_examples
+          @completed_data ||= payload.map { |example| [example[:id], example] if processing[example[:id]] }.compact.to_h
+        end
 
-
-
-
-
-
-
+        # We don't care about exact values here, just approximate run times are fine
+        # So if we overwrite run times from another process it is nbd
+        def after_lock
+          run_time_data = payload.map { |example| [example[:id], example[:run_time]] }.to_h
+          run_times.merge! run_time_data
+        end
+      end
+
+      class Pop < Base
+        def with_response
+          @examples = pending.shift_bucket
+
+          processing_data = @examples.map { |example| [example[:id], example] }.to_h
+          processing.merge!(processing_data)
+
+          if @examples.any?
+            [200, {"Content-Type" => "application/json"}, [JSON.generate(@examples)]]
+          elsif pending.empty? && processing.empty? && completed.empty?
+            [204, {"Content-Type" => "text/plain"}, ["Waiting for sample to be seeded."]]
+          elsif completed.any? && processing.empty?
+            [410, {"Content-Type" => "text/plain"}, ["That's a good lad. Run along now and go home."]]
           else
             not_found
           end
         end
       end
 
-      class
-        def
-
-
+      class CompleteAndPop < Base
+        def with_response
+          completed.merge!(completed_examples)
+          run_times.merge! run_time_data
+          processing.delete(*completed_examples.keys)
+
+          @examples = pending.shift_bucket
+
+          processing_data = @examples.map { |example| [example[:id], example] }.to_h
+          processing.merge!(processing_data)
+
+          if @examples.any?
+            [200, {"Content-Type" => "application/json"}, [JSON.generate(@examples)]]
+          elsif pending.empty? && processing.empty? && completed.empty?
+            [204, {"Content-Type" => "text/plain"}, ["Waiting for sample to be seeded."]]
+          elsif completed.any? && processing.empty?
+            [410, {"Content-Type" => "text/plain"}, ["That's a good lad. Run along now and go home."]]
           else
-
+            not_found
           end
         end
 
         private
 
-        def
-
+        def before_lock
+          completed_examples
+          run_time_data
+        end
 
-
-
+        def completed_examples
+          @completed_data ||= payload.map { |example| [example[:id], example] if processing[example[:id]] }.compact.to_h
+        end
 
-
-
-
-
+        # We don't care about exact values here, just approximate run times are fine
+        # So if we overwrite run times from another process it is nbd
+        def after_lock
+          # run_time_data = payload.map { |example| [example[:id], example[:run_time]] }.to_h
+          # run_times.merge! run_time_data
        end
 
-        def
-          @
+        def run_time_data
+          @run_time_data ||= payload.map { |example| [example[:id], example[:run_time]] }.to_h
+        end
+      end
+
+      class Report < Base
+        def with_response
+          [200, {"Content-Type" => "application/json"}, [JSON.generate(completed.dump)]]
         end
       end
 
       class Shutdown < Base
-        def
+        def with_response
+          pending.clear
+          processing.clear
+
           interupt! if ENV["SPECWRK_SRV_SINGLE_RUN"]
 
-          [200, {"
+          [200, {"Content-Type" => "text/plain"}, ["✌️"]]
         end
 
         def interupt!
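The structural change here is coordination: endpoints now run their datastore work inside `with_lock`, an exclusive flock on a per-run lock file, which serializes mutations across threads and across server processes sharing SPECWRK_OUT, while the `before_lock`/`after_lock` hooks keep JSON parsing and run-time bookkeeping outside the critical section. A standalone sketch of the same flock pattern (the class name and paths are hypothetical; the gem's version spins on `Thread.pass` instead of blocking directly):

```ruby
require "fileutils"

class RunLock
  def initialize(dir)
    FileUtils.mkdir_p(dir)
    # Append mode creates the file on first use and never truncates it.
    @file = File.open(File.join(dir, "lock"), "a")
  end

  # Advisory exclusive lock: only one process or thread holding the same
  # lock file can be inside the block at a time.
  def with_lock
    @file.flock(File::LOCK_EX)
    yield
  ensure
    @file.flock(File::LOCK_UN)
  end
end

lock = RunLock.new("/tmp/wrk_demo/run-1")
lock.with_lock { puts "mutating this run's stores exclusively" }
```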
data/lib/specwrk/web.rb
CHANGED
@@ -1,22 +1,6 @@
 # frozen_string_literal: true
 
-require "specwrk/queue"
-
 module Specwrk
   class Web
-    PENDING_QUEUES = Queue.new { |h, key| h[key] = PendingQueue.new }
-    PROCESSING_QUEUES = Queue.new { |h, key| h[key] = Queue.new }
-    COMPLETED_QUEUES = Queue.new { |h, key| h[key] = CompletedQueue.new }
-    WORKERS = Hash.new { |h, key| h[key] = Hash.new { |h, key| h[key] = {} } }
-
-    def self.clear_queues
-      [PENDING_QUEUES, PROCESSING_QUEUES, COMPLETED_QUEUES, WORKERS].each(&:clear)
-    end
-
-    def self.clear_run_queues(run)
-      [PENDING_QUEUES, PROCESSING_QUEUES, COMPLETED_QUEUES, WORKERS].each do |queue|
-        queue.delete(run)
-      end
-    end
   end
 end
data/lib/specwrk/worker.rb
CHANGED
@@ -69,15 +69,21 @@ module Specwrk
     end
 
     def execute
-      executor.run
+      executor.run next_examples
       complete_examples
     rescue UnhandledResponseError => e
-      # If fetching examples fails we can just try again so warn and return
+      # If fetching examples via next_exampels fails we can just try again so warn and return
+      # Expects complete_examples to rescue this error if raised in that method
      warn e.message
     end
 
+    def next_examples
+      return @next_examples if @next_examples&.length&.positive?
+      client.fetch_examples
+    end
+
     def complete_examples
-      client.
+      @next_examples = client.complete_and_fetch_examples executor.examples
     rescue UnhandledResponseError => e
       # I do not think we should so lightly abandon the completion of executed examples
       # try to complete until successful or terminated
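Worker-side, completion and fetching are now pipelined: the first batch still comes from `fetch_examples`, but every later batch arrives as the return value of `complete_and_fetch_examples`, halving the requests per cycle. An illustrative driver for that loop (method and instance names come from the diff; the real loop lives elsewhere in the worker, and error handling is trimmed to the terminal case):

```ruby
begin
  loop do
    examples = next_examples # cached @next_examples, or client.fetch_examples
    executor.run examples
    @next_examples = client.complete_and_fetch_examples(executor.examples)
  end
rescue Specwrk::CompletedAllExamplesError
  # 410 from the server: all examples accounted for, shut down cleanly
end
```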
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: specwrk
 version: !ruby/object:Gem::Version
-  version: 0.8.0
+  version: 0.10.0
 platform: ruby
 authors:
 - Daniel Westendorf
@@ -9,6 +9,34 @@ bindir: exe
 cert_chain: []
 date: 1980-01-02 00:00:00.000000000 Z
 dependencies:
+- !ruby/object:Gem::Dependency
+  name: json
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: base64
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: dry-cli
   requirement: !ruby/object:Gem::Requirement
@@ -152,7 +180,6 @@ files:
 - config.ru
 - docker/Dockerfile.server
 - docker/entrypoint.server.sh
-- docker/pitchfork.conf
 - examples/circleci/config.yml
 - examples/github/specwrk-multi-node.yml
 - examples/github/specwrk-single-node.yml
@@ -163,7 +190,8 @@ files:
 - lib/specwrk/client.rb
 - lib/specwrk/hookable.rb
 - lib/specwrk/list_examples.rb
-- lib/specwrk/queue.rb
+- lib/specwrk/store.rb
+- lib/specwrk/store/file_adapter.rb
 - lib/specwrk/version.rb
 - lib/specwrk/web.rb
 - lib/specwrk/web/app.rb
@@ -196,7 +224,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.6.
+rubygems_version: 3.6.9
 specification_version: 4
 summary: Parallel rspec test runner from a queue of pending jobs.
 test_files: []
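The two new runtime dependencies line up with the new store code: lib/specwrk/store/file_adapter.rb requires both json and base64, and base64 stopped shipping as a default gem in Ruby 3.4, the Dockerfile's base image, so it must be declared. The gemspec lines that would produce these metadata entries presumably read:

```ruby
# specwrk.gemspec (sketch; the gemspec itself is not part of this diff)
spec.add_dependency "json"
spec.add_dependency "base64"
```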
data/docker/pitchfork.conf
DELETED
data/lib/specwrk/queue.rb
DELETED
@@ -1,224 +0,0 @@
-# frozen_string_literal: true
-
-require "time"
-require "json"
-
-module Specwrk
-  # Thread-safe Hash access
-  class Queue
-    attr_reader :created_at
-
-    def initialize(hash = {})
-      @created_at = Time.now
-
-      if block_given?
-        @mutex = Monitor.new # Reentrant locking is required here
-        # It's possible to enter the proc from two threads, so we need to ||= in case
-        # one thread has set a value prior to the yield.
-        hash.default_proc = proc { |h, key| @mutex.synchronize { yield(h, key) } }
-      end
-
-      @mutex ||= Mutex.new # Monitor is up-to 20% slower than Mutex, so if no block is given, use a mutex
-      @hash = hash
-    end
-
-    def synchronize(&blk)
-      if @mutex.owned?
-        yield(@hash)
-      else
-        @mutex.synchronize { yield(@hash) }
-      end
-    end
-
-    def method_missing(name, *args, &block)
-      if @hash.respond_to?(name)
-        @mutex.synchronize { @hash.public_send(name, *args, &block) }
-      else
-        super
-      end
-    end
-
-    def respond_to_missing?(name, include_private = false)
-      @hash.respond_to?(name, include_private) || super
-    end
-  end
-
-  class PendingQueue < Queue
-    def shift_bucket
-      return bucket_by_file unless previous_run_times
-
-      case ENV["SPECWRK_SRV_GROUP_BY"]
-      when "file"
-        bucket_by_file
-      else
-        bucket_by_timings
-      end
-    end
-
-    def run_time_bucket_threshold
-      return 1 unless previous_run_times
-
-      previous_run_times.dig(:meta, :average_run_time)
-    end
-
-    def previous_run_times
-      return unless ENV["SPECWRK_OUT"]
-
-      @previous_run_times ||= begin
-        return unless previous_run_times_file_path
-        return unless File.exist? previous_run_times_file_path
-
-        raw_data = File.open(previous_run_times_file_path, "r") do |file|
-          file.flock(File::LOCK_SH)
-          file.read
-        end
-
-        @previous_run_times = JSON.parse(raw_data, symbolize_names: true)
-      rescue JSON::ParserError => e
-        warn "#{e.inspect} in file #{previous_run_times_file_path}"
-        nil
-      end
-    end
-
-    def merge_with_previous_run_times!(h2)
-      synchronize do
-        h2.each { |_id, example| merge_example(example) }
-
-        # Sort by exepcted run time, slowest to fastest
-        @hash = @hash.sort_by { |_, example| example[:expected_run_time] }.reverse.to_h
-      end
-    end
-
-    private
-
-    # We want the most recently modified run time file
-    # report files are prefixed with a timestamp, and Dir.glob should order
-    # alphanumericly
-    def previous_run_times_file_path
-      return unless ENV["SPECWRK_OUT"]
-
-      @previous_run_times_file_path ||= Dir.glob(File.join(ENV["SPECWRK_OUT"], "*-report-*.json")).last
-    end
-
-    # Take elements from the hash where the file_path is the same
-    def bucket_by_file
-      bucket = []
-
-      @mutex.synchronize do
-        key = @hash.keys.first
-        break if key.nil?
-
-        file_path = @hash[key][:file_path]
-        @hash.each do |id, example|
-          next unless example[:file_path] == file_path
-
-          bucket << example
-          @hash.delete id
-        end
-      end
-
-      bucket
-    end
-
-    # Take elements from the hash until the average runtime bucket has filled
-    def bucket_by_timings
-      bucket = []
-
-      @mutex.synchronize do
-        estimated_run_time_total = 0
-
-        while estimated_run_time_total < run_time_bucket_threshold
-          key = @hash.keys.first
-          break if key.nil?
-
-          estimated_run_time_total += @hash.dig(key, :expected_run_time)
-          break if estimated_run_time_total > run_time_bucket_threshold && bucket.length.positive?
-
-          bucket << @hash[key]
-          @hash.delete key
-        end
-      end
-
-      bucket
-    end
-
-    # Ensure @mutex is held when calling this method
-    def merge_example(example)
-      return if @hash.key? example[:id]
-      return if @hash.key? example[:file_path]
-
-      @hash[example[:id]] = if previous_run_times
-        example.merge!(
-          expected_run_time: previous_run_times.dig(:examples, example[:id].to_sym, :run_time) || 99999.9 # run "unknown" files first
-        )
-      else
-        example.merge!(
-          expected_run_time: 99999.9 # run "unknown" files first
-        )
-      end
-    end
-  end
-
-  class CompletedQueue < Queue
-    def dump_and_write(path)
-      write_output_to(path, dump)
-    end
-
-    def dump
-      @mutex.synchronize do
-        @run_times = []
-        @first_started_at = Time.new(2999, 1, 1, 0, 0, 0) # TODO: Make future proof /s
-        @last_finished_at = Time.new(1900, 1, 1, 0, 0, 0)
-
-        @output = {
-          file_totals: Hash.new { |h, filename| h[filename] = 0.0 },
-          meta: {failures: 0, passes: 0, pending: 0},
-          examples: {}
-        }
-
-        @hash.values.each { |example| calculate(example) }
-
-        @output[:meta][:total_run_time] = @run_times.sum
-        @output[:meta][:average_run_time] = @output[:meta][:total_run_time] / [@run_times.length, 1].max.to_f
-        @output[:meta][:first_started_at] = @first_started_at.iso8601(6)
-        @output[:meta][:last_finished_at] = @last_finished_at.iso8601(6)
-
-        @output
-      end
-    end
-
-    private
-
-    def calculate(example)
-      @run_times << example[:run_time]
-      @output[:file_totals][example[:file_path]] += example[:run_time]
-
-      started_at = Time.parse(example[:started_at])
-      finished_at = Time.parse(example[:finished_at])
-
-      @first_started_at = started_at if started_at < @first_started_at
-      @last_finished_at = finished_at if finished_at > @last_finished_at
-
-      case example[:status]
-      when "passed"
-        @output[:meta][:passes] += 1
-      when "failed"
-        @output[:meta][:failures] += 1
-      when "pending"
-        @output[:meta][:pending] += 1
-      end
-
-      @output[:examples][example[:id]] = example
-    end
-
-    def write_output_to(path, output)
-      File.open(path, "w") do |file|
-        file.flock(File::LOCK_EX)
-
-        file.write JSON.pretty_generate(output)
-
-        file.flock(File::LOCK_UN)
-      end
-    end
-  end
-end