specwrk 0.8.0 → 0.9.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 2122e3e3925fdf1c12b6d33899c52ba1b8287e46e7518754ae0ca8188ac149ce
-  data.tar.gz: 89579242f18a5f17aac60ac52664e1197cf61ef35228d142298b4ad250057a25
+  metadata.gz: 657cd8e611ea7ea6344dc5bd540892e69d801ea5afd410b1be2e7a6d3f73658e
+  data.tar.gz: c066775a6f57452cd55c49eec7604b8e264881ad085d7847ff176240a9ecbe05
 SHA512:
-  metadata.gz: c357e649df3ad4533290c7e53be49d1f4a3bf86870b5cc01ed4bc2219393bc026d4056e65c82b21c9f251724fe3adcebf6efda7a2643b85d873e4ede7efe17d3
-  data.tar.gz: 1aea05a1f33cd4bfeddbbe76d7d97c7dd3535ef4d1c24b5116042bcfe37201acc7a70c0839a6ef28ba9a5ecd9380535a7007c8a118e5277fc8d798899d7f28e1
+  metadata.gz: 076a41a98119ccfe0108c76c32341d8cbf6642da3f37f80276a61ed885fbca9ddaf31c597b8462ddeb608c67d7e8787f5cefd606cb4b977aeef715e9a5cdb0bb
+  data.tar.gz: 39891db9a0400196ddaba587e7014aa00e485c65ce37efc997998f854dd98d8cabfef39806657bec15e71c43a29743dda4081b35c2022a917a5503e4e161cea4
data/docker/Dockerfile.server CHANGED
@@ -1,6 +1,11 @@
 FROM ruby:3.4-alpine
 
-RUN apk add --no-cache build-base
+RUN apk add --no-cache \
+  build-base \
+  ruby-dev \
+  linux-headers \
+  zlib-dev \
+  libffi-dev
 
 WORKDIR /app
 
@@ -8,15 +13,14 @@ RUN mkdir .specwrk/
 
 ARG SPECWRK_SRV_PORT=5138
 ARG SPECWRK_VERSION=latest
-ARG GEM_FILE=specwrk-$SPECWRK_VERSION.gem
+ARG GEMFILE=specwrk-$SPECWRK_VERSION.gem
 
-COPY $GEM_FILE ./
-RUN gem install ./$GEM_FILE --no-document
-RUN rm ./$GEM_FILE
+COPY $GEMFILE ./
+RUN gem install ./$GEMFILE --no-document
+RUN rm ./$GEMFILE
 
-RUN gem install pitchfork thruster
+RUN gem install puma thruster
 COPY config.ru ./
-COPY docker/pitchfork.conf ./
 
 COPY docker/entrypoint.server.sh /usr/local/bin/entrypoint
 RUN chmod +x /usr/local/bin/entrypoint
data/docker/entrypoint.server.sh CHANGED
@@ -2,6 +2,6 @@
 
 export THRUSTER_HTTP_PORT=${PORT:-5138}
 export THRUSTER_TARGET_PORT=3000
-export THRUSTER_HTTP_IDLE_TIMEOUT=${IDLE_TIMEOUT:-300}
+export THRUSTER_HTTP_IDLE_TIMEOUT=${IDLE_TIMEOUT:-305}
 
-exec thrust pitchfork -c pitchfork.conf
+exec thrust puma --workers 0 --bind tcp://127.0.0.1:3000 --threads ${PUMA_THREADS:-1} --workers ${PUMA_WORKERS:-0}
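For readers who don't speak puma's CLI, the flag string above maps roughly onto puma's Ruby configuration DSL as below. This is an illustrative sketch, not a file shipped in the gem; it assumes the same PUMA_WORKERS/PUMA_THREADS defaults the entrypoint uses.

    # Hypothetical puma config equivalent to the entrypoint's flags (not shipped in the gem)
    workers Integer(ENV.fetch("PUMA_WORKERS", "0"))  # 0 = single-process mode
    thread_count = Integer(ENV.fetch("PUMA_THREADS", "1"))
    threads thread_count, thread_count               # min and max threads per worker
    bind "tcp://127.0.0.1:3000"                      # thruster proxies requests to this port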
data/lib/specwrk/store/file_adapter.rb ADDED
@@ -0,0 +1,180 @@
+# frozen_string_literal: true
+
+require "json"
+require "base64"
+
+require "specwrk/store"
+
+module Specwrk
+  class Store
+    class FileAdapter
+      EXT = ".wrk.json"
+
+      THREAD_POOL = Class.new do
+        @work_queue = Queue.new
+
+        @threads = Array.new(ENV.fetch("SPECWRK_SRV_FILE_ADAPTER_THREAD_COUNT", "4").to_i) do
+          Thread.new do
+            loop do
+              @work_queue.pop.call
+            end
+          end
+        end
+
+        class << self
+          def schedule(&blk)
+            @work_queue.push blk
+          end
+        end
+      end
+
+      def initialize(path)
+        @path = path
+        FileUtils.mkdir_p(@path)
+      end
+
+      def [](key)
+        content = read(key.to_s)
+        return unless content
+
+        JSON.parse(content, symbolize_names: true)
+      end
+
+      def []=(key, value)
+        key_string = key.to_s
+        if value.nil?
+          delete(key_string)
+        else
+          filename = filename_for_key(key_string)
+          write(filename, JSON.generate(value))
+          known_key_pairs[key_string] = filename
+        end
+      end
+
+      def keys
+        known_key_pairs.keys
+      end
+
+      def clear
+        FileUtils.rm_rf(@path)
+        FileUtils.mkdir_p(@path)
+
+        @known_key_pairs = nil
+      end
+
+      def delete(*keys)
+        filenames = keys.map { |key| known_key_pairs[key] }.compact
+
+        FileUtils.rm_f(filenames)
+
+        keys.each { |key| known_key_pairs.delete(key) }
+      end
+
+      def merge!(h2)
+        multi_write(h2)
+      end
+
+      def multi_read(*read_keys)
+        known_key_pairs # precache before each thread tries to look them up
+
+        result_queue = Queue.new
+
+        read_keys.each do |key|
+          THREAD_POOL.schedule do
+            result_queue.push([key.to_s, read(key)])
+          end
+        end
+
+        Thread.pass until result_queue.length == read_keys.length
+
+        results = {}
+        until result_queue.empty?
+          result = result_queue.pop
+          next if result.last.nil?
+
+          results[result.first] = JSON.parse(result.last, symbolize_names: true)
+        end
+
+        read_keys.map { |key| [key.to_s, results[key.to_s]] if results.key?(key.to_s) }.compact.to_h # respect order requested in the returned hash
+      end
+
+      def multi_write(hash)
+        known_key_pairs # precache before each thread tries to look them up
+
+        result_queue = Queue.new
+
+        hash_with_filenames = hash.map { |key, value| [key.to_s, [filename_for_key(key.to_s), value]] }.to_h
+        hash_with_filenames.each do |key, (filename, value)|
+          content = JSON.generate(value)
+
+          THREAD_POOL.schedule do
+            result_queue << write(filename, content)
+          end
+        end
+
+        Thread.pass until result_queue.length == hash.length
+        hash_with_filenames.each { |key, (filename, _value)| known_key_pairs[key] = filename }
+      end
+
+      def empty?
+        Dir.empty? @path
+      end
+
+      private
+
+      def write(filename, content)
+        tmp_filename = [filename, "tmp"].join(".")
+
+        File.binwrite(tmp_filename, content)
+
+        FileUtils.mv tmp_filename, filename
+        true
+      end
+
+      def read(key)
+        File.read(known_key_pairs[key]) if known_key_pairs.key? key
+      end
+
+      def filename_for_key(key)
+        File.join(
+          @path,
+          [
+            counter_prefix(key),
+            encode_key(key)
+          ].join("_")
+        ) + EXT
+      end
+
+      def counter_prefix(key)
+        count = keys.index(key) || counter.tap { @counter += 1 }
+
+        "%012d" % count
+      end
+
+      def counter
+        @counter ||= keys.length
+      end
+
+      def encode_key(key)
+        Base64.urlsafe_encode64(key).delete("=")
+      end
+
+      def decode_key(key)
+        encoded_key_part = File.basename(key).delete_suffix(EXT).split(/\A\d+_/).last
+        padding_count = (4 - encoded_key_part.length % 4) % 4
+
+        Base64.urlsafe_decode64(encoded_key_part + ("=" * padding_count))
+      end
+
+      def known_key_pairs
+        @known_key_pairs ||= Dir.entries(@path).sort.map do |filename|
+          next if filename.start_with? "."
+          next unless filename.end_with? EXT
+
+          file_path = File.join(@path, filename)
+          [decode_key(filename), file_path]
+        end.compact.to_h
+      end
+    end
+  end
+end
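To make the adapter's contract concrete, here is a minimal usage sketch. It is illustrative only and not shipped in the gem; the /tmp path and keys are invented. Each key becomes one .wrk.json file whose name is a zero-padded insertion counter joined to the urlsafe-Base64 of the key.

    require "fileutils"
    require "specwrk/store/file_adapter"

    adapter = Specwrk::Store::FileAdapter.new("/tmp/specwrk_demo")  # hypothetical path

    adapter["example-1"] = {file_path: "spec/a_spec.rb", run_time: 0.5}
    adapter["example-1"]  #=> {file_path: "spec/a_spec.rb", run_time: 0.5} (keys symbolized on read)
    adapter.keys          #=> ["example-1"]

    # Bulk reads and writes fan out over the adapter's internal thread pool:
    adapter.merge!("example-2" => {file_path: "spec/b_spec.rb", run_time: 0.2})
    adapter.multi_read("example-1", "example-2").keys  #=> ["example-1", "example-2"]

    adapter.clear  # removes the directory contents and resets the cached key index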
data/lib/specwrk/store.rb ADDED
@@ -0,0 +1,241 @@
+# frozen_string_literal: true
+
+require "time"
+require "json"
+
+require "specwrk/store/file_adapter"
+
+module Specwrk
+  class Store
+    MUTEXES = {}
+    MUTEXES_MUTEX = Mutex.new # 🐢🐢🐢🐢
+
+    class << self
+      def mutex_for(path)
+        MUTEXES_MUTEX.synchronize do
+          MUTEXES[path] ||= Mutex.new
+        end
+      end
+    end
+
+    def initialize(path, thread_safe_reads: true)
+      @path = path
+      @thread_safe_reads = thread_safe_reads
+    end
+
+    def [](key)
+      sync(thread_safe: thread_safe_reads) { adapter[key.to_s] }
+    end
+
+    def multi_read(*keys)
+      sync(thread_safe: thread_safe_reads) { adapter.multi_read(*keys) }
+    end
+
+    def []=(key, value)
+      sync do
+        adapter[key.to_s] = value
+      end
+    end
+
+    def keys
+      all_keys = sync(thread_safe: thread_safe_reads) do
+        adapter.keys
+      end
+
+      all_keys.reject { |k| k.start_with? "____" }
+    end
+
+    def length
+      keys.length
+    end
+
+    def any?
+      !empty?
+    end
+
+    def empty?
+      sync(thread_safe: thread_safe_reads) do
+        adapter.empty?
+      end
+    end
+
+    def delete(*keys)
+      sync { adapter.delete(*keys) }
+    end
+
+    def merge!(h2)
+      h2.transform_keys!(&:to_s)
+      sync { adapter.merge!(h2) }
+    end
+
+    def clear
+      sync { adapter.clear }
+    end
+
+    def to_h
+      sync(thread_safe: thread_safe_reads) do
+        adapter.multi_read(*keys).transform_keys!(&:to_sym)
+      end
+    end
+
+    def inspect
+      reload.to_h.dup
+    end
+
+    # Bypass any cached values. Helpful when you have two instances
+    # of the same store where one mutates data and the other needs to check
+    # on the status of that data (i.e. endpoint tests)
+    def reload
+      @adapter = nil
+      self
+    end
+
+    private
+
+    attr_reader :thread_safe_reads
+
+    def sync(thread_safe: true)
+      if !thread_safe || mutex.owned?
+        yield
+      else
+        mutex.synchronize { yield }
+      end
+    end
+
+    def adapter
+      @adapter ||= FileAdapter.new(@path)
+    end
+
+    def mutex
+      @mutex ||= self.class.mutex_for(@path)
+    end
+  end
+
+  class PendingStore < Store
+    RUN_TIME_BUCKET_MAXIMUM_KEY = :____run_time_bucket_maximum
+
+    def run_time_bucket_maximum=(val)
+      @run_time_bucket_maximum = self[RUN_TIME_BUCKET_MAXIMUM_KEY] = val
+    end
+
+    def run_time_bucket_maximum
+      @run_time_bucket_maximum ||= self[RUN_TIME_BUCKET_MAXIMUM_KEY]
+    end
+
+    def shift_bucket
+      sync do
+        return bucket_by_file unless run_time_bucket_maximum&.positive?
+
+        case ENV["SPECWRK_SRV_GROUP_BY"]
+        when "file"
+          bucket_by_file
+        else
+          bucket_by_timings
+        end
+      end
+    end
+
+    private
+
+    # Take elements from the hash where the file_path is the same
+    # Expects that the examples were merged in order of filename
+    def bucket_by_file
+      bucket = []
+      consumed_keys = []
+
+      all_keys = keys
+      key = all_keys.first
+      return [] if key.nil?
+
+      file_path = self[key][:file_path]
+
+      catch(:full) do
+        all_keys.each_slice(24).each do |key_group|
+          examples = multi_read(*key_group)
+
+          examples.each do |key, example|
+            throw :full unless example[:file_path] == file_path
+
+            bucket << example
+            consumed_keys << key
+          end
+        end
+      end
+
+      delete(*consumed_keys)
+      bucket
+    end
+
+    # Take elements from the hash until the average runtime bucket has filled
+    def bucket_by_timings
+      bucket = []
+      consumed_keys = []
+
+      estimated_run_time_total = 0
+
+      catch(:full) do
+        keys.each_slice(25).each do |key_group|
+          examples = multi_read(*key_group)
+
+          examples.each do |key, example|
+            estimated_run_time_total += example[:expected_run_time] || run_time_bucket_maximum
+            throw :full if estimated_run_time_total > run_time_bucket_maximum && bucket.length.positive?
+
+            bucket << example
+            consumed_keys << key
+          end
+        end
+      end
+
+      delete(*consumed_keys)
+      bucket
+    end
+  end
+
+  class CompletedStore < Store
+    def dump
+      @run_times = []
+      @first_started_at = Time.new(2999, 1, 1, 0, 0, 0) # TODO: Make future proof /s
+      @last_finished_at = Time.new(1900, 1, 1, 0, 0, 0)
+
+      @output = {
+        file_totals: Hash.new { |h, filename| h[filename] = 0.0 },
+        meta: {failures: 0, passes: 0, pending: 0},
+        examples: {}
+      }
+
+      to_h.values.each { |example| calculate(example) }
+
+      @output[:meta][:total_run_time] = @run_times.sum
+      @output[:meta][:average_run_time] = @output[:meta][:total_run_time] / [@run_times.length, 1].max.to_f
+      @output[:meta][:first_started_at] = @first_started_at.iso8601(6)
+      @output[:meta][:last_finished_at] = @last_finished_at.iso8601(6)
+
+      @output
+    end
+
+    private
+
+    def calculate(example)
+      @run_times << example[:run_time]
+      @output[:file_totals][example[:file_path]] += example[:run_time]
+
+      started_at = Time.parse(example[:started_at])
+      finished_at = Time.parse(example[:finished_at])
+
+      @first_started_at = started_at if started_at < @first_started_at
+      @last_finished_at = finished_at if finished_at > @last_finished_at
+
+      case example[:status]
+      when "passed"
+        @output[:meta][:passes] += 1
+      when "failed"
+        @output[:meta][:failures] += 1
+      when "pending"
+        @output[:meta][:pending] += 1
+      end
+
+      @output[:examples][example[:id]] = example
+    end
+  end
+end
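A rough sketch of how the stores fit together (illustrative only; the path and example hashes are invented). PendingStore hands work out in buckets: grouped by file until a run-time maximum has been seeded, then sized by estimated run time.

    require "specwrk/store"

    pending = Specwrk::PendingStore.new("/tmp/specwrk_demo/pending")  # hypothetical path

    pending.merge!(
      "id1" => {id: "id1", file_path: "spec/a_spec.rb", expected_run_time: 0.4},
      "id2" => {id: "id2", file_path: "spec/a_spec.rb", expected_run_time: 0.6},
      "id3" => {id: "id3", file_path: "spec/b_spec.rb", expected_run_time: 0.5}
    )

    # No bucket maximum seeded yet, so shift_bucket groups by file:
    pending.shift_bucket  #=> the two spec/a_spec.rb examples
    pending.length        #=> 1

    # Once a maximum is seeded, buckets fill until the estimated total would exceed it:
    pending.run_time_bucket_maximum = 1.0
    pending.shift_bucket  #=> [the spec/b_spec.rb example]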
data/lib/specwrk/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Specwrk
-  VERSION = "0.8.0"
+  VERSION = "0.9.1"
 end
data/lib/specwrk/web/app.rb CHANGED
@@ -21,8 +21,6 @@ require "specwrk/web/endpoints"
 module Specwrk
   class Web
     class App
-      REAP_INTERVAL = 330 # HTTP connection timeout + some buffer
-
      class << self
        def run!
          Process.setproctitle "specwrk-server"
@@ -74,10 +72,6 @@ module Specwrk
        end
      end

-      def initialize
-        @reaper_thread = Thread.new { reaper } unless ENV["SPECWRK_SRV_SINGLE_RUN"]
-      end
-
      def call(env)
        env[:request] ||= Rack::Request.new(env)

@@ -106,26 +100,6 @@ module Specwrk
          Endpoints::NotFound
        end
      end
-
-      def reaper
-        until Specwrk.force_quit
-          sleep REAP_INTERVAL
-
-          reap
-        end
-      end
-
-      def reap
-        Web::WORKERS.each do |run, workers|
-          most_recent_last_seen_at = workers.map { |id, worker| worker[:last_seen_at] }.max
-          next unless most_recent_last_seen_at
-
-          # Don't consider runs which aren't at least REAP_INTERVAL sec stale
-          if most_recent_last_seen_at < Time.now - REAP_INTERVAL
-            Web.clear_run_queues(run)
-          end
-        end
-      end
    end
  end
 end
@@ -28,7 +28,7 @@ module Specwrk
      private

      def unauthorized
-        [401, {"content-type" => "application/json"}, ["Unauthorized"]]
+        [401, {"Content-Type" => "application/json"}, ["Unauthorized"]]
      end
    end
  end
data/lib/specwrk/web/endpoints.rb CHANGED
@@ -2,18 +2,41 @@
 
 require "json"
 
+require "specwrk/store"
+
 module Specwrk
   class Web
     module Endpoints
       class Base
+        attr_reader :started_at
+
        def initialize(request)
          @request = request
-
-          worker[:first_seen_at] ||= Time.now
-          worker[:last_seen_at] = Time.now
        end

        def response
+          before_lock
+
+          return with_response unless run_id # No run_id, no datastore usage in the endpoint
+
+          payload # parse the payload before any locking
+
+          worker[:first_seen_at] ||= Time.now.iso8601
+          worker[:last_seen_at] = Time.now.iso8601
+
+          final_response = with_lock do
+            started_at = metadata[:started_at] ||= Time.now.iso8601
+            @started_at = Time.parse(started_at)
+
+            with_response
+          end
+
+          after_lock
+
+          final_response
+        end
+
+        def with_response
          not_found
        end

@@ -21,15 +44,25 @@ module Specwrk
 
        attr_reader :request

+        def before_lock
+        end
+
+        def after_lock
+        end
+
        def not_found
-          [404, {"content-type" => "text/plain"}, ["This is not the path you're looking for, 'ol chap..."]]
+          [404, {"Content-Type" => "text/plain"}, ["This is not the path you're looking for, 'ol chap..."]]
        end

        def ok
-          [200, {"content-type" => "text/plain"}, ["OK, 'ol chap"]]
+          [200, {"Content-Type" => "text/plain"}, ["OK, 'ol chap"]]
        end

        def payload
+          return unless request.content_type&.start_with?("application/json")
+          return unless request.post? || request.put? || request.delete?
+          return if body.empty?
+
          @payload ||= JSON.parse(body, symbolize_names: true)
        end

@@ -37,24 +70,28 @@ module Specwrk
          @body ||= request.body.read
        end

-        def pending_queue
-          Web::PENDING_QUEUES[run_id]
+        def pending
+          @pending ||= PendingStore.new(File.join(datastore_path, "pending"))
        end

-        def processing_queue
-          Web::PROCESSING_QUEUES[request.get_header("HTTP_X_SPECWRK_RUN")]
+        def processing
+          @processing ||= Store.new(File.join(datastore_path, "processing"))
        end

-        def completed_queue
-          Web::COMPLETED_QUEUES[request.get_header("HTTP_X_SPECWRK_RUN")]
+        def completed
+          @completed ||= CompletedStore.new(File.join(datastore_path, "completed"))
        end

-        def workers
-          Web::WORKERS[request.get_header("HTTP_X_SPECWRK_RUN")]
+        def metadata
+          @metadata ||= Store.new(File.join(datastore_path, "metadata"), thread_safe_reads: false)
+        end
+
+        def run_times
+          @run_times ||= Store.new(File.join(ENV["SPECWRK_OUT"], "run_times"), thread_safe_reads: false)
        end

        def worker
-          workers[request.get_header("HTTP_X_SPECWRK_ID")]
+          @worker ||= Store.new(File.join(datastore_path, "workers", request.get_header("HTTP_X_SPECWRK_ID").to_s))
        end

        def run_id
@@ -62,7 +99,24 @@ module Specwrk
        end

        def run_report_file_path
-          @run_report_file_path ||= File.join(ENV["SPECWRK_OUT"], "#{completed_queue.created_at.strftime("%Y%m%dT%H%M%S")}-report-#{run_id}.json").to_s
+          @run_report_file_path ||= File.join(datastore_path, "#{started_at.strftime("%Y%m%dT%H%M%S")}-report.json").to_s
+        end
+
+        def datastore_path
+          @datastore_path ||= File.join(ENV["SPECWRK_OUT"], run_id).to_s.tap do |path|
+            FileUtils.mkdir_p(path) unless File.directory?(path)
+          end
+        end
+
+        def with_lock
+          Thread.pass until lock_file.flock(File::LOCK_EX)
+          yield
+        ensure
+          lock_file.flock(File::LOCK_UN)
+        end
+
+        def lock_file
+          @lock_file ||= File.open(File.join(datastore_path, "lock"), "a")
        end
      end

@@ -70,105 +124,142 @@ module Specwrk
      NotFound = Class.new(Base)

      class Health < Base
-        def response
+        def with_response
          [200, {}, []]
        end
      end

      class Heartbeat < Base
-        def response
+        def with_response
          ok
        end
      end

      class Seed < Base
-        def response
-          pending_queue.synchronize do |pending_queue_hash|
-            unless ENV["SPECWRK_SRV_SINGLE_SEED_PER_RUN"] && pending_queue_hash.length.positive?
-              examples = payload.map { |hash| [hash[:id], hash] }.to_h
+        def before_lock
+          examples_with_run_times if persist_seeds?
+        end

-              pending_queue.merge_with_previous_run_times!(examples)
+        def with_response
+          if persist_seeds?
+            new_run_time_bucket_maximums = [pending.run_time_bucket_maximum, @seeds_run_time_bucket_maximum.to_f].compact
+            pending.run_time_bucket_maximum = new_run_time_bucket_maximums.sum.to_f / new_run_time_bucket_maximums.length.to_f

-              ok
-            end
+            pending.merge!(examples_with_run_times)
          end

+          processing.clear
+          completed.clear
+
          ok
        end
-      end

-      class Complete < Base
-        def response
-          processing_queue.synchronize do |processing_queue_hash|
+        def examples_with_run_times
+          @examples_with_run_times ||= begin
+            unsorted_examples_with_run_times = []
+            all_ids = payload.map { |example| example[:id] }
+            all_run_times = run_times.multi_read(*all_ids)
+
            payload.each do |example|
-              next unless processing_queue_hash.delete(example[:id])
-              completed_queue[example[:id]] = example
+              run_time = all_run_times[example[:id]]
+
+              unsorted_examples_with_run_times << [example[:id], example.merge(expected_run_time: run_time)]
            end
-          end

-          if pending_queue.length.zero? && processing_queue.length.zero? && completed_queue.length.positive? && ENV["SPECWRK_OUT"]
-            completed_queue.dump_and_write(run_report_file_path)
-          end
+            sorted_examples_with_run_times = if sort_by == :timings
+              unsorted_examples_with_run_times.sort_by do |entry|
+                -(entry.last[:expected_run_time] || Float::INFINITY)
+              end
+            else
+              unsorted_examples_with_run_times.sort_by do |entry|
+                entry.last[:file_path]
+              end
+            end

-          ok
+            @seeds_run_time_bucket_maximum = run_time_bucket_maximum(all_run_times.values.compact)
+            @examples_with_run_times = sorted_examples_with_run_times.to_h
+          end
        end
-      end

-      class Pop < Base
-        def response
-          processing_queue.synchronize do |processing_queue_hash|
-            @examples = pending_queue.shift_bucket
+        private

-            @examples.each do |example|
-              processing_queue_hash[example[:id]] = example
-            end
-          end
+        # Average + standard deviation
+        def run_time_bucket_maximum(values)
+          return 0 if values.length.zero?

-          if @examples.length.positive?
-            [200, {"content-type" => "application/json"}, [JSON.generate(@examples)]]
-          elsif pending_queue.length.zero? && processing_queue.length.zero? && completed_queue.length.zero?
-            [204, {"content-type" => "text/plain"}, ["Waiting for sample to be seeded."]]
-          elsif completed_queue.length.positive? && processing_queue.length.zero?
-            [410, {"content-type" => "text/plain"}, ["That's a good lad. Run along now and go home."]]
+          mean = values.sum.to_f / values.size
+          variance = values.map { |v| (v - mean)**2 }.sum / values.size
+          (mean + Math.sqrt(variance)).round(2)
+        end
+
+        def persist_seeds?
+          ENV["SPECWRK_SRV_SINGLE_SEED_PER_RUN"].nil? || pending.empty?
+        end
+
+        def sort_by
+          if ENV["SPECWRK_SRV_GROUP_BY"] == "file" || run_times.empty?
+            :file
          else
-            not_found
+            :timings
          end
        end
      end

-      class Report < Base
-        def response
-          if data
-            [200, {"content-type" => "application/json"}, [data]]
-          else
-            [404, {"content-type" => "text/plain"}, ["Unable to report on run #{run_id}; no file matching #{"*-report-#{run_id}.json"}"]]
-          end
+      class Complete < Base
+        def with_response
+          completed.merge!(completed_examples)
+          processing.delete(*completed_examples.keys)
+
+          ok
        end

        private

-        def data
-          return @data if defined? @data
+        def completed_examples
+          @completed_data ||= payload.map { |example| [example[:id], example] if processing[example[:id]] }.compact.to_h
+        end

-          return unless most_recent_run_report_file
-          return unless File.exist?(most_recent_run_report_file)
+        # We don't care about exact values here, just approximate run times are fine
+        # So if we overwrite run times from another process it is nbd
+        def after_lock
+          run_time_data = payload.map { |example| [example[:id], example[:run_time]] }.to_h
+          run_times.merge! run_time_data
+        end
+      end

-          @data = File.open(most_recent_run_report_file, "r") do |file|
-            file.flock(File::LOCK_SH)
-            file.read
+      class Pop < Base
+        def with_response
+          @examples = pending.shift_bucket
+
+          processing_data = @examples.map { |example| [example[:id], example] }.to_h
+          processing.merge!(processing_data)
+
+          if @examples.any?
+            [200, {"Content-Type" => "application/json"}, [JSON.generate(@examples)]]
+          elsif pending.empty? && processing.empty? && completed.empty?
+            [204, {"Content-Type" => "text/plain"}, ["Waiting for sample to be seeded."]]
+          elsif completed.any? && processing.empty?
+            [410, {"Content-Type" => "text/plain"}, ["That's a good lad. Run along now and go home."]]
+          else
+            not_found
          end
        end
+      end

-        def most_recent_run_report_file
-          @most_recent_run_report_file ||= Dir.glob(File.join(ENV["SPECWRK_OUT"], "*-report-#{run_id}.json")).last
+      class Report < Base
+        def with_response
+          [200, {"Content-Type" => "application/json"}, [JSON.generate(completed.dump)]]
        end
      end

      class Shutdown < Base
-        def response
+        def with_response
+          pending.clear
+          processing.clear
+
          interupt! if ENV["SPECWRK_SRV_SINGLE_RUN"]

-          [200, {"content-type" => "text/plain"}, ["✌️"]]
+          [200, {"Content-Type" => "text/plain"}, ["✌️"]]
        end

        def interupt!
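The with_lock/lock_file pair above is a plain flock(2) critical section: every server process sharing a run's datastore directory serializes its endpoint work on one lock file, which is what allows run state to live on disk instead of in one process's memory. A standalone sketch of the same pattern (path hypothetical, not from the gem):

    require "fileutils"

    lock_path = "/tmp/specwrk_demo/lock"  # hypothetical; the gem locks <datastore_path>/lock
    FileUtils.mkdir_p(File.dirname(lock_path))
    lock_file = File.open(lock_path, "a")  # opened once and reused, as lock_file does above

    begin
      lock_file.flock(File::LOCK_EX)  # blocks until no other process holds the lock
      puts "exclusive access to the run's shared stores"
    ensure
      lock_file.flock(File::LOCK_UN)
    end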
data/lib/specwrk/web.rb CHANGED
@@ -1,22 +1,6 @@
 # frozen_string_literal: true
 
-require "specwrk/queue"
-
 module Specwrk
   class Web
-    PENDING_QUEUES = Queue.new { |h, key| h[key] = PendingQueue.new }
-    PROCESSING_QUEUES = Queue.new { |h, key| h[key] = Queue.new }
-    COMPLETED_QUEUES = Queue.new { |h, key| h[key] = CompletedQueue.new }
-    WORKERS = Hash.new { |h, key| h[key] = Hash.new { |h, key| h[key] = {} } }
-
-    def self.clear_queues
-      [PENDING_QUEUES, PROCESSING_QUEUES, COMPLETED_QUEUES, WORKERS].each(&:clear)
-    end
-
-    def self.clear_run_queues(run)
-      [PENDING_QUEUES, PROCESSING_QUEUES, COMPLETED_QUEUES, WORKERS].each do |queue|
-        queue.delete(run)
-      end
-    end
  end
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: specwrk
 version: !ruby/object:Gem::Version
-  version: 0.8.0
+  version: 0.9.1
 platform: ruby
 authors:
 - Daniel Westendorf
@@ -9,6 +9,34 @@ bindir: exe
 cert_chain: []
 date: 1980-01-02 00:00:00.000000000 Z
 dependencies:
+- !ruby/object:Gem::Dependency
+  name: json
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: base64
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: dry-cli
   requirement: !ruby/object:Gem::Requirement
@@ -152,7 +180,6 @@ files:
 - config.ru
 - docker/Dockerfile.server
 - docker/entrypoint.server.sh
-- docker/pitchfork.conf
 - examples/circleci/config.yml
 - examples/github/specwrk-multi-node.yml
 - examples/github/specwrk-single-node.yml
@@ -163,7 +190,8 @@ files:
 - lib/specwrk/client.rb
 - lib/specwrk/hookable.rb
 - lib/specwrk/list_examples.rb
-- lib/specwrk/queue.rb
+- lib/specwrk/store.rb
+- lib/specwrk/store/file_adapter.rb
 - lib/specwrk/version.rb
 - lib/specwrk/web.rb
 - lib/specwrk/web/app.rb
@@ -196,7 +224,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.6.7
+rubygems_version: 3.6.9
 specification_version: 4
 summary: Parallel rspec test runner from a queue of pending jobs.
 test_files: []
data/docker/pitchfork.conf DELETED
@@ -1,5 +0,0 @@
-# frozen_string_literal: true
-
-worker_processes 1
-listen "localhost:3000", backlog: 2048
-timeout 301
data/lib/specwrk/queue.rb DELETED
@@ -1,224 +0,0 @@
-# frozen_string_literal: true
-
-require "time"
-require "json"
-
-module Specwrk
-  # Thread-safe Hash access
-  class Queue
-    attr_reader :created_at
-
-    def initialize(hash = {})
-      @created_at = Time.now
-
-      if block_given?
-        @mutex = Monitor.new # Reentrant locking is required here
-        # It's possible to enter the proc from two threads, so we need to ||= in case
-        # one thread has set a value prior to the yield.
-        hash.default_proc = proc { |h, key| @mutex.synchronize { yield(h, key) } }
-      end
-
-      @mutex ||= Mutex.new # Monitor is up-to 20% slower than Mutex, so if no block is given, use a mutex
-      @hash = hash
-    end
-
-    def synchronize(&blk)
-      if @mutex.owned?
-        yield(@hash)
-      else
-        @mutex.synchronize { yield(@hash) }
-      end
-    end
-
-    def method_missing(name, *args, &block)
-      if @hash.respond_to?(name)
-        @mutex.synchronize { @hash.public_send(name, *args, &block) }
-      else
-        super
-      end
-    end
-
-    def respond_to_missing?(name, include_private = false)
-      @hash.respond_to?(name, include_private) || super
-    end
-  end
-
-  class PendingQueue < Queue
-    def shift_bucket
-      return bucket_by_file unless previous_run_times
-
-      case ENV["SPECWRK_SRV_GROUP_BY"]
-      when "file"
-        bucket_by_file
-      else
-        bucket_by_timings
-      end
-    end
-
-    def run_time_bucket_threshold
-      return 1 unless previous_run_times
-
-      previous_run_times.dig(:meta, :average_run_time)
-    end
-
-    def previous_run_times
-      return unless ENV["SPECWRK_OUT"]
-
-      @previous_run_times ||= begin
-        return unless previous_run_times_file_path
-        return unless File.exist? previous_run_times_file_path
-
-        raw_data = File.open(previous_run_times_file_path, "r") do |file|
-          file.flock(File::LOCK_SH)
-          file.read
-        end
-
-        @previous_run_times = JSON.parse(raw_data, symbolize_names: true)
-      rescue JSON::ParserError => e
-        warn "#{e.inspect} in file #{previous_run_times_file_path}"
-        nil
-      end
-    end
-
-    def merge_with_previous_run_times!(h2)
-      synchronize do
-        h2.each { |_id, example| merge_example(example) }
-
-        # Sort by exepcted run time, slowest to fastest
-        @hash = @hash.sort_by { |_, example| example[:expected_run_time] }.reverse.to_h
-      end
-    end
-
-    private
-
-    # We want the most recently modified run time file
-    # report files are prefixed with a timestamp, and Dir.glob should order
-    # alphanumericly
-    def previous_run_times_file_path
-      return unless ENV["SPECWRK_OUT"]
-
-      @previous_run_times_file_path ||= Dir.glob(File.join(ENV["SPECWRK_OUT"], "*-report-*.json")).last
-    end
-
-    # Take elements from the hash where the file_path is the same
-    def bucket_by_file
-      bucket = []
-
-      @mutex.synchronize do
-        key = @hash.keys.first
-        break if key.nil?
-
-        file_path = @hash[key][:file_path]
-        @hash.each do |id, example|
-          next unless example[:file_path] == file_path
-
-          bucket << example
-          @hash.delete id
-        end
-      end
-
-      bucket
-    end
-
-    # Take elements from the hash until the average runtime bucket has filled
-    def bucket_by_timings
-      bucket = []
-
-      @mutex.synchronize do
-        estimated_run_time_total = 0
-
-        while estimated_run_time_total < run_time_bucket_threshold
-          key = @hash.keys.first
-          break if key.nil?
-
-          estimated_run_time_total += @hash.dig(key, :expected_run_time)
-          break if estimated_run_time_total > run_time_bucket_threshold && bucket.length.positive?
-
-          bucket << @hash[key]
-          @hash.delete key
-        end
-      end
-
-      bucket
-    end
-
-    # Ensure @mutex is held when calling this method
-    def merge_example(example)
-      return if @hash.key? example[:id]
-      return if @hash.key? example[:file_path]
-
-      @hash[example[:id]] = if previous_run_times
-        example.merge!(
-          expected_run_time: previous_run_times.dig(:examples, example[:id].to_sym, :run_time) || 99999.9 # run "unknown" files first
-        )
-      else
-        example.merge!(
-          expected_run_time: 99999.9 # run "unknown" files first
-        )
-      end
-    end
-  end
-
-  class CompletedQueue < Queue
-    def dump_and_write(path)
-      write_output_to(path, dump)
-    end
-
-    def dump
-      @mutex.synchronize do
-        @run_times = []
-        @first_started_at = Time.new(2999, 1, 1, 0, 0, 0) # TODO: Make future proof /s
-        @last_finished_at = Time.new(1900, 1, 1, 0, 0, 0)
-
-        @output = {
-          file_totals: Hash.new { |h, filename| h[filename] = 0.0 },
-          meta: {failures: 0, passes: 0, pending: 0},
-          examples: {}
-        }
-
-        @hash.values.each { |example| calculate(example) }
-
-        @output[:meta][:total_run_time] = @run_times.sum
-        @output[:meta][:average_run_time] = @output[:meta][:total_run_time] / [@run_times.length, 1].max.to_f
-        @output[:meta][:first_started_at] = @first_started_at.iso8601(6)
-        @output[:meta][:last_finished_at] = @last_finished_at.iso8601(6)
-
-        @output
-      end
-    end
-
-    private
-
-    def calculate(example)
-      @run_times << example[:run_time]
-      @output[:file_totals][example[:file_path]] += example[:run_time]
-
-      started_at = Time.parse(example[:started_at])
-      finished_at = Time.parse(example[:finished_at])
-
-      @first_started_at = started_at if started_at < @first_started_at
-      @last_finished_at = finished_at if finished_at > @last_finished_at
-
-      case example[:status]
-      when "passed"
-        @output[:meta][:passes] += 1
-      when "failed"
-        @output[:meta][:failures] += 1
-      when "pending"
-        @output[:meta][:pending] += 1
-      end
-
-      @output[:examples][example[:id]] = example
-    end
-
-    def write_output_to(path, output)
-      File.open(path, "w") do |file|
-        file.flock(File::LOCK_EX)
-
-        file.write JSON.pretty_generate(output)
-
-        file.flock(File::LOCK_UN)
-      end
-    end
-  end
-end
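Taken together: 0.9.1 swaps these per-process, in-memory queues for the disk-backed stores added above, so run state survives across server processes and restarts. Roughly, a call site translates like this (a sketch built from the two versions shown; run_id and example are invented placeholders):

    # 0.8.0: state lives in one process's heap, guarded by a Mutex/Monitor
    Specwrk::Web::PENDING_QUEUES[run_id].synchronize do |hash|
      hash[example[:id]] = example
    end

    # 0.9.1: state lives in SPECWRK_OUT/<run_id>/pending on disk, shared by all processes
    pending = Specwrk::PendingStore.new(File.join(ENV["SPECWRK_OUT"], run_id, "pending"))
    pending.merge!(example[:id] => example)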