rspec-background-process 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,95 @@
+ require 'rspec'
+ require 'rspec/core/formatters'
+ require 'rspec/core/shared_context'
+ require_relative 'background_process'
+ require_relative 'process_pool'
+
+ # Plain helper methods, no RSpec hooks
+ # config.include BackgroundProcessCoreHelpers
+ module BackgroundProcessCoreHelpers
+   def process_pool(options = {})
+     @@process_pool ||= RSpecBackgroundProcess::ProcessPool.new(options)
+   end
+
+   def background_process(path, options = {})
+     RSpecBackgroundProcess::ProcessPool::ProcessDefinition.new(
+       process_pool.pool,
+       options[:group] || 'default',
+       path,
+       options[:load] ? RSpecBackgroundProcess::LoadedBackgroundProcess : RSpecBackgroundProcess::BackgroundProcess,
+       process_pool.options
+     )
+   end
+
+   def self.report_failed_instance
+     return unless defined? @@process_pool
+
+     @@process_pool.report_failed_instance
+     @@process_pool.report_logs
+   end
+
+   def self.report_pool_stats
+     return unless defined? @@process_pool
+
+     @@process_pool.report_stats
+   end
+ end
+
+ # RSpec specific cleanup
+ # config.include BackgroundProcessHelpers
+ module BackgroundProcessHelpers
+   extend RSpec::Core::SharedContext
+   include BackgroundProcessCoreHelpers
+
+   after(:each) do
+     @@process_pool.cleanup
+   end
+ end
+
+ # RSpec custom reporter
+ # config.add_formatter FailedBackgroundProcessReporter
+ class FailedBackgroundProcessReporter
+   RSpec::Core::Formatters.register self, :example_failed
+
+   def initialize(output)
+     @output = output
+   end
+
+   def example_failed(example)
+     @output << BackgroundProcessCoreHelpers.report_failed_instance
+   end
+ end
+
+ # RSpec setup
+ RSpec.configure do |config|
+   config.include BackgroundProcessHelpers, with: :background_process
+   config.add_formatter FailedBackgroundProcessReporter
+ end
+
+ # Cucumber setup
+ if respond_to?(:World) and respond_to?(:After)
+   World(BackgroundProcessCoreHelpers)
+
+   After do
+     process_pool.cleanup
+   end
+
+   After do |scenario|
+     if scenario.failed?
+       BackgroundProcessCoreHelpers.report_failed_instance
+     end
+   end
+ end
+
+ ## To configure pool in Cucumber add this to env.rb
+ # Before do
+ #   process_pool(
+ #     logging: true,
+ #     max_running: 16
+ #   )
+ # end
+
+ ## To report pool/LRU statistics at exit add this to env.rb
+ # at_exit do
+ #   BackgroundProcessCoreHelpers.report_pool_stats
+ # end
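
For orientation, a rough usage sketch built only from the helpers above (not part of the released files): the entry-point require name, the bin/my-service path and its arguments are illustrative assumptions.

# spec/spec_helper.rb -- assumes the gem exposes an entry point that loads the file above
require 'rspec-background-process'

# A group tagged `with: :background_process` gets BackgroundProcessHelpers
# (and its after(:each) pool cleanup) via the RSpec.configure block above.
RSpec.describe 'my service', with: :background_process do
  let(:service) do
    background_process('bin/my-service').with do |process|  # hypothetical script path
      process.argument '--port', '1234'   # appended to the command line
      process.ready_timeout 30            # seconds allowed for the readiness check
      process.logging_enabled
      process.ready_test { |p| true }     # replaces the default check, which only raises;
                                          # when and how it is called lives in background_process (not shown here)
    end
  end

  it 'runs against a pooled background process' do
    service.start # starts (or reuses) the pooled instance keyed off this definition
  end
end
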
@@ -0,0 +1,356 @@
+ require 'digest'
+ require 'tmpdir'
+ require 'pathname'
+ require 'rufus-lru'
+ require 'set'
+ require 'delegate'
+
+ module RSpecBackgroundProcess
+   class ProcessPool
+     class ProcessDefinition
+       def initialize(pool, group, path, type, options)
+         @pool = pool
+         @group = group
+         @path = path
+         @type = type
+
+         @extensions = Set.new
+         @options = {
+           ready_timeout: 10,
+           term_timeout: 10,
+           kill_timeout: 10,
+           ready_test: ->(p){fail 'no readiness check defined'},
+           refresh_action: ->(p){p.restart},
+           logging: false
+         }.merge(options)
+         @working_directory = nil
+         @arguments = []
+       end
+
+       attr_accessor :group
+       attr_reader :path
+
+       def initialize_copy(old)
+         # the copy needs its own collections
+         @extensions = @extensions.dup
+         @options = @options.dup
+         @arguments = @arguments.dup
+       end
+
+       def with
+         process = dup
+         yield process
+         process
+       end
+
+       def extend(mod, options = {})
+         @extensions << mod
+         @options.merge! options
+       end
+
+       def logging_enabled
+         @options[:logging] = true
+       end
+
+       def logging_enabled?
+         @options[:logging]
+       end
+
+       def ready_test(&block)
+         @options[:ready_test] = block
+       end
+
+       def refresh_action(&block)
+         @options[:refresh_action] = block
+       end
+
+       def ready_timeout(seconds)
+         @options[:ready_timeout] = seconds
+       end
+
+       def term_timeout(seconds)
+         @options[:term_timeout] = seconds
+       end
+
+       def kill_timeout(seconds)
+         @options[:kill_timeout] = seconds
+       end
+
+       def working_directory(dir)
+         @working_directory = dir
+       end
+
+       def arguments
+         @arguments
+       end
+
+       def argument(*value)
+         @arguments += value
+       end
+
+       def instance
+         # disallow changes to the definition once we have instantiated
+         @options.freeze
+         @arguments.freeze
+         @working_directory.freeze
+         @extensions.freeze
+
+         # instance is requested
+         # we calculate the key based on the current definition
+         _key = key
+
+         # already created
+         if instance = @pool[_key]
+           # always make sure options are up to date with definition
+           instance.reset_options(@options)
+           return instance
+         end
+
+         # can only use parts of the key for instance name
+         name = Pathname.new(@path).basename
+
+         # need to create a new one
+         instance = @type.new(
+           "#{@group}-#{name}-#{_key}",
+           @path,
+           @arguments,
+           @working_directory || [name, _key],
+           @options
+         )
+
+         # ports get allocated here...
+         @extensions.each do |mod|
+           instance.extend(mod)
+         end
+
+         @pool[_key] = instance
+       end
+
+       # shortcut
+       def start
+         instance.start
+       end
+
+       def key
+         hash = Digest::SHA256.new
+         hash.update @group.to_s
+         hash.update @path.to_s
+         hash.update @type.name
+         @extensions.each do |mod|
+           hash.update mod.name
+         end
+         hash.update @working_directory.to_s
+         @arguments.each do |argument|
+           case argument
+           when Pathname
+             begin
+               # use file content as part of the hash
+               hash.update argument.read
+             rescue Errno::ENOENT
+               # use file name if it does not exist
+               hash.update argument.to_s
+             end
+           else
+             hash.update argument.to_s
+           end
+         end
+         Digest.hexencode(hash.digest)[0..16]
+       end
+     end
+
+     class LRUPool
+       class VoidHash < Hash
+         def []=(key, value)
+           value
+         end
+       end
+
+       def initialize(max_running, &lru_stop)
+         @all = {}
+         @max_running = max_running
+         @running_keep = max_running > 0 ? LruHash.new(max_running) : VoidHash.new
+         @running_all = Set[]
+         @active = Set[]
+
+         @after_store = []
+         @lru_stop = lru_stop
+       end
+
+       def to_s
+         "LRUPool[all: #{@all.length}, running: #{@running_all.length}, active: #{@active.map(&:to_s).join(',')}, keep: #{@running_keep.length}]"
+       end
+
+       def []=(key, value)
+         @active << key
+         @all[key] = value
+         @after_store.each{|callback| callback.call(key, value)}
+       end
+
+       def [](key)
+         if @all.member? key
+           @active << key
+           @running_keep[key] # bump on use if on running LRU list
+         end
+         @all[key]
+       end
+
+       def delete(key)
+         @running_keep.delete(key)
+         @running_all.delete(key)
+         @active.delete(key)
+         @all.delete(key)
+       end
+
+       def instances
+         @all.values
+       end
+
+       def reset_active
+         puts "WARNING: There are more active processes than max running allowed! Consider increasing max running from #{@max_running} to #{@active.length} or more." if @max_running < @active.length
+         @active = Set.new
+         trim!
+       end
+
+       def running(key)
+         return unless @all.member? key
+         @running_keep[key] = key
+         @running_all << key
+         trim!
+       end
+
+       def not_running(key)
+         @running_keep.delete(key)
+         @running_all.delete(key)
+       end
+
+       def after_store(&callback)
+         @after_store << callback
+       end
+
+       private
+
+       def trim!
+         to_stop.each do |key|
+           @lru_stop.call(key, @all[key])
+         end
+       end
+
+       def to_stop
+         @running_all - @active - @running_keep.values
+       end
+     end
+
+     def initialize(options)
+       @stats = {}
+
+       @max_running = options.delete(:max_running) || 4
+
+       @pool = LRUPool.new(@max_running) do |key, instance|
+         #puts "too many instances running, stopping: #{instance.name}[#{key}]; #{@pool}"
+         stats(instance.name)[:lru_stopped] += 1
+         instance.stop
+       end
+
+       # keep track of running instances
+       @pool.after_store do |key, instance|
+         instance.after_state_change do |new_state|
+           # we mark running before it is actually started to have a chance to stop over-limit instances first
+           if new_state == :starting
+             #puts "new instance running: #{instance.name}[#{key}]"
+             @pool.running(key)
+             stats(instance.name)[:started] += 1
+           end
+           @pool.not_running(key) if [:not_running, :dead, :jammed].include? new_state
+         end
+
+         # mark running if added while already running
+         @pool.running(key) if instance.running?
+
+         # init stats
+         stats(instance.name)[:started] ||= 0
+         stats(instance.name)[:lru_stopped] ||= 0
+       end
+
+       # for storing shared data
+       @global_context = {}
+
+       # for filling template strings with actual instance data
+       @template_renderer = ->(variables, string) {
+         out = string.dup
+         variables.merge(
+           /project directory/ => -> { Dir.pwd.to_s }
+         ).each do |regexp, source|
+           out.gsub!(/<#{regexp}>/) do
+             source.call(*$~.captures)
+           end
+         end
+         out
+       }
+
+       # these are passed down to the instance
+       @options = options.merge(
+         global_context: @global_context,
+         template_renderer: @template_renderer
+       )
+     end
+
+     attr_reader :pool
+     attr_reader :options
+
+     def logging_enabled?
+       @options[:logging]
+     end
+
+     def cleanup
+       @pool.reset_active
+     end
+
+     def stats(name)
+       @stats[name] ||= {}
+     end
+
+     def report_stats
+       puts
+       puts "Process pool stats (max running: #{@max_running}):"
+       @stats.each do |key, stats|
+         puts " #{key}: #{stats.map{|k, v| "#{k}: #{v}"}.join(' ')}"
+       end
+       puts "Total instances: #{@stats.length}"
+       puts "Total starts: #{@stats.reduce(0){|total, stat| total += stat.last[:started]}}"
+       puts "Total LRU stops: #{@stats.reduce(0){|total, stat| total += stat.last[:lru_stopped]}}"
+       puts "Total extra LRU stops: #{@stats.reduce(0){|total, stat| extra = (stat.last[:lru_stopped] - 1); total += extra if extra > 0; total}}"
+     end
+
+     def failed_instance
+       @pool.instances.select do |instance|
+         instance.dead? or
+         instance.failed? or
+         instance.jammed?
+       end.sort_by do |instance|
+         instance.state_change_time
+       end.last
+     end
+
+     def report_failed_instance
+       if failed_instance
+         puts "Last failed process instance state log: "
+         failed_instance.state_log.each do |log_line|
+           puts "\t#{log_line}"
+         end
+         puts "Working directory: #{failed_instance.working_directory}"
+         puts "Log file: #{failed_instance.log_file}"
+         puts "State: #{failed_instance.state}"
+         puts "Exit code: #{failed_instance.exit_code}"
+       else
+         puts "No process instance in failed state"
+       end
+     end
+
+     def report_logs
+       puts "Process instance logs:"
+       @pool.instances.each do |instance|
+         puts "#{instance.name}: #{instance.log_file}"
+       end
+     end
+   end
+ end
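
A note on instance reuse, again as a sketch rather than released code: ProcessDefinition#key hashes the group, path, process type, extensions, working directory and arguments, so definitions that agree on all of these resolve to the same pooled instance, while max_running bounds how many stay running via the LRU stop callback. The bin/command path below is an assumption, and RSpecBackgroundProcess::BackgroundProcess comes from the gem's background_process file, which is not shown in this hunk.

pool = RSpecBackgroundProcess::ProcessPool.new(max_running: 2)

definition = RSpecBackgroundProcess::ProcessPool::ProcessDefinition.new(
  pool.pool, 'default', 'bin/command',                # hypothetical script path
  RSpecBackgroundProcess::BackgroundProcess, pool.options
)

a = definition.with { |p| p.argument '--mode', 'fast' }
b = definition.with { |p| p.argument '--mode', 'fast' }
c = definition.with { |p| p.argument '--mode', 'slow' }

a.key == b.key # => true  -- identical definitions share one pooled instance
a.key == c.key # => false -- different arguments mean a different key and a separate instance

# Once more instances are running than max_running allows, LRUPool#trim! invokes the
# lru_stop callback, which stops the least recently used running instance that is not
# active in the current example.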