gpack 2.0.0 → 2.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LICENSE +674 -0
- data/README.rst +157 -0
- data/lib/gpack/config/headers.rb +5 -0
- data/lib/gpack/core/GitCollection.rb +154 -0
- data/lib/gpack/core/GitReference.rb +366 -0
- data/lib/gpack/core/gpack.rb +94 -0
- data/lib/gpack/core/gpack_readme.rb +160 -0
- data/lib/gpack/core/parallel.rb +570 -0
- data/lib/gpack/core/parse_repos.rb +73 -0
- data/lib/gpack/core/ssh.rb +33 -0
- data/lib/gpack_readme.rb +154 -0
- data/test/test_gpack.rb +10 -0
- metadata +13 -1
|
@@ -0,0 +1,570 @@
|
|
|
1
|
+
|
|
2
|
+
require 'rbconfig'
|
|
3
|
+
#require 'parallel/version'
|
|
4
|
+
#require 'parallel/processor_count'
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
module Parallel
  # Cross-platform CPU detection. Mixed into Parallel (via extend) so callers
  # can ask Parallel.processor_count / Parallel.physical_processor_count.
  module ProcessorCount
    # Number of processors seen by the OS and used for process scheduling.
    #
    # * AIX: /usr/sbin/pmcycles (AIX 5+), /usr/sbin/lsdev
    # * BSD: /sbin/sysctl
    # * Cygwin: /proc/cpuinfo
    # * Darwin: /usr/bin/hwprefs, /usr/sbin/sysctl
    # * HP-UX: /usr/sbin/ioscan
    # * IRIX: /usr/sbin/sysconf
    # * Linux: /proc/cpuinfo
    # * Minix 3+: /proc/cpuinfo
    # * Solaris: /usr/sbin/psrinfo
    # * Tru64 UNIX: /usr/sbin/psrinfo
    # * UnixWare: /usr/sbin/psrinfo
    #
    # Result is memoized in @processor_count; falls back to 1 with a warning
    # on unrecognized platforms.
    def processor_count
      @processor_count ||= begin
        os_name = RbConfig::CONFIG["target_os"]
        if os_name =~ /mingw|mswin/
          # Windows: ask WMI for the logical-processor total across sockets
          require 'win32ole'
          result = WIN32OLE.connect("winmgmts://").ExecQuery(
            "select NumberOfLogicalProcessors from Win32_Processor")
          result.to_enum.collect(&:NumberOfLogicalProcessors).reduce(:+)
        elsif File.readable?("/proc/cpuinfo")
          IO.read("/proc/cpuinfo").scan(/^processor/).size
        elsif File.executable?("/usr/bin/hwprefs")
          IO.popen("/usr/bin/hwprefs thread_count").read.to_i
        elsif File.executable?("/usr/sbin/psrinfo")
          IO.popen("/usr/sbin/psrinfo").read.scan(/^.*on-*line/).size
        elsif File.executable?("/usr/sbin/ioscan")
          IO.popen("/usr/sbin/ioscan -kC processor") do |out|
            out.read.scan(/^.*processor/).size
          end
        elsif File.executable?("/usr/sbin/pmcycles")
          IO.popen("/usr/sbin/pmcycles -m").read.count("\n")
        elsif File.executable?("/usr/sbin/lsdev")
          IO.popen("/usr/sbin/lsdev -Cc processor -S 1").read.count("\n")
        elsif File.executable?("/usr/sbin/sysconf") and os_name =~ /irix/i
          IO.popen("/usr/sbin/sysconf NPROC_ONLN").read.to_i
        elsif File.executable?("/usr/sbin/sysctl")
          IO.popen("/usr/sbin/sysctl -n hw.ncpu").read.to_i
        elsif File.executable?("/sbin/sysctl")
          IO.popen("/sbin/sysctl -n hw.ncpu").read.to_i
        else
          $stderr.puts "Unknown platform: " + RbConfig::CONFIG["target_os"]
          $stderr.puts "Assuming 1 processor."
          1
        end
      end
    end

    # Number of physical processor cores on the current system.
    # Memoized in @physical_processor_count.
    def physical_processor_count
      @physical_processor_count ||= begin
        ppc = case RbConfig::CONFIG["target_os"]
        when /darwin1/
          IO.popen("/usr/sbin/sysctl -n hw.physicalcpu").read.to_i
        when /linux/
          cores = {} # unique physical ID / core ID combinations
          phy = 0
          IO.read("/proc/cpuinfo").scan(/^physical id.*|^core id.*/) do |ln|
            if ln.start_with?("physical")
              phy = ln[/\d+/]
            elsif ln.start_with?("core")
              cid = phy + ":" + ln[/\d+/]
              cores[cid] = true if not cores[cid]
            end
          end
          cores.count
        when /mswin|mingw/
          require 'win32ole'
          result_set = WIN32OLE.connect("winmgmts://").ExecQuery(
            "select NumberOfCores from Win32_Processor")
          result_set.to_enum.collect(&:NumberOfCores).reduce(:+)
        else
          processor_count
        end
        # fall back to logical count if physical info is invalid
        ppc > 0 ? ppc : processor_count
      end
    end
  end
end
|
|
92
|
+
|
|
93
|
+
module Parallel
  # Vendored parallel gem version; Version alias kept for backward compatibility.
  VERSION = Version = '1.10.0'
end
|
|
96
|
+
|
|
97
|
+
module Parallel
|
|
98
|
+
extend Parallel::ProcessorCount
|
|
99
|
+
|
|
100
|
+
# Raised in the parent when a forked worker can no longer be talked to
# (EPIPE on write / EOF on read — see Worker#work).
class DeadWorker < StandardError
end
|
|
102
|
+
|
|
103
|
+
# Raise inside the user block to stop further processing;
# handle_exception swallows it and the run returns nil.
class Break < StandardError
end
|
|
105
|
+
|
|
106
|
+
# Raise inside the user block to abort immediately; work_in_processes
# kills all sibling workers when it sees this.
class Kill < StandardError
end
|
|
108
|
+
|
|
109
|
+
# Stand-in for exceptions that cannot be Marshal.dump-ed back from a
# worker process: keeps the original class and message in the message
# text and preserves the original backtrace.
class UndumpableException < StandardError
  # original - the exception that failed to marshal.
  def initialize(original)
    super "#{original.class}: #{original.message}"
    # fixed typo: ivar was misspelled @bracktrace (worked only because the
    # reader below repeated the same misspelling)
    @backtrace = original.backtrace
  end

  # Backtrace of the original exception (nil if it was never raised).
  def backtrace
    @backtrace
  end
end
|
|
119
|
+
|
|
120
|
+
# Sentinel a producer lambda returns to signal "no more items" (see JobFactory#next).
Stop = Object.new
|
|
121
|
+
|
|
122
|
+
# Ships an exception from a worker back to the parent over Marshal.
# Exceptions that cannot be marshalled are replaced by an
# UndumpableException carrying the original class/message.
class ExceptionWrapper
  attr_reader :exception

  def initialize(exception)
    @exception =
      if dumpable?(exception)
        exception
      else
        UndumpableException.new(exception)
      end
  end

  private

  # True when the exception survives a Marshal round-trip probe.
  def dumpable?(exception)
    Marshal.dump(exception)
    true
  rescue
    false
  end
end
|
|
133
|
+
|
|
134
|
+
# Parent-side handle for one forked child process: holds the two IPC pipe
# ends and the pid, and serializes jobs/results over them with Marshal.
class Worker
  attr_reader :pid, :read, :write
  attr_accessor :thread
  def initialize(read, write, pid)
    @read, @write, @pid = read, write, pid
  end

  # Close our pipe ends (child sees EOF and exits its job loop) and reap it.
  def stop
    close_pipes
    wait # if it goes zombie, rather wait here to be able to debug
  end

  # might be passed to started_processes and simultaneously closed by another thread
  # when running in isolation mode, so we have to check if it is closed before closing
  def close_pipes
    read.close unless read.closed?
    write.close unless write.closed?
  end

  # Send one packed job to the child and block until its result arrives.
  # Raises DeadWorker when the child is gone (EPIPE on write / EOF on read).
  # Re-raises any exception the child shipped back as an ExceptionWrapper.
  def work(data)
    begin
      Marshal.dump(data, write)
    rescue Errno::EPIPE
      raise DeadWorker
    end

    result = begin
      Marshal.load(read)
    rescue EOFError
      raise DeadWorker
    end
    raise result.exception if ExceptionWrapper === result
    result
  end

  private

  # Reap the child process; Interrupt just means it already died.
  def wait
    Process.wait(pid)
  rescue Interrupt
    # process died
  end
end
|
|
177
|
+
|
|
178
|
+
# Hands out [item, index] pairs to workers — either from a materialized
# array, or lazily from a producer (a callable or a Queue-like object).
# All index bookkeeping is guarded by the shared mutex.
class JobFactory
  def initialize(source, mutex)
    # producer mode: source itself is callable, or is a Queue-like we wrap
    @lambda = (source.respond_to?(:call) && source) || queue_wrapper(source)
    @source = source.to_a unless @lambda # turn Range and other Enumerable-s into an Array
    @mutex = mutex
    @index = -1
    @stopped = false
  end

  # Next [item, index] pair, or nil when the source is exhausted.
  def next
    if producer?
      # - index and item stay in sync
      # - do not call lambda after it has returned Stop
      item, index = @mutex.synchronize do
        return if @stopped
        item = @lambda.call
        @stopped = (item == Parallel::Stop)
        return if @stopped
        [item, @index += 1]
      end
    else
      index = @mutex.synchronize { @index += 1 }
      return if index >= size
      item = @source[index]
    end
    [item, index]
  end

  # Float::INFINITY for producers (size unknown until Stop is returned).
  def size
    if producer?
      Float::INFINITY
    else
      @source.size
    end
  end

  # generate item that is sent to workers
  # just index is faster + less likely to blow up with unserializable errors
  def pack(item, index)
    producer? ? [item, index] : index
  end

  # unpack item that is sent to workers
  def unpack(data)
    producer? ? data : [@source[data], data]
  end

  private

  def producer?
    @lambda
  end

  # Duck-type check for Queue-likes; wraps a non-blocking pop in a lambda.
  def queue_wrapper(array)
    array.respond_to?(:num_waiting) && array.respond_to?(:pop) && lambda { array.pop(false) }
  end
end
|
|
235
|
+
|
|
236
|
+
# Installs a SIGINT (or custom signal) handler for the duration of a run so
# Ctrl+C kills all known child pids before the process exits. Nested runs
# share one handler via the @to_be_killed stack.
class UserInterruptHandler
  INTERRUPT_SIGNAL = :SIGINT

  class << self
    # kill all these pids or threads if user presses Ctrl+c
    def kill_on_ctrl_c(pids, options)
      @to_be_killed ||= []
      old_interrupt = nil
      signal = options.fetch(:interrupt_signal, INTERRUPT_SIGNAL)

      # only the outermost call installs the trap
      if @to_be_killed.empty?
        old_interrupt = trap_interrupt(signal) do
          $stderr.puts 'Parallel execution interrupted, exiting ...'
          @to_be_killed.flatten.each { |pid| kill(pid) }
        end
      end

      @to_be_killed << pids

      yield
    ensure
      @to_be_killed.pop # do not kill pids that could be used for new processes
      restore_interrupt(old_interrupt, signal) if @to_be_killed.empty?
    end

    # SIGKILL one pid, ignoring processes that are already gone.
    def kill(thing)
      Process.kill(:KILL, thing)
    rescue Errno::ESRCH
      # some linux systems already automatically killed the children at this point
      # so we just ignore them not being there
    end

    private

    # Install our handler; returns the previous handler so it can be restored.
    # After our cleanup block runs, the previous behavior is re-triggered.
    def trap_interrupt(signal)
      old = Signal.trap signal, 'IGNORE'

      Signal.trap signal do
        yield
        if old == "DEFAULT"
          raise Interrupt
        else
          old.call
        end
      end

      old
    end

    def restore_interrupt(old, signal)
      Signal.trap signal, old
    end
  end
end
|
|
290
|
+
|
|
291
|
+
class << self
|
|
292
|
+
# Run the block concurrently in `count` threads, yielding each thread's
# index. Returns the array of block return values; Thread#value re-raises
# any exception raised inside a thread.
def in_threads(options={:count => 2})
  count, _ = extract_count_from_options(options)
  threads = []
  count.times do |thread_index|
    threads << Thread.new { yield(thread_index) }
  end
  threads.map(&:value)
end
|
|
298
|
+
|
|
299
|
+
# Run the block in forked processes (default: one per logical CPU),
# yielding each worker's index. Delegates to map over 0...count.
def in_processes(options = {}, &block)
  count, opts = extract_count_from_options(options)
  worker_count = count || processor_count
  map(0...worker_count, opts.merge(:in_processes => worker_count), &block)
end
|
|
304
|
+
|
|
305
|
+
# Like map but discards results (avoids shipping them back from workers);
# returns the original collection.
def each(array, options={}, &block)
  no_result_options = options.merge(:preserve_results => false)
  map(array, no_result_options, &block)
  array
end
|
|
309
|
+
|
|
310
|
+
# each variant that also yields the item's index to the block.
def each_with_index(array, options={}, &block)
  indexed_options = options.merge(:with_index => true)
  each(array, indexed_options, &block)
end
|
|
313
|
+
|
|
314
|
+
# Core entry point: run the block over source with the chosen strategy.
# Strategy selection: JRuby defaults to threads (no fork) unless
# :in_processes is forced; :in_threads forces threads; otherwise forked
# processes. A worker count of 0 (or no fork support) runs inline.
def map(source, options = {}, &block)
  options[:mutex] = Mutex.new

  if RUBY_PLATFORM =~ /java/ and not options[:in_processes]
    method = :in_threads
    size = options[method] || processor_count
  elsif options[:in_threads]
    method = :in_threads
    size = options[method]
  else
    method = :in_processes
    if Process.respond_to?(:fork)
      size = options[method] || processor_count
    else
      warn "Process.fork is not supported by this Ruby"
      size = 0
    end
  end

  job_factory = JobFactory.new(source, options[:mutex])
  size = [job_factory.size, size].min # never start more workers than jobs

  # results must be kept when the caller wants them or a :finish hook needs them
  options[:return_results] = (options[:preserve_results] != false || !!options[:finish])
  add_progress_bar!(job_factory, options)

  if size == 0
    work_direct(job_factory, options, &block)
  elsif method == :in_threads
    work_in_threads(job_factory, options.merge(:count => size), &block)
  else
    work_in_processes(job_factory, options.merge(:count => size), &block)
  end
end
|
|
347
|
+
|
|
348
|
+
# map variant that also yields the item's index to the block.
def map_with_index(array, options={}, &block)
  indexed_options = options.merge(:with_index => true)
  map(array, indexed_options, &block)
end
|
|
351
|
+
|
|
352
|
+
# Index of the worker owning the current thread (thread-local;
# nil outside of a Parallel worker).
def worker_number
  current_thread = Thread.current
  current_thread[:parallel_worker_number]
end
|
|
355
|
+
|
|
356
|
+
# Record this thread's worker index (thread-local), set by the
# work_* strategies and cleared when a worker finishes.
def worker_number=(worker_num)
  current_thread = Thread.current
  current_thread[:parallel_worker_number] = worker_num
end
|
|
359
|
+
|
|
360
|
+
private
|
|
361
|
+
|
|
362
|
+
# When options[:progress] is set, wrap options[:finish] so every finished
# job increments a ruby-progressbar. Requires an array-like source (a
# producer has unknown size). Mutates options in place.
def add_progress_bar!(job_factory, options)
  if progress_options = options[:progress]
    raise "Progressbar can only be used with array like items" if job_factory.size == Float::INFINITY
    require 'ruby-progressbar'

    # normalize the `true` / "title string" shorthands into an options hash
    if progress_options == true
      progress_options = { title: "Progress" }
    elsif progress_options.respond_to? :to_str
      progress_options = { title: progress_options.to_str }
    end

    progress_options = {
      total: job_factory.size,
      format: '%t |%E | %B | %a'
    }.merge(progress_options)

    progress = ProgressBar.create(progress_options)
    # chain: keep the user's :finish hook, then tick the bar
    old_finish = options[:finish]
    options[:finish] = lambda do |item, i, result|
      old_finish.call(item, i, result) if old_finish
      progress.increment
    end
  end
end
|
|
386
|
+
|
|
387
|
+
# Degenerate case (0 workers, e.g. no fork support): run every job inline
# in the caller's thread, preserving the instrumentation hooks.
def work_direct(job_factory, options, &block)
  self.worker_number = 0
  results = []
  while set = job_factory.next
    item, index = set
    results << with_instrumentation(item, index, options) do
      call_with_index(item, index, options, &block)
    end
  end
  results
ensure
  # always clear the thread-local so the caller thread is not misidentified
  self.worker_number = nil
end
|
|
400
|
+
|
|
401
|
+
# Thread strategy: options[:count] threads pull jobs from the factory
# until it is empty or any thread raises (first exception wins). Results
# are stored by job index so ordering matches the source.
def work_in_threads(job_factory, options, &block)
  raise "interrupt_signal is no longer supported for threads" if options[:interrupt_signal]
  results = []
  results_mutex = Mutex.new # arrays are not thread-safe on jRuby
  exception = nil

  in_threads(options) do |worker_num|
    self.worker_number = worker_num
    # as long as there are more jobs, work on one of them
    while !exception && set = job_factory.next
      begin
        item, index = set
        result = with_instrumentation item, index, options do
          call_with_index(item, index, options, &block)
        end
        results_mutex.synchronize { results[index] = result }
      rescue StandardError => e
        # record and let every thread's loop condition observe it
        exception = e
      end
    end
  end

  handle_exception(exception, results)
end
|
|
425
|
+
|
|
426
|
+
# Process strategy: one feeder thread per worker slot; each thread ships
# jobs to its forked child and collects results by index. With :isolation
# a fresh child is forked per job. Ctrl+C kills all children via
# UserInterruptHandler.
def work_in_processes(job_factory, options, &blk)
  workers = if options[:isolation]
    [] # we create workers per job and not beforehand
  else
    create_workers(job_factory, options, &blk)
  end
  results = []
  results_mutex = Mutex.new # arrays are not thread-safe
  exception = nil

  UserInterruptHandler.kill_on_ctrl_c(workers.map(&:pid), options) do
    in_threads(options) do |i|
      worker = workers[i]

      begin
        loop do
          break if exception
          item, index = job_factory.next
          break unless index

          if options[:isolation]
            worker = replace_worker(job_factory, workers, i, options, blk)
          end

          # remember which feeder thread owns this worker (for Kill below)
          worker.thread = Thread.current

          begin
            result = with_instrumentation item, index, options do
              worker.work(job_factory.pack(item, index))
            end
            results_mutex.synchronize { results[index] = result } # arrays are not threads safe on jRuby
          rescue StandardError => e
            exception = e
            # Kill aborts every sibling worker and feeder thread immediately
            if Parallel::Kill === exception
              (workers - [worker]).each do |w|
                w.thread.kill unless w.thread.nil?
                UserInterruptHandler.kill(w.pid)
              end
            end
          end
        end
      ensure
        worker.stop if worker
      end
    end
  end

  handle_exception(exception, results)
end
|
|
475
|
+
|
|
476
|
+
# Isolation mode: retire the worker in slot i (one fork per job) and
# fork a fresh replacement; returns the new worker.
def replace_worker(job_factory, workers, i, options, blk)
  # old worker is no longer used ... stop it
  old_worker = workers[i]
  old_worker.stop if old_worker

  # create a new replacement worker
  still_running = workers - [old_worker]
  spawn_options = options.merge(started_workers: still_running, worker_number: i)
  workers[i] = worker(job_factory, spawn_options, &blk)
end
|
|
485
|
+
|
|
486
|
+
# Fork options[:count] workers up front. Each fork is told about its
# already-started siblings so it can close their inherited pipe ends.
def create_workers(job_factory, options, &block)
  workers = []
  options[:count].times do |i|
    spawn_options = options.merge(started_workers: workers, worker_number: i)
    workers << worker(job_factory, spawn_options, &block)
  end
  workers
end
|
|
493
|
+
|
|
494
|
+
# Fork one child connected via two pipes; returns the parent-side Worker.
# Pipe ownership: parent keeps parent_read/parent_write, child keeps
# child_read/child_write — each side closes the ends it does not own.
def worker(job_factory, options, &block)
  child_read, parent_write = IO.pipe
  parent_read, child_write = IO.pipe

  pid = Process.fork do
    self.worker_number = options[:worker_number]

    begin
      # inherited pipe ends of earlier-started siblings are useless here
      options.delete(:started_workers).each(&:close_pipes)

      parent_write.close
      parent_read.close

      process_incoming_jobs(child_read, child_write, job_factory, options, &block)
    ensure
      child_read.close
      child_write.close
    end
  end

  # parent keeps only its own pipe ends
  child_read.close
  child_write.close

  Worker.new(parent_read, parent_write, pid)
end
|
|
519
|
+
|
|
520
|
+
# Child-side job loop: read packed jobs until the parent closes its end
# (EOF), run the block, and ship back either the result or a marshal-safe
# ExceptionWrapper.
def process_incoming_jobs(read, write, job_factory, options, &block)
  until read.eof?
    data = Marshal.load(read)
    item, index = job_factory.unpack(data)
    result = begin
      call_with_index(item, index, options, &block)
    rescue StandardError => e
      ExceptionWrapper.new(e)
    end
    Marshal.dump(result, write)
  end
end
|
|
532
|
+
|
|
533
|
+
# Translate a recorded worker exception into the run's outcome:
# Break/Kill (exact class match, intentionally not subclasses) mean a
# deliberate early exit and yield nil; any other exception is re-raised;
# no exception returns the collected results.
def handle_exception(exception, results)
  early_exit_classes = [Parallel::Break, Parallel::Kill]
  return nil if early_exit_classes.include?(exception.class)
  raise exception if exception
  results
end
|
|
538
|
+
|
|
539
|
+
# options is either a Integer or a Hash with :count
|
|
540
|
+
# options is either a Integer or a Hash with :count.
# Normalizes to a [count, options_hash] pair (count may be nil).
def extract_count_from_options(options)
  if options.is_a?(Hash)
    [options[:count], options]
  else
    [options, {}]
  end
end
|
|
549
|
+
|
|
550
|
+
# Invoke the user block with the item (plus the index when :with_index).
# Returns the block's value only when :return_results is set; otherwise
# nil, to avoid GC overhead of passing large results around.
def call_with_index(item, index, options, &block)
  args = options[:with_index] ? [item, index] : [item]
  result = block.call(*args)
  options[:return_results] ? result : nil
end
|
|
560
|
+
|
|
561
|
+
# Run one job (the given block) bracketed by the optional :start and
# :finish hooks; hook invocations are serialized on options[:mutex].
# Returns the job result, or nil when :preserve_results is false.
def with_instrumentation(item, index, options)
  start_hook  = options[:start]
  finish_hook = options[:finish]

  options[:mutex].synchronize { start_hook.call(item, index) } if start_hook
  result = yield
  options[:mutex].synchronize { finish_hook.call(item, index, result) } if finish_hook

  result unless options[:preserve_results] == false
end
|
|
569
|
+
end
|
|
570
|
+
end
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
|
|
2
|
+
## Parse the GpackRepos file
#
# Reads the YAML repo file named by $SETTINGS["core"]["repofile"] and builds
# a GitCollection of GitReference entries. The special top-level "config"
# key carries global options (remote_key / ssh_command) written into
# $SETTINGS["ssh"]; every other top-level key is a repository entry and must
# provide url/localdir/branch. The presence of a ".gpackunlock" file marks
# all repos writable. Returns the populated GitCollection; raises
# RuntimeError on a missing file or malformed entry.
def parse_gpackrepos()

  grepos_file = $SETTINGS["core"]["repofile"]

  ## Options for YAML File
  required_keys = ["url","localdir","branch"]
  valid_config = ["remote_key","ssh_command"]

  grepos = GitCollection.new()

  if !File.exist?(grepos_file)
    raise "File does not exist #{grepos_file}"
  end

  # File.exist? — File.exists? was deprecated and removed in Ruby 3.2
  unlocked = File.exist?(".gpackunlock")

  yml_file = YAML.load_file(grepos_file)

  yml_file.each do |key,entry|
    if key == "config"
      # Read in config settings
      # Check if the config option is valid
      entry.each do |ckey,centry|
        if !valid_config.index(ckey)
          raise "Error in file '#{grepos_file}'.\n\tError in configuration entry #{key}\n\tConfig option must be one of #{valid_config}"
        end

        case ckey
        when "lock"
          # TODO implement this (note: "lock" is currently rejected by the
          # valid_config check above, so this branch is unreachable)
        when "remote_key"
          # SSH key to fetch for git access
          $SETTINGS["ssh"]["key_url"] = centry
        when "ssh_command"
          # Arguments to ssh
          $SETTINGS["ssh"]["cmd"] = centry
        end

      end

    else
      reponame = key

      # Check required keys exist
      if !required_keys.all? {|s| entry.key? s}
        raise "Error in file '#{grepos_file}'.\n\tEntry #{key}\n\tFor a repository these properties are required #{required_keys}"
      end

      # Optional Key Parsing
      # NOTE(review): `readonly` is computed here but never applied to
      # new_repo — confirm whether GitReference should receive it.
      if entry.key?("lock")
        readonly = entry["lock"]
      else
        readonly = true
      end

      new_repo = GitReference.new :url=>entry["url"], :localdir=>entry["localdir"], :branch=>entry["branch"]

      if unlocked
        new_repo.readonly = false
      end
      grepos.add_ref(new_repo)

    end
  end

  return grepos

end
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
|
|
2
|
+
# Download the deploy SSH key named by $SETTINGS["ssh"]["key_url"] (if any)
# into a user-only Tempfile and stash the Tempfile object in
# $SETTINGS["ssh"]["key"]. No-op when no key_url is configured.
# Raises (after printing a hint) when the URL cannot be fetched.
def download_ssh_key()
  key_url = $SETTINGS["ssh"]["key_url"]
  if key_url
    require 'open-uri' # Kernel#open no longer handles URLs in Ruby >= 3.0
    remote_key = Tempfile.new('gpack_ssh')
    # key material: readable only by the current user (resolves old TODO)
    File.chmod(0600, remote_key.path)
    begin
      # URI.open avoids Kernel#open's "|command" injection hazard
      download = URI.open(key_url)
      IO.copy_stream(download, remote_key.path)
    rescue
      puts "Error with URL #{key_url}\nEnsure this is a valid url and can be reached"
      raise
    end
    $SETTINGS["ssh"]["key"] = remote_key

  end
end
|
|
17
|
+
|
|
18
|
+
# Build the ssh command git should use: when a downloaded deploy key is
# present, append "-i <keyfile>" to the configured ssh command (or to a
# plain "ssh"). Writes the result back to $SETTINGS["ssh"]["cmd"];
# no-op when no key was downloaded.
def set_ssh_cmd()
  remote_key = $SETTINGS["ssh"]["key"]
  return unless remote_key

  id_arg = " -i #{remote_key.path}"
  base_cmd = $SETTINGS["ssh"]["cmd"]
  $SETTINGS["ssh"]["cmd"] =
    if base_cmd
      "#{base_cmd}#{id_arg}"
    else
      "ssh #{id_arg}"
    end
end
|