fairy 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LICENSE +674 -0
- data/Makefile +116 -0
- data/README +15 -0
- data/bin/fairy +582 -0
- data/bin/fairy-cat +74 -0
- data/bin/fairy-cp +128 -0
- data/bin/fairy-rm +122 -0
- data/bin/subcmd/controller +41 -0
- data/bin/subcmd/inspector +81 -0
- data/bin/subcmd/master +43 -0
- data/bin/subcmd/node +47 -0
- data/bin/subcmd/processor +54 -0
- data/doc/programming-interface.html +240 -0
- data/doc/programming-interface.rd +300 -0
- data/etc/fairy.conf.tmpl +118 -0
- data/ext/simple_hash/extconf.rb +4 -0
- data/ext/simple_hash/simple_hash.c +42 -0
- data/fairy.gemspec +60 -0
- data/lib/fairy/client/addins.rb +20 -0
- data/lib/fairy/client/barrier.rb +29 -0
- data/lib/fairy/client/basic-group-by.rb +52 -0
- data/lib/fairy/client/cat.rb +41 -0
- data/lib/fairy/client/direct-product.rb +51 -0
- data/lib/fairy/client/equijoin.rb +79 -0
- data/lib/fairy/client/exec.rb +54 -0
- data/lib/fairy/client/filter.rb +62 -0
- data/lib/fairy/client/find.rb +35 -0
- data/lib/fairy/client/group-by.rb +194 -0
- data/lib/fairy/client/here.rb +84 -0
- data/lib/fairy/client/inject.rb +70 -0
- data/lib/fairy/client/input-file.rb +53 -0
- data/lib/fairy/client/input-iota.rb +49 -0
- data/lib/fairy/client/input-local-file.rb +188 -0
- data/lib/fairy/client/input-varray.rb +30 -0
- data/lib/fairy/client/input.rb +42 -0
- data/lib/fairy/client/io-filter.rb +26 -0
- data/lib/fairy/client/junction.rb +31 -0
- data/lib/fairy/client/map.rb +34 -0
- data/lib/fairy/client/merge-group-by.rb +71 -0
- data/lib/fairy/client/output-file.rb +64 -0
- data/lib/fairy/client/output-local-file.rb +60 -0
- data/lib/fairy/client/output-null.rb +47 -0
- data/lib/fairy/client/output-varray.rb +50 -0
- data/lib/fairy/client/output.rb +29 -0
- data/lib/fairy/client/roma-put.rb +62 -0
- data/lib/fairy/client/roma.rb +156 -0
- data/lib/fairy/client/seg-join.rb +61 -0
- data/lib/fairy/client/seg-map.rb +78 -0
- data/lib/fairy/client/seg-shuffle.rb +35 -0
- data/lib/fairy/client/seg-split.rb +27 -0
- data/lib/fairy/client/seg-zip.rb +60 -0
- data/lib/fairy/client/select.rb +38 -0
- data/lib/fairy/client/sort.rb +48 -0
- data/lib/fairy/client/sort18.rb +56 -0
- data/lib/fairy/client/sort19.rb +61 -0
- data/lib/fairy/client/there.rb +47 -0
- data/lib/fairy/client/top_n_into_roma.rb +34 -0
- data/lib/fairy/client/wc.rb +92 -0
- data/lib/fairy/controller.rb +1103 -0
- data/lib/fairy/logger.rb +107 -0
- data/lib/fairy/master/addins.rb +20 -0
- data/lib/fairy/master/atom.rb +17 -0
- data/lib/fairy/master/c-barrier.rb +283 -0
- data/lib/fairy/master/c-basic-group-by.rb +250 -0
- data/lib/fairy/master/c-cat.rb +159 -0
- data/lib/fairy/master/c-direct-product.rb +203 -0
- data/lib/fairy/master/c-exec.rb +68 -0
- data/lib/fairy/master/c-filter.rb +422 -0
- data/lib/fairy/master/c-find.rb +138 -0
- data/lib/fairy/master/c-group-by.rb +64 -0
- data/lib/fairy/master/c-here.rb +80 -0
- data/lib/fairy/master/c-inject.rb +119 -0
- data/lib/fairy/master/c-input-file.rb +46 -0
- data/lib/fairy/master/c-input-iota.rb +66 -0
- data/lib/fairy/master/c-input-local-file.rb +117 -0
- data/lib/fairy/master/c-input-varray.rb +53 -0
- data/lib/fairy/master/c-input.rb +24 -0
- data/lib/fairy/master/c-inputtable.rb +31 -0
- data/lib/fairy/master/c-inputtable18.rb +36 -0
- data/lib/fairy/master/c-inputtable19.rb +35 -0
- data/lib/fairy/master/c-io-filter.rb +28 -0
- data/lib/fairy/master/c-junction.rb +54 -0
- data/lib/fairy/master/c-map.rb +27 -0
- data/lib/fairy/master/c-merge-group-by.rb +241 -0
- data/lib/fairy/master/c-output-file.rb +84 -0
- data/lib/fairy/master/c-output-local-file.rb +19 -0
- data/lib/fairy/master/c-output-null.rb +45 -0
- data/lib/fairy/master/c-output-varray.rb +57 -0
- data/lib/fairy/master/c-output.rb +20 -0
- data/lib/fairy/master/c-seg-join.rb +141 -0
- data/lib/fairy/master/c-seg-map.rb +26 -0
- data/lib/fairy/master/c-seg-shuffle.rb +87 -0
- data/lib/fairy/master/c-seg-split.rb +110 -0
- data/lib/fairy/master/c-seg-zip.rb +132 -0
- data/lib/fairy/master/c-select.rb +27 -0
- data/lib/fairy/master/c-sort.rb +108 -0
- data/lib/fairy/master/c-there.rb +57 -0
- data/lib/fairy/master/c-wc.rb +232 -0
- data/lib/fairy/master/job-interpriter.rb +19 -0
- data/lib/fairy/master/scheduler.rb +24 -0
- data/lib/fairy/master.rb +329 -0
- data/lib/fairy/node/addins.rb +19 -0
- data/lib/fairy/node/p-barrier.rb +95 -0
- data/lib/fairy/node/p-basic-group-by.rb +252 -0
- data/lib/fairy/node/p-direct-product.rb +153 -0
- data/lib/fairy/node/p-exec.rb +30 -0
- data/lib/fairy/node/p-filter.rb +363 -0
- data/lib/fairy/node/p-find.rb +111 -0
- data/lib/fairy/node/p-group-by.rb +1534 -0
- data/lib/fairy/node/p-here.rb +21 -0
- data/lib/fairy/node/p-identity.rb +24 -0
- data/lib/fairy/node/p-inject.rb +127 -0
- data/lib/fairy/node/p-input-file.rb +108 -0
- data/lib/fairy/node/p-input-iota.rb +39 -0
- data/lib/fairy/node/p-input-local-file.rb +61 -0
- data/lib/fairy/node/p-input-varray.rb +26 -0
- data/lib/fairy/node/p-io-filter.rb +28 -0
- data/lib/fairy/node/p-map.rb +40 -0
- data/lib/fairy/node/p-merger-group-by.rb +48 -0
- data/lib/fairy/node/p-output-file.rb +104 -0
- data/lib/fairy/node/p-output-local-file.rb +14 -0
- data/lib/fairy/node/p-output-null.rb +32 -0
- data/lib/fairy/node/p-output-varray.rb +41 -0
- data/lib/fairy/node/p-seg-join.rb +82 -0
- data/lib/fairy/node/p-seg-map.rb +34 -0
- data/lib/fairy/node/p-seg-split.rb +61 -0
- data/lib/fairy/node/p-seg-zip.rb +79 -0
- data/lib/fairy/node/p-select.rb +40 -0
- data/lib/fairy/node/p-single-exportable.rb +90 -0
- data/lib/fairy/node/p-sort.rb +195 -0
- data/lib/fairy/node/p-task.rb +113 -0
- data/lib/fairy/node/p-there.rb +44 -0
- data/lib/fairy/node/p-wc.rb +266 -0
- data/lib/fairy/node.rb +187 -0
- data/lib/fairy/processor.rb +510 -0
- data/lib/fairy/share/base-app.rb +114 -0
- data/lib/fairy/share/block-source.rb +234 -0
- data/lib/fairy/share/conf.rb +396 -0
- data/lib/fairy/share/debug.rb +21 -0
- data/lib/fairy/share/encoding.rb +17 -0
- data/lib/fairy/share/fast-tempfile.rb +93 -0
- data/lib/fairy/share/file-place.rb +176 -0
- data/lib/fairy/share/hash-1.rb +20 -0
- data/lib/fairy/share/hash-md5.rb +28 -0
- data/lib/fairy/share/hash-murmur.rb +69 -0
- data/lib/fairy/share/hash-rb18.rb +20 -0
- data/lib/fairy/share/hash-simple-hash.rb +28 -0
- data/lib/fairy/share/inspector.rb +16 -0
- data/lib/fairy/share/lc/exceptions.rb +82 -0
- data/lib/fairy/share/lc/ja/exceptions.rb +81 -0
- data/lib/fairy/share/locale.rb +17 -0
- data/lib/fairy/share/log.rb +215 -0
- data/lib/fairy/share/pool-dictionary.rb +53 -0
- data/lib/fairy/share/port-marshaled-queue.rb +347 -0
- data/lib/fairy/share/port.rb +1697 -0
- data/lib/fairy/share/reference.rb +45 -0
- data/lib/fairy/share/stdout.rb +56 -0
- data/lib/fairy/share/tr.rb +16 -0
- data/lib/fairy/share/varray.rb +147 -0
- data/lib/fairy/share/vfile.rb +183 -0
- data/lib/fairy/version.rb +8 -0
- data/lib/fairy.rb +206 -0
- data/sample/grep.rb +46 -0
- data/sample/ping.rb +19 -0
- data/sample/sort.rb +102 -0
- data/sample/wordcount.rb +61 -0
- data/spec/README +12 -0
- data/spec/fairy1_spec.rb +31 -0
- data/spec/fairy2_spec.rb +42 -0
- data/spec/fairy3_spec.rb +126 -0
- data/spec/fairy4_spec.rb +63 -0
- data/spec/fairy5_spec.rb +45 -0
- data/spec/fairy6_spec.rb +52 -0
- data/spec/fairy7_spec.rb +58 -0
- data/spec/fairy8_spec.rb +48 -0
- data/spec/mkdat.rb +148 -0
- data/spec/run_all.sh +65 -0
- data/test/testc.rb +7111 -0
- data/tools/cap_recipe/Capfile +144 -0
- data/tools/cap_recipe/cluster.yml.sample +14 -0
- data/tools/fairy_perf_graph.rb +444 -0
- data/tools/git-tag +44 -0
- data/tools/log-analysis.rb +62 -0
- data/tools/svn-ls-diff +38 -0
- data/tools/svn-tags +37 -0
- metadata +298 -0
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
# encoding: UTF-8
|
|
2
|
+
#
|
|
3
|
+
# Copyright (C) 2007-2010 Rakuten, Inc.
|
|
4
|
+
#
|
|
5
|
+
|
|
6
|
+
require "fairy/client/filter"
|
|
7
|
+
|
|
8
|
+
module Fairy
  # Client-side job that feeds a local Enumerable into the distributed
  # pipeline ("there" = push local data out to the cluster).
  class There < Filter
    module Interface
      # Usage:
      #   fairy.there(enumerable)....
      #   enumerable.there(fairy)....
      #   enumerable | fairy.there
      def there(enumerable = nil, opts={})
        There.input(self, opts, enumerable)
      end
    end
    Fairy::def_fairy_interface Interface

    Enumerable.module_eval %{def there(fairy); fairy.there(self); end}

    class << self
      # Entry point used by the fairy interface; delegates to start.
      def input(fairy, opts, enumerable)
        start(fairy, opts, enumerable)
      end

      # Builds the job, kicks off its backend, and returns the job.
      def start(fairy, opts, enumerable)
        job = new(fairy, opts, enumerable)
        job.start
        job
      end
    end

    def initialize(fairy, opts, enumerable)
      super
      @enumerable = enumerable
    end

    # Name of the master-side counterpart class.
    def backend_class_name
      "CThere"
    end

    # Starts the master-side job.
    def start
      backend.start
    end
  end
end
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# encoding: UTF-8
|
|
2
|
+
#
|
|
3
|
+
# Copyright (C) 2007-2010 Rakuten, Inc.
|
|
4
|
+
#
|
|
5
|
+
|
|
6
|
+
require 'rbconfig'
|
|
7
|
+
|
|
8
|
+
module Fairy
  # Filter interface that stores the first +n+ elements of the stream into
  # ROMA (a key-value store) under +key+, joined with +sep+.
  module TopNIntoRomaInterface
    # ap  : ROMA node address(es) — a String is wrapped into a one-element Array
    # key : key under which the joined values are stored
    # n   : number of leading elements to keep
    # sep : separator used to join the kept elements (default ",")
    def top_n_into_roma(ap, key, n, sep=',')
      ap = [ap] if ap.kind_of?(String)

      buf = []
      cnt = 0
      # Fixed: RbConfig::CONFIG replaces the Config::CONFIG alias that was
      # deprecated in Ruby 1.9 and removed in 2.2 ('rbconfig' is required at
      # the top of this file). On JRuby the whole stream is consumed instead
      # of breaking early — presumably a workaround for `break` behavior in
      # blocks on old JRuby; TODO confirm whether it is still needed.
      if RbConfig::CONFIG['RUBY_INSTALL_NAME'] =~ /jruby/
        here.each{|s|
          cnt += 1
          buf << s if cnt <= n
        }
      else
        here.each{|s|
          cnt += 1
          buf << s
          break if cnt == n
        }
      end
      roma = Roma::Client::RomaClient.new(ap)
      roma[key] = buf.join(sep)
    end
  end
  def_filter_interface TopNIntoRomaInterface
end
|
|
33
|
+
|
|
34
|
+
|
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
# encoding: UTF-8
|
|
2
|
+
#
|
|
3
|
+
# Copyright (C) 2007-2010 Rakuten, Inc.
|
|
4
|
+
#
|
|
5
|
+
|
|
6
|
+
require "fairy/client/io-filter"
|
|
7
|
+
|
|
8
|
+
module Fairy
  # Word-count style job: reads data described by a virtual file (VFile),
  # runs the count on the backend, and writes results out via PostFilter.
  class WC<IOFilter
    module Interface
      # Client entry point: opens the input described by +from_desc+ and
      # attaches a PostFilter writing the result to +to_desc+.
      def wc(from_desc, to_desc, opts = {})
        wc = WC.open(self, opts, from_desc)
        wc.post_wc(to_desc, opts)
      end
    end
    Fairy::def_fairy_interface Interface

    # Builds a WC job and opens its input descriptor.
    def self.open(fairy, opts, from_desc)
      wc = new(fairy, opts)
      wc.open(from_desc)
      wc
    end

    def initialize(fairy, opts=nil)
      super
    end

    # Name of the master-side counterpart class.
    def backend_class_name
      "CWC"
    end

    # Resolves +from_desc+ (Array of paths, a VFile, or a String that is
    # either a vfile name or a real path) to a VFile and opens it on the
    # backend. Any other descriptor type raises ERR::IllegalVFile.
    # Returns self for chaining.
    def open(from_desc)
      @descripter = from_desc

      case from_desc
      when Array
        vf = VFile.real_files(from_desc)
      when VFile
        vf = from_desc
      when String
        if VFile.vfile?(from_desc)
          vf = VFile.vfile(from_desc)
        else
          vf = VFile.real_files([from_desc])
        end
      else
        ERR::Raise ERR::IllegalVFile
      end
      backend.open(vf)
      self
    end

    # Downstream stage that receives WC output and writes it to a vfile.
    class PostFilter<IOFilter
      module Interface
        # Wires a new PostFilter after the current job, pointing it at
        # +to_desc+; returns the PostFilter.
        def post_wc(to_desc, opts = nil)
          post_wc = PostFilter.new(@fairy, opts)
          post_wc.output(to_desc)
          post_wc.input = self
          post_wc
        end
        Fairy::def_filter_interface Interface
      end

      def initialize(fairy, opts = nil)
        super
        @to_desc = nil
      end

      # Name of the master-side counterpart class.
      def backend_class_name
        "CWC::CPostFilter"
      end

      # Prepares the output vfile named +vfn+ and announces it to the backend.
      def output(vfn)
        @descripter = vfn
        @vfile = VFile.new
        @vfile.vfile_name = vfn
        backend.output(@vfile)
      end

      # Connects +job+ as this filter's input, then blocks until all output
      # has finished before materializing the vfile. Must be called after
      # #output has set up @vfile.
      def input=(job)
        @input = job
        backend.input=job.backend

        backend.wait_all_output_finished
        @vfile.create_vfile
      end

    end
  end
end
|
|
91
|
+
|
|
92
|
+
|
|
@@ -0,0 +1,1103 @@
|
|
|
1
|
+
# encoding: UTF-8
|
|
2
|
+
#
|
|
3
|
+
# Copyright (C) 2007-2010 Rakuten, Inc.
|
|
4
|
+
#
|
|
5
|
+
|
|
6
|
+
require "thread"
|
|
7
|
+
require "forwardable"
|
|
8
|
+
|
|
9
|
+
require "deep-connect.rb"
|
|
10
|
+
|
|
11
|
+
require "fairy/version"
|
|
12
|
+
require "fairy/share/conf"
|
|
13
|
+
require "fairy/share/pool-dictionary"
|
|
14
|
+
require "fairy/share/stdout"
|
|
15
|
+
|
|
16
|
+
module Fairy
|
|
17
|
+
|
|
18
|
+
class Controller
|
|
19
|
+
|
|
20
|
+
# Registry of [service-name, object] pairs exported on every controller
# at startup (see #start).
EXPORTS = []

# Registers +obj+ for export under +name+. When +name+ is omitted, +obj+
# must be a Class and its name (minus a leading "Fairy::" namespace) is
# used; anything else raises ERR::INTERNAL::CantDefExport.
def Controller.def_export(obj, name = nil)
  unless name
    if obj.kind_of?(Class)
      md = /Fairy::(.*)$/.match(obj.name)
      name = md ? md[1] : obj.name
    else
      ERR::Raise ERR::INTERNAL::CantDefExport, obj.to_s
    end
  end

  EXPORTS.push [name, obj]
end
|
|
36
|
+
|
|
37
|
+
# Convenience entry point: build a Controller for +id+ and connect it to
# the master listening on +master_port+.
def Controller.start(id, master_port)
  Controller.new(id).start(master_port)
end
|
|
41
|
+
|
|
42
|
+
# Sets up all controller state. Connections to DeepConnect, the master
# and the client are established later in #start / #connect.
def initialize(id)
  @id = id

  # DeepConnect service; created in #start.
  @deepconnect = nil

  # Connection to the master process; established in #start.
  @master_deepspace = nil
  @master = nil

  # The client attached via #connect.
  @client = nil

  @stdout_mutex = Mutex.new

  # service name -> exported object (see #export / #import).
  @services = {}

  @create_processor_mutex = Mutex.new

  # processor -> no of reserve
  @reserves = {}
  @reserves_mutex = Mutex.new
  @reserves_cv = ConditionVariable.new

  # bjob -> [processor, ...]
  @bjob2processors = {}
  @bjob2processors_mutex = Mutex.new
  @bjob2processors_cv = ConditionVariable.new

  # processor -> no of active ntasks
  @no_active_ntasks = {}
  @no_active_ntasks_mutex = Mutex.new
  @no_active_ntasks_cv = ConditionVariable.new

  @pool_dict = PoolDictionary.new
end
|
|
75
|
+
|
|
76
|
+
attr_reader :id
|
|
77
|
+
attr_reader :create_processor_mutex
|
|
78
|
+
|
|
79
|
+
attr_reader :hash_seed
|
|
80
|
+
|
|
81
|
+
# Tag identifying this controller in log output.
def log_id
  format("Controller[%s]", id)
end
|
|
84
|
+
|
|
85
|
+
# Boots this controller's DeepConnect service, exports the standard
# services (EXPORTS plus the Inspector), connects to the master on
# +master_port+, wires up remote logging, and registers this controller
# with the master.
def start(master_port, service=0)
  @deepconnect = DeepConnect.start(service)
  @deepconnect.export("Controller", self)

  # React when a peer goes away — in particular the client
  # (see #when_disconnected).
  @deepconnect.when_disconnected do |deepspace, opts|
    when_disconnected(deepspace, opts)
  end

  EXPORTS.each do |name, obj|
    export(name, obj)
  end

  require "fairy/share/inspector"
  @deepconnect.export("Inspector", Inspector.new(self))

  require "fairy/share/log"
  @master_deepspace = @deepconnect.open_deepspace("localhost", master_port)
  @master = @master_deepspace.import("Master")
  @logger = @master.logger
  Log.type = "[C]"
  Log.pid = id
  Log.logger = @logger
  Log::info(self, "Controller Service Start")
  Log::info(self, "\tfairy version: #{Version}")
  # Fixed log message: the original left the bracket unbalanced
  # ("[Powered by ..." with no closing "]").
  Log::info(self, "\t[Powered by #{RUBY_DESCRIPTION}]")

  @master.register_controller(self)

end
|
|
114
|
+
|
|
115
|
+
# Called by the client to attach itself to this controller. Layers the
# client's configuration over the base CONF, loads the configured
# group-by hash module and seeds the hash-value generator, optionally
# starts the process-life-manage thread, and redirects $stdout back to
# the client.
def connect(client, conf)
  @client = client

  conf.base_conf = CONF
  Fairy::REPLACE_CONF(conf)

  mod = CONF.GROUP_BY_HASH_MODULE
  require mod
  @hash_seed = Fairy::HValueGenerator.create_seed
  def_pool_variable(:HASH_SEED, @hash_seed)

  @PROCESS_LIFE_MANAGE_INTERVAL = CONF.PROCESS_LIFE_MANAGE_INTERVAL

  if @PROCESS_LIFE_MANAGE_INTERVAL
    Thread.start do
      start_process_life_manage
    end
    nil
  end

  # All client-visible output is funneled back over the wire.
  $stdout = Stdout.new(@client)

end
# Fixed method-spec name: the original registered the spec for "connext",
# a misspelling of "connect", so the REF/DVAL marshalling spec was never
# applied to this method.
DeepConnect.def_method_spec(self, "REF connect(REF, DVAL)")
|
|
139
|
+
|
|
140
|
+
# Cleanup performed when the client has exited; called by the master.
# Aborts pending node creation, waits for all reservations to drain while
# terminating idle processors, then stops DeepConnect and hard-exits.
def terminate
  # Cleanup when the client terminates.
  # Called from the master.

  Log::debug(self, "TERMINATE: #1")
  # Taking @reserves_mutex around this loop would deadlock, so it is
  # deliberately not synchronized here.
  # @reserves_mutex.synchronize do
  @bjob2processors.keys.each do |bjob|
    begin
      Log::debug(self, "TERMINATE: #1.1")
      bjob.abort_create_node
    rescue
      Log::debug(self, "TERMINATE: #1.1.1")
      Log::debug_exception($!)
    end
  end
  # end

  Log::debug(self, "TERMINATE: #2")
  # Loop until @reserves is empty: processors whose reserve count has
  # dropped to 0 get their ntasks terminated and are removed.
  # NOTE(review): when a reserve count stays > 0 this loop spins without
  # waiting on @reserves_cv — looks like a busy-wait; confirm intended.
  cond = true
  while cond
    Log::debug(self, "TERMINATE: #2.1")
    @reserves_mutex.synchronize do
      Log::debug(self, "TERMINATE: #2.2")
      cond = false if @reserves.empty?
      @reserves.keys.each do |p|
        Log::debug(self, "TERMINATE: #2.3")
        if @reserves[p] == 0
          Log::debug(self, "TERMINATE: #2.4")
          begin
            p.terminate_all_ntasks
          rescue
            Log::debug(self, "TERMINATE: #2.4.1")
            Log::debug_exception($!)
          end
          Log::debug(self, "TERMINATE: #2.5")
          begin
            @reserves.delete(p)
          rescue
            Log::debug(self, "TERMINATE: #2.5.1")
            Log::debug_exception($!)
          end
          Log::debug(self, "TERMINATE: #2.5.1")
          begin
            Log::debug(self, "TERMINATE: #2.5.2")
            p.node.terminate_processor(p)
            Log::debug(self, "TERMINATE: #2.5.3")
          rescue
            Log::debug(self, "TERMINATE: #2.5.4")
            Log::debug_exception($!)
          end
          Log::debug(self, "TERMINATE: #2.6")
        end
      end
      Log::debug(self, "TERMINATE: #2.7")
    end
    Log::debug(self, "TERMINATE: #2.8")
  end

  Log::debug(self, "TERMINATE: #3")
  # Best-effort teardown of anything still left in @reserves.
  @reserves.keys.each do |p|
    begin
      p.node.terminate_processor(p)
    rescue
      # p $!, $@
    end
  end

  Log::debug(self, "TERMINATE: #4")
  # Stop DeepConnect from a separate thread (after a short grace period,
  # so the reply to this call can still be delivered) and exit hard.
  Thread.start do
    sleep 0.2
    begin
      @deepconnect.stop
    ensure
      Process.exit!(0)
    end
  end
  Log::debug(self, "TERMINATE: #5")
  nil
end
|
|
220
|
+
|
|
221
|
+
# Older revision of #terminate, apparently kept for reference.
# Cleanup when the client terminates; called from the master.
# NOTE(review): superseded by #terminate above — confirm before reuse.
def terminate_rev0
  Log::debug(self, "TERMINATE: #1")
  @reserves_mutex.synchronize do
    @bjob2processors.keys.each do |bjob|
      bjob.abort_create_node
    end
  end

  Log::debug(self, "TERMINATE: #2")
  @reserves.keys.each do |p|
    begin
      Log::debug(self, "TERMINATE: #2.1")
      p.terminate_all_njobs
      Log::debug(self, "TERMINATE: #2.2")
    rescue
      # Fixed: the original called LOG::debug_exception(self) — LOG is an
      # undefined constant (this file uses Log everywhere else), and the
      # argument should be the caught exception as in #terminate.
      Log::debug_exception($!)
    end
  end

  Log::debug(self, "TERMINATE: #2.5")
  @reserves.keys.each do |p|
    begin
      p.terminate_all_njobs
    rescue
      # Fixed: same LOG -> Log / self -> $! correction as above.
      Log::debug_exception($!)
    end
  end

  Log::debug(self, "TERMINATE: #3")
  @reserves.keys.each do |p|
    begin
      p.node.terminate_processor(p)
    rescue
      # p $!, $@
    end
  end

  Log::debug(self, "TERMINATE: #4")
  Thread.start do
    sleep 0.1
    @deepconnect.stop
    Process.exit(0)
  end
  Log::debug(self, "TERMINATE: #5")
  nil
end
|
|
270
|
+
|
|
271
|
+
# DeepConnect disconnect hook (registered in #start). Only reacts when
# the peer that went away is our own client's deepspace.
def when_disconnected(deepspace, opts)
  return unless deepspace == @client.deep_space

  Log::info(self, "CONTROLLER: client disconnected: Start termination")
  # When the client dies, this controller goes down with it.
  @master.terminate_controller(self)
end
|
|
278
|
+
|
|
279
|
+
#
|
|
280
|
+
# clent interface
|
|
281
|
+
#
|
|
282
|
+
# Publishes +obj+ under the name +service+ for clients to #import.
def export(service, obj)
  @services.store(service, obj)
end
|
|
285
|
+
|
|
286
|
+
# Looks up a previously exported service object (nil when unknown).
def import(service)
  @services.fetch(service, nil)
end
|
|
289
|
+
|
|
290
|
+
#
|
|
291
|
+
# bjob methods
|
|
292
|
+
#
|
|
293
|
+
# Registers +bjob+ with an (initially empty) processor list.
def register_bjob(bjob)
  @bjob2processors_mutex.synchronize { @bjob2processors.store(bjob, []) }
end
|
|
298
|
+
|
|
299
|
+
#
|
|
300
|
+
# ntask methods
|
|
301
|
+
#
|
|
302
|
+
# Snapshot of the active-ntask count for +processor+ (0 when unknown).
def no_active_ntasks_in_processor(processor)
  @no_active_ntasks_mutex.synchronize { @no_active_ntasks[processor] || 0 }
end
|
|
307
|
+
|
|
308
|
+
# Records the latest active-ntask count reported by a processor and wakes
# any threads waiting for capacity to free up.
def update_active_ntasks(processor, no_active_ntasks)
  Log::debug(self, "Processor[#{processor.id}] => #{no_active_ntasks}")
  @no_active_ntasks_mutex.synchronize {
    @no_active_ntasks[processor] = no_active_ntasks
    @no_active_ntasks_cv.broadcast
  }
end
|
|
315
|
+
|
|
316
|
+
#
|
|
317
|
+
# processor methods
|
|
318
|
+
#
|
|
319
|
+
# reserve してから njob 割り当てを行う
|
|
320
|
+
# Reserves +processor+ for the duration of the block so it cannot be torn
# down while an njob is being assigned: increments its reserve count under
# @reserves_mutex, yields it, and always decrements the count afterwards.
#
# Returns the processor on success, or nil when the processor is unknown
# (not in @reserves) or its DeepConnect session has stopped — callers use
# a nil return to fall back to a different processor.
def reserve_processor(processor, &block)
  @reserves_mutex.synchronize do
    begin
      return nil unless @reserves[processor]
    rescue DeepConnect::SessionServiceStopped
      # The processor may already have terminated.
      # NOTE(review): presumably the hash lookup can hit the remote proxy,
      # which is why this rescue sits around it — confirm.
      return nil
    end
    @reserves[processor] += 1
  end
  begin
    yield processor
    processor
  ensure
    # Release the reservation even if the block raised.
    @reserves_mutex.synchronize do
      @reserves[processor] -= 1
    end
  end
end
|
|
339
|
+
|
|
340
|
+
# Creates a new processor on +node+, connects it back to this controller,
# registers it for +bjob+, and yields it with an initial reservation held
# (so it cannot disappear mid-setup); the reservation is released in the
# ensure clause. Creation is serialized via @create_processor_mutex.
# Returns the processor.
def create_processor(node, bjob, &block)
  @create_processor_mutex.synchronize do
    processor = node.create_processor
    processor.connect_controller(self, CONF)
    @reserves_mutex.synchronize do
      # Start with one reservation held by this method.
      @reserves[processor] = 1
    end
    begin
      register_processor(bjob, processor)
      yield processor
      processor
    ensure
      @reserves_mutex.synchronize do
        @reserves[processor] -= 1
      end
    end
  end
end
|
|
358
|
+
|
|
359
|
+
# Associates +processor+ with +bjob+ (idempotently) and wakes any threads
# waiting on either the bjob->processors map or the ntask counters.
# Returns the processor.
def register_processor(bjob, processor)
  @bjob2processors_mutex.synchronize do
    list = (@bjob2processors[bjob] ||= [])
    list.push processor unless list.include?(processor)
    @bjob2processors_cv.broadcast
    @no_active_ntasks_cv.broadcast
  end
  processor
end
|
|
370
|
+
|
|
371
|
+
# def assign_inputtable_processor(bjob, input_bjob, input_njob, input_export, &block)
|
|
372
|
+
# case input_bjob
|
|
373
|
+
# when BGroupBy
|
|
374
|
+
# assign_processor(bjob, :NEW_PROCESSOR_N, input_bjob, &block)
|
|
375
|
+
# # assign_processor(bjob, :NEW_PROCESSOR, &block)
|
|
376
|
+
# # assign_processor(bjob, :SAME_PROCESSOR, input_njob.processor, &block)
|
|
377
|
+
# when BSplitter
|
|
378
|
+
# assign_processor(bjob, :NEW_PROCESSOR, &block)
|
|
379
|
+
# # assign_processor(bjob, :NEW_PROCESSOR_N, input_bjob, &block)
|
|
380
|
+
# # assign_processor(bjob, :SAME_PROCESSOR, input_njob.processor, &block)
|
|
381
|
+
# else
|
|
382
|
+
# assign_processor(bjob, :SAME_PROCESSOR, input_njob.processor, &block)
|
|
383
|
+
# end
|
|
384
|
+
# end
|
|
385
|
+
|
|
386
|
+
# # Processor 関連メソッド
|
|
387
|
+
# # Policy: :SAME_PROCESSOR, :NEW_PROCESSOR, :INPUT, MUST_BE_SAME_PROCESSOR
|
|
388
|
+
# def assign_processor(bjob, policy, *opts, &block)
|
|
389
|
+
# case policy
|
|
390
|
+
# when :INPUT
|
|
391
|
+
# assign_input_processor(bjob, opts[0], &block)
|
|
392
|
+
# when :SAME_PROCESSOR_OBJ
|
|
393
|
+
# assign_same_obj_processor(bjob, opts[0], &block)
|
|
394
|
+
# when :SAME_PROCESSOR, :MUST_BE_SAME_PROCESSOR
|
|
395
|
+
# processor = opts[0]
|
|
396
|
+
# assign_same_processor(bjob, processor, &block)
|
|
397
|
+
# when :NEW_PROCESSOR
|
|
398
|
+
# assign_new_processor(bjob, &block)
|
|
399
|
+
# when :NEW_PROCESSOR_N
|
|
400
|
+
# input_bjob = opts[0]
|
|
401
|
+
# assign_new_processor_n(bjob, input_bjob, &block)
|
|
402
|
+
# else
|
|
403
|
+
# ERR::Raise ERR::INTERNAL::UndefinedPolicy, policy.to_s
|
|
404
|
+
# end
|
|
405
|
+
# end
|
|
406
|
+
|
|
407
|
+
#
|
|
408
|
+
# methods of assgin processor.
|
|
409
|
+
#
|
|
410
|
+
# Creates a processor for +bjob+ on the node serving +host+. When the
# node has not arrived yet, reports the error to the client and aborts
# node creation.
def assign_input_processor(bjob, host, &block)
  node = @master.node_in_reisured(host)
  if !node
    begin
      ERR::Raise ERR::NodeNotArrived, host
    rescue
      handle_exception($!)
      raise AbortCreateNode
    end
  end
  create_processor(node, bjob, &block)
end
|
|
422
|
+
|
|
423
|
+
# Assigns a processor for +bjob+ on the node serving +host+, bounded by
# CONTROLLER_INPUT_PROCESSOR_N processors per node. Creates a new
# processor while under the limit; otherwise reuses the least-loaded
# existing one, waiting on @no_active_ntasks_cv when even that one is
# over CONTROLLER_MAX_ACTIVE_TASKS_IN_PROCESSOR.
def assign_input_processor_n(bjob, host, &block)
  max_no = CONF.CONTROLLER_INPUT_PROCESSOR_N
  max_ntasks = CONF.CONTROLLER_MAX_ACTIVE_TASKS_IN_PROCESSOR

  loop do
    node = @master.node_in_reisured(host)
    unless node
      begin
        ERR::Raise ERR::NodeNotArrived, host
      rescue
        # Report to the client, then abort this node creation.
        handle_exception($!)
        raise AbortCreateNode
      end
    end

    # Count this bjob's processors on the target node and find the one
    # with the fewest active ntasks.
    no_of_processors = 0
    leisured_processor = nil
    min = nil
    for processor in @bjob2processors[bjob].dup
      next if processor.node != node
      no_of_processors += 1

      n = no_active_ntasks_in_processor(processor)
      if !min or min > n
        min = n
        leisured_processor = processor
      end
    end

    if max_no.nil? || max_no >= no_of_processors
      # Still under the per-node limit (or unlimited): make a new one.
      create_processor(node, bjob, &block)
      return
    elsif min > max_ntasks
      # Everything is saturated: wait for a task-count update, then retry.
      @no_active_ntasks_mutex.synchronize do
        Log::debug(self, "NO_ACTIVE_NTASKS: WAIT")
        @no_active_ntasks_cv.wait(@no_active_ntasks_mutex)
        Log::debug(self, "NO_ACTIVE_NTASKS: WAIT END")
      end
    else
      ret = reserve_processor(leisured_processor) {|processor|
        register_processor(bjob, processor)
        yield processor
      }
      unless ret
        # The processor had terminated; fall back to a brand-new one.
        # (Admittedly this handling could be better.)
        assign_new_processor(bjob, &block)
      end
      return
    end
  end
end
|
|
474
|
+
|
|
475
|
+
# def assign_input_processor_n(bjob, host, &block)
|
|
476
|
+
# Log::debug(self, "HHHHHHHHHHHHHHHHHHHHH: #{host}")
|
|
477
|
+
# no = 0
|
|
478
|
+
# if processors = @bjob2processors[bjob]
|
|
479
|
+
# no += processors.size
|
|
480
|
+
# end
|
|
481
|
+
|
|
482
|
+
# max_no = CONF.CONTROLLER_INPUT_PROCESSOR_N
|
|
483
|
+
# if max_no.nil? || max_no > no
|
|
484
|
+
# node = @master.node_in_reisured(host)
|
|
485
|
+
# ERR::Raise ERR::NodeNotArrived, host unless node
|
|
486
|
+
|
|
487
|
+
# create_processor(node, bjob, &block)
|
|
488
|
+
# else
|
|
489
|
+
# node = @master.node_in_reisured(host)
|
|
490
|
+
# ERR::Raise ERR::NodeNotArrived, host unless node
|
|
491
|
+
|
|
492
|
+
# leisured_processor = nil
|
|
493
|
+
# min = nil
|
|
494
|
+
# for processor in @bjob2processors[bjob].dup
|
|
495
|
+
# next if processor.node != node
|
|
496
|
+
|
|
497
|
+
# n = processor.no_ntasks
|
|
498
|
+
# if !min or min > n
|
|
499
|
+
# min = n
|
|
500
|
+
# leisured_processor = processor
|
|
501
|
+
# end
|
|
502
|
+
# end
|
|
503
|
+
# ret = reserve_processor(leisured_processor) {|processor|
|
|
504
|
+
# register_processor(bjob, processor)
|
|
505
|
+
# yield processor
|
|
506
|
+
# }
|
|
507
|
+
# unless ret
|
|
508
|
+
# # プロセッサが終了していたとき. もうちょっとどうにかしたい気もする
|
|
509
|
+
# assign_new_processor(bjob, &block)
|
|
510
|
+
# end
|
|
511
|
+
# end
|
|
512
|
+
# end
|
|
513
|
+
|
|
514
|
+
|
|
515
|
+
# Runs the block against the given processor — essentially this method
# just holds a reservation around register + yield.
def assign_same_processor(bjob, processor, &block)
  ret = reserve_processor(processor) do |reserved|
    register_processor(bjob, reserved)
    yield reserved
    reserved
  end

  return if ret

  # The processor had already terminated (nearly impossible, but...);
  # fall back to a brand-new processor. The assign_processor-side
  # handling of this case is admittedly not great.
  assign_new_processor(bjob, &block)
end
|
|
528
|
+
|
|
529
|
+
# Assigns +bjob+ to the processor living in the same deepspace as +obj+.
# Raises ERR::NoExistProcesorWithObject when no such processor exists or
# when the reservation fails.
def assign_same_obj_processor(bjob, obj, &block)
  processor = nil
  @reserves_mutex.synchronize do
    @reserves.each_key do |p|
      if p.deep_space == obj.deep_space
        processor = p
        break
      end
    end
  end
  # Fixed: the original was missing the comma after the error class
  # (`ERR::Raise ERR::NoExistProcesorWithObject obj.to_s`), which parses
  # as a method call on the constant and raises NoMethodError at runtime.
  # Every other call site uses the `ERR::Raise Class, arg` form.
  ERR::Raise ERR::NoExistProcesorWithObject, obj.to_s unless processor

  ret = reserve_processor(processor) {
    register_processor(bjob, processor)
    yield processor
  }

  # Fixed: same missing comma as above.
  ERR::Raise ERR::NoExistProcesorWithObject, obj.to_s unless ret
end
|
|
548
|
+
|
|
549
|
+
# Creates a processor for +bjob+ on whichever node the master reports as
# least loaded.
def assign_new_processor(bjob, &block)
  create_processor(@master.leisured_node, bjob, &block)
end
|
|
553
|
+
|
|
554
|
+
# まあ, 大体n個になるかなぁ...
|
|
555
|
+
# input_bjobのプロセスも動的に割り当てられるので...
|
|
556
|
+
# 最終的には 大体そうなるということで....
|
|
557
|
+
# Assigns a processor for +bjob+, capping the number of processors at
# roughly (number of input_bjob's processors) * FACTOR. The cap is only
# approximate: input_bjob's processors are themselves assigned
# dynamically, so it converges over time.
def assign_new_processor_n(bjob, input_bjob, &block)

  if input_bjob
    factor = CONF.CONTROLLER_ASSIGN_NEW_PROCESSOR_N_FACTOR
  else
    # Possible bug (original author's note): CONTROLLER_INPUT_PROCESSOR_N
    # is a per-node count, not a global one — TODO confirm.
    max_no = CONF.CONTROLLER_INPUT_PROCESSOR_N
  end
  max_ntasks = CONF.CONTROLLER_MAX_ACTIVE_TASKS_IN_PROCESSOR

  loop do
    if input_bjob
      # Recompute the cap from the current number of input processors,
      # waiting until input_bjob has registered at least once.
      no_i = 0
      @bjob2processors_mutex.synchronize do
        while !@bjob2processors[input_bjob]
          Log::debug(self, "ASSIGN NEW PROCESSOR WAIT: #{bjob.class}")
          @bjob2processors_cv.wait(@bjob2processors_mutex)
          Log::debug(self, "ASSIGN NEW PROCESSOR RESUME: #{bjob.class}")
        end
        if i_processors = @bjob2processors[input_bjob]
          no_i += i_processors.size
        end
      end
      max_no = no_i * factor
    end

    no = 0
    if processors = @bjob2processors[bjob]
      no += processors.size
    end

    if max_no > no
      # Under the cap: create a fresh processor on the least-loaded node.
      node = @master.leisured_node
      create_processor(node, bjob, &block)
      return
    else
      # At the cap: pick the existing processor with the fewest active
      # ntasks.
      leisured_processor = nil
      min = nil
      for processor in @bjob2processors[bjob].dup
        # This scans from the head, so earlier processors win ties —
        # good enough for now (original author's note).

        n = no_active_ntasks_in_processor(processor)
        if !min or min > n
          min = n
          leisured_processor = processor
        end
      end

      if min > max_ntasks
        # Even the least-loaded processor is saturated: wait for an
        # update and retry the whole loop.
        @no_active_ntasks_mutex.synchronize do
          Log::debug(self, "NO_ACTIVE_NTASKS: WAIT")
          @no_active_ntasks_cv.wait(@no_active_ntasks_mutex)
          Log::debug(self, "NO_ACTIVE_NTASKS: WAIT END")
        end
      else
        ret = reserve_processor(leisured_processor) {|processor|
          register_processor(bjob, processor)
          yield processor
        }
        unless ret
          # The processor had terminated; fall back to a brand-new one.
          # (Admittedly this handling could be better.)
          assign_new_processor(bjob, &block)
        end
        return
      end
    end
  end
end
|
|
626
|
+
|
|
627
|
+
# Assigns a processor for +bjob+ with locality awareness for local I/O:
# prefers nodes that currently host the fewest of this bjob's processors,
# and caps per-node processor creation at CONTROLLER_INPUT_PROCESSOR_N.
def assign_new_processor_n_for_local_io(bjob, &block)

  # Build node -> [processors] map for this bjob's current njobs.
  nodes = {}
  # for p in @bjob2processors[bjob].dup
  for p in bjob.nodes.collect{|njob| njob.processor}
    if nodes[p.node]
      nodes[p.node].push p
    else
      nodes[p.node] = [p]
    end
  end

  # Pick a node, progressively relaxing the exclusion: first try nodes with
  # zero of our processors, then <=1, and so on, until one is available.
  node = nil
  assign_level = 0
  while !node
    assign_level += 1
    except_nodes = nodes.select{|n, ps| ps.size >= assign_level}
    node = @master.leisured_node_except_nodes(except_nodes, false)
  end

  max_no = CONF.CONTROLLER_INPUT_PROCESSOR_N
  if nodes[node]
    # The chosen node already hosts some of our processors: find the one
    # with the fewest ntasks in case we need to reuse it.
    leisured_processor = nil
    min = nil
    for processor in nodes[node]
      n = processor.no_ntasks
      if !min or min > n
        min = n
        leisured_processor = processor
      end
    end
    no_of_processors = nodes[node].size
  else
    no_of_processors = 0
  end

  if max_no.nil? || max_no >= no_of_processors
    # Under (or without) the per-node cap: create a new processor there.
    create_processor(node, bjob, &block)
  else
    # Cap reached: reuse the least-loaded existing processor on that node.
    # NOTE(review): in this branch nodes[node] was non-nil (otherwise
    # no_of_processors is 0 and the cap cannot be exceeded), so
    # leisured_processor is set — verify against leisured_node_except_nodes.
    ret = reserve_processor(leisured_processor) {|processor|
      register_processor(bjob, processor)
      yield processor
    }
    unless ret
      # Original note: the processor had already terminated. Would be nice
      # to handle this more gracefully.
      assign_new_processor(bjob, &block)
    end
  end
end
|
|
676
|
+
|
|
677
|
+
# FIXME(review): this method appears broken (possibly dead code):
#   * it takes no parameter yet references a local `processor`, so any call
#     raises NameError immediately;
#   * `deresister_processor` looks like a typo of `deregister_processor`;
#   * `@node.terminate_processor` is called without an argument, unlike the
#     `p.node.terminate_processor(p)` call in start_process_life_manage.
# Left byte-identical because the intended signature cannot be confirmed
# from this file alone — confirm against callers before fixing.
def terminate_processor
  deresister_processor(processor)
  @master.deregister_processor(processor)
  @node.deregister_processor(processor)
  @node.terminate_processor
end
|
|
683
|
+
|
|
684
|
+
# Reaper loop: every PROCESS_LIFE_MANAGE_INTERVAL seconds, kills reserved
# processors that are both unreserved (count == 0) and past their life span.
# Runs forever; intended to be started on its own thread by the caller.
def start_process_life_manage
  loop do
    sleep PROCESS_LIFE_MANAGE_INTERVAL
    Log::debug(self, "START_PROCESS_LIFE_MANAGE: S")
    # Snapshot the keys under the mutex, then examine each one.
    processors = @reserves_mutex.synchronize{@reserves.keys}
    for p in processors
      # NOTE(review): "%{p}" is a Kernel#format reference-style directive,
      # not string interpolation — likely "#{p}" was intended. If debugf
      # applies format() without a hash argument this raises KeyError;
      # verify Log::debugf's semantics.
      Log::debugf(self, "START_PROCESS_LIFE_MANAGE: 1 %{p}")
      kill = false
      @reserves_mutex.synchronize do
        # for q, r in @reserves
        #   puts "#{q.id} =>#{r}"
        # end
        Log::debug(self, "START_PROCESS_LIFE_MANAGE: 2 ")
        # Kill only processors with no outstanding reservations whose
        # life span has expired.
        if @reserves[p] == 0 && p.life_out_life_span?
          Log::info self, "Kill #{p.inspectx}"
          kill = true
          @reserves.delete(p)
          @bjob2processors_mutex.synchronize do
            # Original note: does p need to be removed from
            # @bjob2processors here as well?
          end
        end
      end
      Log::debug(self, "START_PROCESS_LIFE_MANAGE: 3 ")
      # Terminate outside the @reserves mutex to avoid holding it during
      # the (potentially remote) terminate call.
      if kill
        Log::debug(self, "START_PROCESS_LIFE_MANAGE: 4 ")
        p.node.terminate_processor(p)
      end
      Log::debug(self, "START_PROCESS_LIFE_MANAGE: 5 ")
    end
    Log::debug(self, "START_PROCESS_LIFE_MANAGE: E ")
  end
end
|
|
716
|
+
|
|
717
|
+
# exception handling
|
|
718
|
+
# Notifies the client of +exp+ asynchronously on a throwaway thread.
# Failures inside the client callback are deliberately swallowed
# (best-effort notification). Always returns nil.
def handle_exception(exp)
  Thread.start do
    begin
      @client.handle_exception(exp)
    rescue StandardError
      # best-effort: ignore errors raised by the client side
    end
  end
  nil
end
|
|
727
|
+
|
|
728
|
+
# stdout
|
|
729
|
+
# Writes +str+ through the (project-provided) replace_stdout guard on
# $stdout, so output goes to the real stream rather than any redirection.
def stdout_write(str)
  $stdout.replace_stdout { $stdout.write(str) }
end
|
|
734
|
+
|
|
735
|
+
# pool variable
|
|
736
|
+
def pool_dict
|
|
737
|
+
@pool_dict
|
|
738
|
+
end
|
|
739
|
+
|
|
740
|
+
# Defines pool variable +vname+ with +value+ in the pool dictionary.
# Original note: if +value+ is (a remote reference to) a Hash that carries a
# :block key, it is treated as serialized block source — the block is
# compiled in a Context and called, and its result becomes the value.
# NOTE(review): __deep_connect_reference? is a project/deep-connect protocol
# method assumed to exist on every transported object — verify.
def def_pool_variable(vname, value = nil)
  if value.__deep_connect_reference? && value.kind_of?(Hash) && value[:block]
    p = Context.create_proc(self, value[:block])
    value = p.call
  end
  @pool_dict.def_variable(vname, value)
end
|
|
748
|
+
|
|
749
|
+
# Reads or writes a pool variable: with only +vname+, returns its current
# value; with one extra argument, stores that argument under +vname+ and
# returns it.
def pool_variable(vname, *value)
  return @pool_dict[vname] if value.empty?
  @pool_dict[vname] = value.first
end
|
|
756
|
+
|
|
757
|
+
# Evaluation context for pool-variable initializer source shipped from
# clients. The eval'd code runs inside an instance so it can see @Pool.
class Context
  # Convenience: build a Context for +controller+ and compile +source+.
  def self.create_proc(controller, source)
    context = new(controller)
    context.create_proc(source)
  end

  def initialize(controller)
    # Capitalized ivar is intentional: eval'd snippets reference @Pool.
    @Pool = controller.pool_dict
  end

  # Compiles +source+ into a Proc bound to this instance.
  # SECURITY NOTE(review): this evals source supplied over the wire with no
  # sandboxing — acceptable only if all clients are trusted; verify the
  # framework's trust model before exposing this more widely.
  def create_proc(source)
    eval("proc{#{source}}", binding)
  end
end
|
|
771
|
+
|
|
772
|
+
#-- new fairy
|
|
773
|
+
|
|
774
|
+
|
|
775
|
+
# def assign_processor(target_bjob, &block)
|
|
776
|
+
# mapper = NjobMapper.new(self, target_bjob)
|
|
777
|
+
# mapper.assign_processor(&block)
|
|
778
|
+
# end
|
|
779
|
+
|
|
780
|
+
# Assigns an ntask for every input filter already bound to +target_bjob+'s
# input stage, delegating placement decisions to a per-filter NjobMapper.
# NOTE: create_node_mutex is currently unused — the synchronize call around
# the mapper was commented out in the original.
def assign_ntasks(target_bjob, create_node_mutex, &block)
  target_bjob.input.each_assigned_filter do |filter|
    NjobMapper.new(self, target_bjob, filter).assign_ntask(&block)
  end
end
|
|
788
|
+
|
|
789
|
+
# Maps one input filter of a bjob onto a processor/ntask by choosing an
# NjobMappingPolicy based on the class of the upstream (pre) bjob.
class NjobMapper

  def initialize(cont, target_bjob, input_filter)
    @controller = cont
    @target_bjob = target_bjob

    @pre_bjob = @target_bjob.input
    @input_filter = input_filter

    init_policy

    Log::debug(self, "Mapping Policy: #{@pre_bjob.class} -(#{@policy.class})-> #{@target_bjob.class}")

  end

  attr_reader :controller
  attr_reader :pre_bjob
  attr_reader :target_bjob
  attr_reader :input_filter
  attr_reader :policy

  # Selects @policy. An upstream bjob may dictate its own policy via
  # #postmapping_policy; otherwise the policy is chosen by case/when on the
  # upstream bjob's class.
  def init_policy
    if @pre_bjob.respond_to?(:postmapping_policy) &&
        @pre_bjob.postmapping_policy
      # NOTE(review): eval of a class name string — assumes
      # postmapping_policy comes from trusted in-process code; a constant
      # lookup would be safer. Verify before changing.
      @policy = eval(@pre_bjob.postmapping_policy.to_s).new(self)
      return
    end

    # Original note: not needed for now (handled at lazy njob creation).
    # if @target_bjob.kind_of?(BShuffle)
    #   @policy = MPNewProcessorN.new(self)
    #   return
    # end

    case @pre_bjob
    when CFilePlace
      # BInput family
      @policy = MPInputProcessor.new(self)
    when CLocalIOPlace
      @policy = MPLocalInputNewProcessorN.new(self)
    when CIotaPlace, CTherePlace
      @policy = MPInputNewProcessorN.new(self)
    when CVarrayPlace
      @policy = MPVarrayInputProcessor.new(self)
    # when CIotaPlace
    #   @policy = MPIotaInputProcessor.new(self)
    when CBasicGroupBy, CDirectProduct::CPreFilter, CWC #, CSegShuffle
      @policy = MPNewProcessorN.new(self)
    # @policy = MPNewProcessor.new(self)
    when CSegSplit, CInject::CLocalInject, CFind::CLocalFind
      @policy = MPNewProcessor.new(self)
    # when CSegShuffle
    #   @policy = MPPostShuffle.new(self)
    # when CZip::CPreZipFilter
    #   @policy = MPZippedFilter.new(self)
    else
      @policy = MPSameNTask.new(self)
    end
  end

  # Delegate ntask assignment to the chosen policy.
  def assign_ntask(&block)
    @policy.assign_ntask(&block)
  end

  # Delegate input binding for a freshly created njob to the policy.
  def bind_input(njob)
    @policy.bind_input(njob)
  end
end
|
|
857
|
+
|
|
858
|
+
# Base class for njob mapping policies. Wraps the NjobMapper that created it
# and forwards the mapper's read accessors so subclasses can use them
# directly.
class NjobMappingPolicy
  extend Forwardable

  def initialize(mapper)
    @mapper = mapper
  end

  def_delegators :@mapper, :controller, :pre_bjob, :target_bjob, :input_filter
end
|
|
871
|
+
|
|
872
|
+
# Policy for file-input bjobs: place each ntask on an input processor on the
# host where the input filter's data lives.
class MPInputProcessor < NjobMappingPolicy
  def assign_ntask(&block)
    controller.assign_input_processor_n(target_bjob,
                                        input_filter.host) do |processor|
      ntask = processor.create_ntask
      block.call(ntask, @mapper)
    end
  end

  # Binds the njob's input by opening the filter directly on the njob.
  def bind_input(njob)
    begin
      njob.open(input_filter)
    rescue
      # NOTE(review): the original exception (class/message) is discarded
      # here and replaced by AbortCreateNode — intentional signal to the
      # node-creation machinery, but it hides the root cause; consider
      # chaining/logging the cause.
      raise AbortCreateNode
    end
  end
end
|
|
889
|
+
|
|
890
|
+
# Like MPInputProcessor, but always places the ntask on a freshly created
# processor instead of a per-host input processor.
class MPInputNewProcessor< MPInputProcessor
  def assign_ntask(&block)
    controller.assign_new_processor(target_bjob) do |processor|
      block.call(processor.create_ntask, @mapper)
    end
  end
end
|
|
898
|
+
|
|
899
|
+
# Like MPInputProcessor, but uses the capped dynamic-allocation path
# (assign_new_processor_n) with no input bjob to size against.
class MPInputNewProcessorN< MPInputProcessor
  def assign_ntask(&block)
    controller.assign_new_processor_n(target_bjob, nil) do |processor|
      block.call(processor.create_ntask, @mapper)
    end
  end
end
|
|
908
|
+
|
|
909
|
+
# Like MPInputProcessor, but routes through the locality-aware allocation
# path used for local I/O bjobs.
class MPLocalInputNewProcessorN< MPInputProcessor
  def assign_ntask(&block)
    controller.assign_new_processor_n_for_local_io(target_bjob) do |processor|
      block.call(processor.create_ntask, @mapper)
    end
  end
end
|
|
917
|
+
|
|
918
|
+
# Policy for varray input: the ntask must run on the processor that already
# holds the filter's backing array object.
class MPVarrayInputProcessor < MPInputProcessor
  def assign_ntask(&block)
    controller.assign_same_obj_processor(target_bjob,
                                         input_filter.ary) do |processor|
      block.call(processor.create_ntask, @mapper)
    end
  end
end
|
|
927
|
+
|
|
928
|
+
# Default policy: reuse the upstream filter's own ntask on the same
# processor, creating a new ntask only if the controller hands back a
# different processor than expected.
class MPSameNTask < NjobMappingPolicy
  # BUG FIX: the constructor was misspelled "initialze", so it was never
  # invoked (Object#initialize ran instead) and @import was never
  # explicitly initialized. @import is unread in this class, so behavior
  # is unchanged, but the intent is now actually executed.
  def initialize(mapper)
    super
    @import = nil
  end

  def assign_ntask(&block)
    # Original note: should spawn a thread here — as written, parallelism
    # is insufficient (for REQ:#5).
    controller.assign_same_processor(target_bjob,
                                     input_filter.processor) do |processor|
      ntask = input_filter.ntask
      if input_filter.processor != processor
        Log::warn(self, "ASSIGN_NTASK: assign defferent processor!!")
        ntask = processor.create_ntask
      end
      block.call(ntask, @mapper)
    end
  end

  # The njob consumes the upstream filter directly as its input.
  def bind_input(njob)
    njob.input = input_filter
  end
end
|
|
952
|
+
|
|
953
|
+
|
|
954
|
+
# 必要ない?
|
|
955
|
+
# class MPSameProcessorObj < NjobMappingPolicy
|
|
956
|
+
# end
|
|
957
|
+
|
|
958
|
+
# Policy that creates a new processor per upstream export and wires an
# export/import pair between the upstream bjob and the new njob.
# Stateful: @export/@import are set in assign_ntask and consumed by
# bind_input — per the original note, this relies on the two calls being
# processed serially (no interleaving between exports).
class MPNewProcessor < NjobMappingPolicy

  def initialize(mapper)
    super
    @export = nil
    @import = nil
  end

  def assign_ntask(&block)
    pre_bjob.start_export(input_filter)

    pre_bjob.each_export_by(input_filter, self) do |export, opts={}|
      # pre_bjob.each_export_by(input_filter, self) do |export, opts|
      # opts = {} unless opts

      # Original note: should spawn a thread here — as written,
      # parallelism is insufficient (for REQ:#5).
      controller.assign_new_processor(target_bjob) do |processor|
        # Original note: assumes exports are processed serially.
        @export = export
        @import = target_bjob.create_import(processor)
        ntask = processor.create_ntask
        block.call(ntask, @mapper, opts)
      end
    end
  end

  # Connects the most recent export/import pair to the new njob:
  # copies the export's sequence number and key onto the import, makes the
  # import the njob's input and the export's output, then registers the
  # binding with the upstream bjob.
  def bind_input(njob)
    @import.no = @export.no
    @import.key = @export.key
    njob.input = @import
    @export.output = @import
    pre_bjob.bind_export(@export, @import)
  end
end
|
|
993
|
+
|
|
994
|
+
# Variant of MPNewProcessor that allocates processors through the capped
# dynamic path (assign_new_processor_n), sized against the upstream bjob.
# Inherits bind_input and the @export/@import hand-off from MPNewProcessor.
class MPNewProcessorN < MPNewProcessor
  def assign_ntask(&block)
    pre_bjob.start_export(input_filter)

    pre_bjob.each_export_by(input_filter, self) do |export, opts = {}|
      # Original note: should spawn a thread here — as written,
      # parallelism is insufficient (for REQ:#5).
      controller.assign_new_processor_n(target_bjob, pre_bjob) do |processor|
        # Relies on exports being handed over serially.
        @export = export
        @import = target_bjob.create_import(processor)
        block.call(processor.create_ntask, @mapper, opts)
      end
    end
  end
end
|
|
1015
|
+
|
|
1016
|
+
# Variant of MPNewProcessor that keeps the ntask on the upstream filter's
# own processor rather than creating a new one. Inherits bind_input and the
# @export/@import hand-off from MPNewProcessor.
class MPSameProcessor < MPNewProcessor

  def assign_ntask(&block)
    pre_bjob.start_export(input_filter)

    pre_bjob.each_export_by(input_filter, self) do |export, opts = {}|
      # Original note: should spawn a thread here — as written,
      # parallelism is insufficient (for REQ:#5).
      controller.assign_same_processor(target_bjob,
                                       input_filter.processor) do |processor|
        # Relies on exports being handed over serially.
        @export = export
        @import = target_bjob.create_import(processor)
        block.call(processor.create_ntask, @mapper, opts)
      end
    end
  end
end
MPSameProcessorQ = MPSameProcessor
|
|
1041
|
+
|
|
1042
|
+
# class MPZippedFilter<MPNewProcessor
|
|
1043
|
+
|
|
1044
|
+
# def assign_processor(&block)
|
|
1045
|
+
# pre_bjob.start_export(input_filter)
|
|
1046
|
+
|
|
1047
|
+
# pre_bjob.each_export_by(input_filter, self) do |export|
|
|
1048
|
+
# # thread を立ち上げるべき
|
|
1049
|
+
# # このままでは, 十分に並列性が取れない(for [REQ:#5)]
|
|
1050
|
+
# controller.assign_new_processor(target_bjob) do |processor|
|
|
1051
|
+
# # シリアライズに処理されることが前提になっている
|
|
1052
|
+
# @export = export
|
|
1053
|
+
# @import = target_bjob.create_import(processor)
|
|
1054
|
+
# block.call(processor, @mapper)
|
|
1055
|
+
# end
|
|
1056
|
+
# end
|
|
1057
|
+
# end
|
|
1058
|
+
|
|
1059
|
+
# def bind_input(njob)
|
|
1060
|
+
# @import.no = @export.no
|
|
1061
|
+
# @import.key = @export.key
|
|
1062
|
+
# njob.input = @import
|
|
1063
|
+
# @export.output = @import
|
|
1064
|
+
# pre_bjob.bind_export(@export, @import)
|
|
1065
|
+
# end
|
|
1066
|
+
# end
|
|
1067
|
+
|
|
1068
|
+
|
|
1069
|
+
# class MPPreShuffle < NjobMappingPolicy
|
|
1070
|
+
# def each_exports(&block)
|
|
1071
|
+
# pre_bjob.start_export(input_filter)
|
|
1072
|
+
|
|
1073
|
+
# pre_bjob.each_export_by(input_filter, self) do |export|
|
|
1074
|
+
# # thread を立ち上げるべき
|
|
1075
|
+
# # このままでは, 十分に並列性が取れない(for [REQ:#5)]
|
|
1076
|
+
# @export = export
|
|
1077
|
+
# block.call(export)
|
|
1078
|
+
# end
|
|
1079
|
+
# end
|
|
1080
|
+
# end
|
|
1081
|
+
|
|
1082
|
+
# class MPPostShuffle < MPNewProcessorN
|
|
1083
|
+
# def assign_processor(&block)
|
|
1084
|
+
# # すでにスタートしている
|
|
1085
|
+
# #pre_bjob.start_export(input_filter)
|
|
1086
|
+
|
|
1087
|
+
# pre_bjob.each_export_by(input_filter, self) do |export|
|
|
1088
|
+
# # thread を立ち上げるべき
|
|
1089
|
+
# # このままでは, 十分に並列性が取れない(for [REQ:#5)]
|
|
1090
|
+
# controller.assign_new_processor_n(target_bjob, pre_bjob) do
|
|
1091
|
+
# |processor|
|
|
1092
|
+
# # シリアライズに処理されることが前提になっている
|
|
1093
|
+
# @export = export
|
|
1094
|
+
# @import = target_bjob.create_import(processor)
|
|
1095
|
+
# block.call(processor, @mapper)
|
|
1096
|
+
# end
|
|
1097
|
+
# end
|
|
1098
|
+
# end
|
|
1099
|
+
# end
|
|
1100
|
+
end
|
|
1101
|
+
end
|
|
1102
|
+
|
|
1103
|
+
require "fairy/master/addins"
|