nuri 0.5.1
- checksums.yaml +7 -0
- data/.gitignore +6 -0
- data/.travis.yml +12 -0
- data/CHANGELOG +146 -0
- data/Gemfile +3 -0
- data/LICENSE +28 -0
- data/README.md +64 -0
- data/Rakefile +15 -0
- data/VERSION +1 -0
- data/bin/delete_modules +11 -0
- data/bin/install_agent +18 -0
- data/bin/install_module +65 -0
- data/bin/nuri +519 -0
- data/bin/nuri.old +183 -0
- data/bin/push_model +16 -0
- data/examples/.gitignore +3 -0
- data/examples/bonfire.sfp +95 -0
- data/examples/bonfire/epcc.sfp +43 -0
- data/examples/bonfire/epcc0.sfp +49 -0
- data/examples/bonfire/epcc2.sfp +52 -0
- data/examples/bonfire/epcc2a.sfp +25 -0
- data/examples/bonfire/inria.sfp +72 -0
- data/examples/bonfire/inria0.sfp +49 -0
- data/examples/bonfire/inria2.sfp +71 -0
- data/examples/bonfire/inria2a.sfp +44 -0
- data/examples/bonfire/inria2b.sfp +54 -0
- data/examples/bonfire/inria2c.sfp +62 -0
- data/examples/bonfire/inria2d.sfp +71 -0
- data/examples/bonfire/inria2e.sfp +80 -0
- data/examples/bonfire/main.sfp +33 -0
- data/examples/bonfire/old/bonfire-1-1-1.sfp +76 -0
- data/examples/bonfire/old/bonfire-1-10-1.sfp +77 -0
- data/examples/bonfire/old/bonfire-1-2-1.sfp +58 -0
- data/examples/bonfire/old/bonfire-1-3-1.sfp +61 -0
- data/examples/bonfire/old/bonfire-1-4-1.sfp +64 -0
- data/examples/bonfire/old/bonfire-1-5-1.sfp +67 -0
- data/examples/bonfire/old/bonfire-1-6-1.sfp +82 -0
- data/examples/bonfire/old/bonfire-1-7-1.sfp +82 -0
- data/examples/bonfire/old/bonfire-1-8-1.sfp +79 -0
- data/examples/bonfire/old/bonfire-1-9-1.sfp +83 -0
- data/examples/bonfire/old/wp-test1a.sfp +38 -0
- data/examples/bonfire/old/wp-test1b.sfp +18 -0
- data/examples/bonfire/old/wp-test1c.sfp +7 -0
- data/examples/bonfire/old/wp-test2.sfp +47 -0
- data/examples/bonfire/old3/bonfire-epcc.sfp +57 -0
- data/examples/bonfire/old3/bonfire-inria.sfp +72 -0
- data/examples/bonfire/old3/bonfire-master.sfp +18 -0
- data/examples/bonfire/old3/bonfire.sfp +23 -0
- data/examples/bonfire/old3/bonfire2.sfp +49 -0
- data/examples/bonfire/old3/bonfire3.sfp +76 -0
- data/examples/bonfire/old3/bonfire4.sfp +78 -0
- data/examples/bonfire/old3/bonfire5.sfp +34 -0
- data/examples/bonfire/old3/bonfire5b.sfp +84 -0
- data/examples/bonfire/old3/hpvm6.sfp +22 -0
- data/examples/bonfire/old3/model.json +1 -0
- data/examples/bonfire/old3/test0.sfp +16 -0
- data/examples/bonfire/old3/test1.sfp +5 -0
- data/examples/bonfire/old3/test10.sfp +5 -0
- data/examples/bonfire/old3/test2.sfp +18 -0
- data/examples/bonfire/old3/test3.sfp +10 -0
- data/examples/bonfire/old3/test4.sfp +11 -0
- data/examples/bonfire/old3/test5.sfp +18 -0
- data/examples/bonfire/old3/test6.sfp +19 -0
- data/examples/bonfire/old3/test7.sfp +34 -0
- data/examples/bonfire/old3/test8.sfp +5 -0
- data/examples/bonfire/old3/test9.sfp +16 -0
- data/examples/bonfire/old3/wordpress-test-cluster.sfp +38 -0
- data/examples/bonfire/old3/wordpress-test.sfp +22 -0
- data/examples/bonfire/old3/wp-test-2.sfp +49 -0
- data/examples/bonfire/test.sfp +13 -0
- data/examples/generator.rb +66 -0
- data/examples/hadoop2.sfp +20 -0
- data/examples/hpcloud.sfp +18 -0
- data/examples/run.rb +17 -0
- data/examples/test.inc +0 -0
- data/examples/test.sfp +11 -0
- data/lib/naas/d3.js +5 -0
- data/lib/naas/d3.v3.min.js +5 -0
- data/lib/naas/index.css +0 -0
- data/lib/naas/index.html +18 -0
- data/lib/naas/index.js +18 -0
- data/lib/naas/jquery-1.10.2.min.js +6 -0
- data/lib/naas/jquery.js +6 -0
- data/lib/naas/naas.rb +160 -0
- data/lib/nuri.rb +62 -0
- data/lib/nuri/choreographer.rb +151 -0
- data/lib/nuri/constraint_helper.rb +9 -0
- data/lib/nuri/directory.rb +40 -0
- data/lib/nuri/master.rb +725 -0
- data/lib/nuri/net_helper.rb +65 -0
- data/lib/nuri/orchestrator.rb +224 -0
- data/lib/nuri/server.rb +212 -0
- data/modules/.gitignore +4 -0
- data/modules/apache/apache.rb +255 -0
- data/modules/apache/apache.rb.old +167 -0
- data/modules/apache/apache.sfp +146 -0
- data/modules/apache/apache.sfp.future +100 -0
- data/modules/apache/load_balancer +20 -0
- data/modules/apache/model.json +1 -0
- data/modules/apache/test.sfp +8 -0
- data/modules/aptpackage/aptpackage.rb +82 -0
- data/modules/aptpackage/aptpackage.sfp +5 -0
- data/modules/bonfire/.gitignore +2 -0
- data/modules/bonfire/README.md +12 -0
- data/modules/bonfire/bonfire.rb +60 -0
- data/modules/bonfire/bonfire.sfp +9 -0
- data/modules/bonfire/config.yml +4 -0
- data/modules/bonfire/helper.rb +149 -0
- data/modules/bonfire/stresstest.rb +144 -0
- data/modules/bonfire/test.sfp +8 -0
- data/modules/client/client.rb +22 -0
- data/modules/client/client.sfp +14 -0
- data/modules/cloud/cloud.rb +11 -0
- data/modules/cloud/cloud.sfp +26 -0
- data/modules/file/file.rb +91 -0
- data/modules/file/file.sfp +9 -0
- data/modules/hadoop1/core-site.xml +17 -0
- data/modules/hadoop1/hadoop-env.sh +55 -0
- data/modules/hadoop1/hadoop1.rb +384 -0
- data/modules/hadoop1/hadoop1.sfp +93 -0
- data/modules/hadoop1/hdfs-site.xml +16 -0
- data/modules/hadoop1/mapred-site.xml +17 -0
- data/modules/hadoop2/core-site.xml +31 -0
- data/modules/hadoop2/hadoop-env.sh +77 -0
- data/modules/hadoop2/hadoop2.rb +401 -0
- data/modules/hadoop2/hadoop2.sfp +114 -0
- data/modules/hadoop2/hdfs-site.xml +47 -0
- data/modules/hadoop2/mapred-site.xml +71 -0
- data/modules/hadoop2/ports +14 -0
- data/modules/hadoop2/yarn-env.sh +112 -0
- data/modules/hadoop2/yarn-site.xml +107 -0
- data/modules/hpcloud/.gitignore +2 -0
- data/modules/hpcloud/README.md +16 -0
- data/modules/hpcloud/config.yml +3 -0
- data/modules/hpcloud/example.sfp +18 -0
- data/modules/hpcloud/hpcloud.rb +241 -0
- data/modules/hpcloud/hpcloud.sfp +22 -0
- data/modules/hpcloud/test.sfp +5 -0
- data/modules/install_module +65 -0
- data/modules/machine/machine.rb +95 -0
- data/modules/machine/machine.sfp +9 -0
- data/modules/mockcloud/mockcloud.rb +20 -0
- data/modules/mockcloud/mockcloud.sfp +6 -0
- data/modules/mysql/mysql.rb +118 -0
- data/modules/mysql/mysql.sfp +38 -0
- data/modules/mysql/test.sfp +3 -0
- data/modules/node/node.rb +8 -0
- data/modules/node/node.sfp +7 -0
- data/modules/object/object.rb +7 -0
- data/modules/object/object.sfp +1 -0
- data/modules/os/os.rb +38 -0
- data/modules/os/os.sfp +11 -0
- data/modules/package/package.rb +26 -0
- data/modules/package/package.sfp +22 -0
- data/modules/package/test.sfp +6 -0
- data/modules/service/model.json +1 -0
- data/modules/service/service.rb +50 -0
- data/modules/service/service.sfp +46 -0
- data/modules/service/test.sfp +6 -0
- data/modules/tarpackage/tarpackage.rb +93 -0
- data/modules/tarpackage/tarpackage.sfp +5 -0
- data/modules/vm/vm.rb +8 -0
- data/modules/vm/vm.sfp +18 -0
- data/modules/wordpress/wordpress.rb +98 -0
- data/modules/wordpress/wordpress.sfp +34 -0
- data/modules/wordpresscluster/wordpresscluster.rb +150 -0
- data/modules/wordpresscluster/wordpresscluster.sfp +74 -0
- data/nuri.gemspec +26 -0
- metadata +281 -0
data/modules/bonfire/stresstest.rb
@@ -0,0 +1,144 @@

#!/usr/bin/env ruby

require 'logger'
require 'thread'

module Sfp
  module Module
  end

  module Resource
    attr_reader :state, :model
    def init(model={})
      @state = {}
      @model = model
    end
    def reset
    end
  end

  Agent = Object.new
  def Agent.set_logger(logger)
    @logger = logger
  end
  def Agent.logger
    @logger
  end
end

require File.expand_path('../../../lib/nuri.rb', __FILE__)
require File.expand_path('../bonfire.rb', __FILE__)

def do_test(p={})
  log = (p[:logger] ? p[:logger] : Logger.new(STDOUT))
  log.level = Logger::INFO
  log.info "Site: #{p[:site]}"
  log.info "Total VMs: #{p[:totalvms]}"

  lock = Mutex.new

  model = { 'location'   => p[:site],
            'experiment' => p[:experiment],
            'gateway'    => "ssh.bonfire.grid5000.fr",
            'image_name' => "BonFIRE Debian Squeeze 10G v5",
            'wan_name'   => "BonFIRE WAN" }

  Sfp::Agent.set_logger log
  bonfire = Sfp::Module::Bonfire.new
  bonfire.init(model)
  bonfire.update_state

  if not bonfire.state['running']
    log.info "site #{p[:site]} is not running!"
    return
  end

  log.info "Creating #{p[:totalvms]} VMs [Wait]"
  start_time = Time.now
  total = 0
  1.upto(p[:totalvms]) do |i|
    lock.synchronize { total += 1 }
    Thread.new {
      begin
        name = "vm#{i}"
        status = bonfire.create_server({
          :name  => name,
          :image => model['image_name'],
          :wan   => model['wan_name']
        })
        #status = bonfire.create_vm('vm' => name)
        log.info "Creating #{name}: " + (status ? "[OK]" : "[Failed]")
      rescue Exception => exp
        log.error "#{exp}\n#{exp.backtrace.join("\n")}"
        log.info "Creating #{name}: [Failed]"
      end
      lock.synchronize { total -= 1 }
    }
  end

  loop do
    sleep 1
    break if total <= 0
  end
  end_time = Time.now

  log.info "Creating #{p[:totalvms]} VMs [OK]"
  log.info "time=#{end_time-start_time} site=#{p[:site]} vms=#{p[:totalvms]}"

  loop do
    sleep 30 if p[:totalvms] > 0

    # Deleting VMs
    start_time = Time.new
    log.info "Deleting VMs (except master) [Wait]"
    total = 0
    bonfire.state['vms'].each { |name,data|
      next if name == 'master'
      lock.synchronize { total += 1 }
      Thread.new {
        log.info "Deleting #{name}: " + (bonfire.delete_vm('vm' => name) ? "[OK]" : "[Failed]")
        lock.synchronize { total -= 1 }
      }
    }

    loop do
      sleep 1
      break if total <= 0
    end
    end_time = Time.new

    bonfire.update_state
    if bonfire.state['vms'].length > 1
      log.info "Deleting VMs (except master) [Failed]"
    else
      log.info "Deleting VMs (except master): #{end_time - start_time} [OK]"
      break
    end
  end
end

if $0 == __FILE__
  if ARGV[0] == 'daemon'
    sites = ['fr-inria'] #, 'uk-epcc'] #, 'uk-hplabs']
    experiment = 'autocloud'
    total_vms = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    repeat = 10
    logger = Logger.new('output.log')

    Process.daemon
    1.upto(repeat) do |id|
      total_vms.each do |vms|
        sites.each do |site|
          logger.info "Experiment=#{id} vms=#{vms} site=#{site}"
          do_test(:site => site, :experiment => experiment, :totalvms => vms, :logger => logger)
        end
      end
    end

  else
    if ARGV.length < 3
      puts 'Usage: stresstest.rb <bonfire-site> <experiment> <total-vms>'
    else
      do_test(:site => ARGV[0], :experiment => ARGV[1], :totalvms => ARGV[2].to_i)
    end
  end
end
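For reference, stresstest.rb can also be driven from another script: requiring the file only defines do_test, since the CLI and daemon branches are guarded by $0 == __FILE__. A minimal sketch, reusing the site and experiment names that appear in the script above:

# Hypothetical driver: one stress-test round with 5 VMs on fr-inria.
require_relative 'stresstest'

do_test(:site => 'fr-inria', :experiment => 'autocloud', :totalvms => 5)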
data/modules/client/client.rb
@@ -0,0 +1,22 @@

class Sfp::Module::Client
  include Sfp::Resource

  ConfigFile = '/tmp/sfp_client_config'

  def update_state
    if File.exist?(ConfigFile)
      @state['refer'] = File.read(ConfigFile)
    else
      @state['refer'] = nil
    end
  end

  def redirect(p={})
    return false if !p.has_key?('s')
    if p['s'].nil?
      File.delete(ConfigFile) if File.exist?(ConfigFile)
    else
      File.open(ConfigFile, 'w') { |f| f.write(p['s'].to_s) }
    end
    true
  end
end
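A minimal sketch of the module's contract, assuming the Sfp::Resource stub shown in stresstest.rb above (whose init only populates @state and @model); the '$.service1' reference is made up:

# Hypothetical standalone exercise of Sfp::Module::Client:
# redirect persists the referred target to ConfigFile, and
# update_state reads it back into @state['refer'].
client = Sfp::Module::Client.new
client.init
client.redirect('s' => '$.service1')  # write a referral
client.update_state
puts client.state['refer']            # => "$.service1"
client.redirect('s' => nil)           # a nil value deletes ConfigFile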
data/modules/cloud/cloud.sfp
@@ -0,0 +1,26 @@

schema Cloud {
  running : Bool

  sub create_vm (vm isref VM) {
    condition {
      this.running = true
      vm.created = false
    }
    effect {
      vm.in_cloud = this
      vm.created = true
    }
  }

  sub delete_vm (vm isref VM) {
    condition {
      this.running = true
      vm.created = true
      vm.in_cloud = this
    }
    effect {
      vm.created = false
      vm.in_cloud = null
    }
  }
}
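Each sub in the schema declares a planning operator: the condition block is its precondition and the effect block the state it guarantees, which is what lets the Nuri planner chain create_vm/delete_vm toward a goal. A Ruby module backing this schema exposes same-named methods; a minimal sketch, modeled loosely on the mockcloud module listed above (the class name and method bodies here are assumptions, not the shipped mockcloud.rb):

class Sfp::Module::MyCloud    # hypothetical implementation of schema Cloud
  include Sfp::Resource

  def update_state
    @state['running'] = true  # report the Cloud.running attribute
  end

  def create_vm(p={})
    true  # a real provider would boot the VM referenced by p and report success
  end

  def delete_vm(p={})
    true
  end
end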
data/modules/file/file.rb
@@ -0,0 +1,91 @@

require 'etc'
require 'fileutils'
require 'digest/sha1'

class Sfp::Module::File
  include ::Sfp::Resource

  ##############################
  #
  # update current state method
  #
  ##############################

  def update_state
    path = @model['path'].to_s.strip
    if @model['exists']
      create(path)
    else
      delete(path)
    end

    @state['path'] = path
    @state['exists'] = ::File.exist?(path)
    @state['content'] = content?
    @state['user'], @state['group'] = user_group?
    @state['permission'] = permission?
  end

  ##############################
  #
  # Helper methods
  #
  ##############################

  protected

  def delete(file)
    ::File.delete(file) if ::File.exist?(file)
  end

  def create(file)
    log.warn "Failed to create/update file #{file}!" if
      not set_content(file) or
      not set_owner(file) or
      not set_permission(file)
  end

  def set_content(file)
    return true if not @model['content'].is_a?(String)
    begin
      current = (::File.exist?(file) ? content? : nil)
      desired = Digest::SHA1.hexdigest(@model['content'])
      # ::File, not File -- inside this class, bare File resolves to
      # Sfp::Module::File itself
      ::File.open(file, 'w') { |f| f.write(@model['content']) } if current != desired
      return true
    rescue Exception => e
      log.error "#{e}\n#{e.backtrace.join("\n")}"
    end
    false
  end

  def set_owner(file)
    return true if not ::File.exist?(file)
    return true if not @model['user'].is_a?(String)
    return true if not @model['group'].is_a?(String)
    !!system("chown #{@model['user']}:#{@model['group']} #{file}")
  end

  def set_permission(file)
    return true if not ::File.exist?(file) or not @model['permission'].is_a?(String)
    !!system("chmod #{@model['permission']} #{file}") if @model['permission'] != permission?
    true
  end

  def content?
    (::File.exist?(@model['path']) ? Digest::SHA1.hexdigest(::File.read(@model['path'])) : '')
  end

  def user_group?
    if ::File.exist?(@model['path'])
      stat = ::File.stat(@model['path'])
      [Etc.getpwuid(stat.uid).name, Etc.getgrgid(stat.gid).name]
    else
      ['', '']
    end
  end

  def permission?
    # note: the octal mode includes file-type bits (e.g. "100644")
    (::File.exist?(@model['path']) ? sprintf("%o", ::File.stat(@model['path']).mode) : '')
  end

end
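A short usage sketch, again assuming the Sfp::Resource stub from stresstest.rb; the model keys mirror the ones update_state and the setters read, and the log helper invoked on failure comes from the full agent runtime, so this happy path assumes the chown/chmod calls succeed (e.g. run as root):

# Hypothetical: declare the desired file state, then converge it.
file = Sfp::Module::File.new
file.init('path'       => '/tmp/motd',
          'exists'     => true,
          'content'    => "hello\n",
          'user'       => 'root',
          'group'      => 'root',
          'permission' => '644')
file.update_state        # creates/updates /tmp/motd as a side effect
p file.state['exists']   # => true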
data/modules/hadoop1/core-site.xml
@@ -0,0 +1,17 @@

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
  <property>
    <name>hadoop.tmp.dir</name>
    <value><%= tmp_dir %></value>
  </property>

  <property>
    <name>fs.default.name</name>
    <value>hdfs://<%= master %>:54310</value>
  </property>
</configuration>
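The <%= tmp_dir %> and <%= master %> placeholders are filled by render_file in hadoop1.rb below. render_file's internals are not part of this diff; a plausible equivalent in plain ERB, using the same variable names as the map built in install (host and path values are made up):

# Sketch only: render core-site.xml the way hadoop1.rb's map implies.
require 'erb'
require 'ostruct'

vars = OpenStruct.new(:master => 'namenode.local', :tmp_dir => '/data/hadoop')
xml  = ERB.new(::File.read('core-site.xml')).result(vars.instance_eval { binding })
puts xml   # => ...<value>hdfs://namenode.local:54310</value>...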
data/modules/hadoop1/hadoop-env.sh
@@ -0,0 +1,55 @@

# Set Hadoop-specific environment variables here.

# The only required environment variable is JAVA_HOME. All others are
# optional. When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.

# The java implementation to use. Required.
export JAVA_HOME=<%= java_home %>

# Extra Java CLASSPATH elements. Optional.
# export HADOOP_CLASSPATH=

# The maximum amount of heap to use, in MB. Default is 1000.
# export HADOOP_HEAPSIZE=2000

# Extra Java runtime options. Empty by default.
# export HADOOP_OPTS=-server

# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
# export HADOOP_TASKTRACKER_OPTS=
# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
# export HADOOP_CLIENT_OPTS

# Extra ssh options. Empty by default.
# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"

# Where log files are stored. \$HADOOP_HOME/logs by default.
# export HADOOP_LOG_DIR=\${HADOOP_HOME}/logs

# File naming remote slave hosts. \$HADOOP_HOME/conf/slaves by default.
# export HADOOP_SLAVES=\${HADOOP_HOME}/conf/slaves

# host:path where hadoop code should be rsync'd from. Unset by default.
# export HADOOP_MASTER=master:/home/\$USER/src/hadoop

# Seconds to sleep between slave commands. Unset by default. This
# can be useful in large clusters, where, e.g., slave rsyncs can
# otherwise arrive faster than the master can service them.
# export HADOOP_SLAVE_SLEEP=0.1

# The directory where pid files are stored. /tmp by default.
# export HADOOP_PID_DIR=/var/hadoop/pids

# A string representing this instance of hadoop. \$USER by default.
# export HADOOP_IDENT_STRING=\$USER

# The scheduling priority for daemon processes. See 'man nice'.
# export HADOOP_NICENESS=10
data/modules/hadoop1/hadoop1.rb
@@ -0,0 +1,384 @@

require 'ostruct'

class Sfp::Module::Hadoop1Master
  include Sfp::Resource
  # helpers such as to_model, shell, render_file and log are supplied
  # by the Nuri agent runtime, not defined in this file

  Services = ['namenode', 'secondarynamenode', 'jobtracker']

  def update_state
    to_model

    @state['installed'] = installed?
    @state['running'] = running?
    @state['pids'] = pids

    # try to restart any stopped daemon
    start if @state['running']
  end

  ##############################
  #
  # Action methods (see Hadoop1.sfp)
  #
  ##############################

  def install(p={})
    model = OpenStruct.new(@model)

    if java_home.length <= 0
      ### install JRE
      shell "apt-get install -y default-jre"
    end

    # add group hadoop
    if `grep '^#{model.group}' /etc/group`.length <= 0
      log.info "adding group #{model.group}"
      shell "echo '#{model.group}:x:8000:' >> /etc/group"
    else
      log.info "group #{model.group} already exists"
    end

    # add user hadoop
    if `grep '^#{model.user}' /etc/passwd`.length <= 0
      log.info "adding user #{model.user}"
      shell "echo '#{model.user}:x:8000:8000::#{model.home}:/bin/bash' >> /etc/passwd &&
             echo '#{model.user}:#{model.password}:15958:0:99999:7:::' >> /etc/shadow"
    else
      log.info "user #{model.user} already exists"
    end

    # create home
    log.info "create hadoop home directory: #{model.home}"
    shell "mkdir -p #{model.home}" if !::File.exist?(model.home)
    shell "chown -R #{model.user}:#{model.user} #{model.home} && rm -rf #{model.home}/*"

    # create data_dir
    shell "rm -f #{model.data_dir} && mkdir -p #{model.data_dir}" if !::File.directory?(model.data_dir)
    shell "chown -R #{model.user}:#{model.user} #{model.data_dir} && rm -rf #{model.data_dir}/*"

    # download and extract hadoop binaries
    shell 'apt-get install -y axel'
    downloader = 'axel -q -o' # 'wget -O'
    source = (model.source[-7,7] == '.tar.gz' or model.source[-4,4] == '.tgz' ? model.source : "#{model.source}/hadoop-#{model.version}/hadoop-#{model.version}.tar.gz")

    log.info "download and install hadoop binaries"
    file = source.split('/').last.to_s
    basename = (::File.extname(file) == '.gz' ? ::File.basename(file, '.tar.gz') : ::File.basename(file, ::File.extname(file)))
    shell "cd #{model.home} &&
           #{downloader} #{file} #{source} &&
           tar xvzf #{file} && rm -f #{file} &&
           bash -c 'cd #{model.home}/#{basename} && shopt -s dotglob && mv * .. && cd .. && rm -rf #{basename}'"

    config_dir = "#{model.home}/conf"

    map = {
      'user' => model.user,
      'master' => `hostname`.strip,
      'java_home' => java_home,
      'tmp_dir' => model.data_dir,
      'replication' => model.replication,
    }

    # copy and process template configuration files
    log.info "copy and process template configuration files: core-site.xml, hadoop-env.sh, mapred-site.xml"
    dir = File.expand_path(File.dirname(__FILE__))
    ['hadoop-env.sh', 'core-site.xml', 'mapred-site.xml', 'hdfs-site.xml'].each do |file|
      shell "cp -f #{dir}/#{file} #{config_dir}"
      render_file "#{config_dir}/#{file}", map
    end
    shell "chown -R #{model.user}:#{model.user} #{model.home}"

    # create HDFS directory
    if !::File.exist?(model.data_dir) && !shell("mkdir -p #{model.data_dir}")
      log.info "create scratch directory for HDFS: #{model.data_dir}"
      shell "mkdir -p #{model.data_dir}"
    end
    shell "chown -R #{model.user}:#{model.user} #{model.data_dir}"

    # format namenode space
    log.info "format namenode space"
    shell "su -c '#{model.home}/bin/hadoop namenode -format' hadoop"

    return false if not installed?

    # export hadoop home to root
    log.info "export hadoop home directory to root"
    shell "sed -i '/^export HADOOP_HOME/d' /root/.bashrc"
    shell "echo 'export HADOOP_HOME=#{model.home}' >> /root/.bashrc"

    true
  end

  def uninstall(p={})
    model = OpenStruct.new(@model)
    # remove hadoop user and group, and then delete hadoop's home directory
    shell "sed -i '/^export HADOOP_HOME/d' /root/.bash_profile"
    shell "sed -i '/^#{model.user}/d' /etc/passwd &&
           sed -i '/^#{model.user}/d' /etc/shadow &&
           sed -i '/^#{model.user}/d' /etc/group &&
           rm -rf #{model.home} &&
           rm -rf /tmp/#{model.user}* &&
           rm -rf #{model.data_dir}"
  end

  def start(p={})
    model = OpenStruct.new(@model)
    pids = self.pids
    Services.each { |name|
      pid = pids[name]
      if pid <= 0
        cmd = "#{model.home}/bin/hadoop-daemon.sh start #{name}"
        log.info `su -c '#{cmd} && sleep 3' #{model.user}`
      end
    }
    true
  end

  def stop(p={})
    model = OpenStruct.new(@model)
    pids = self.pids
    Services.reverse.each { |name|
      pid = pids[name]
      if pid > 0
        cmd = "#{model.home}/bin/hadoop-daemon.sh stop #{name}"
        log.info `su -c '#{cmd}' #{model.user}`
      end
    }
    true
  end


  ##############################
  #
  # Helper methods
  #
  ##############################

  protected

  def installed?
    ['bin/hadoop', 'conf/hadoop-env.sh', 'conf/core-site.xml', 'conf/mapred-site.xml', 'conf/hdfs-site.xml'].each { |file|
      return false if !::File.exist?("#{@model['home']}/#{file}")
    }
    true
  end

  def resolve_link(link)
    begin
      link = ::File.readlink(link)
      link = resolve_link(link)
    rescue
      link
    end
  end

  def java_home
    return @model['java_home'] if @model['java_home'].to_s.strip.length > 0
    java = resolve_link(`which java`.strip)
    return '' if java.length <= 0
    ::File.expand_path(java + '/../../')
  end

  def running?
    status = false
    pids.each { |name,pid|
      log.warn "#{name} is not running!" if pid <= 0
      status = true if pid > 0
    }
    status
  end

  def pids
    data = {}
    Services.each { |name|
      data[name] = `ps axf | grep java | grep -v grep | grep hadoop | grep Dproc_#{name}`.to_s.strip.split(' ', 2)[0].to_i
    }
    data
  end
end


class Sfp::Module::Hadoop1Slave
  include Sfp::Resource

  Services = ['datanode', 'tasktracker']

  def update_state
    to_model

    @state['installed'] = installed?
    @state['running'] = running?
    @state['pids'] = pids

    # try to restart any stopped daemon
    start if @state['running']
  end

  ##############################
  #
  # Action methods (see Hadoop1.sfp)
  #
  ##############################

  def install(p={})
    model = OpenStruct.new(@model)

    if java_home.length <= 0
      ### install JRE
      shell "apt-get install -y default-jre"
    end

    # add group hadoop
    if `grep '^#{model.group}' /etc/group`.length <= 0
      log.info "adding group #{model.group}"
      shell "echo '#{model.group}:x:8000:' >> /etc/group"
    else
      log.info "group #{model.group} already exists"
    end

    # add user hadoop
    if `grep '^#{model.user}' /etc/passwd`.length <= 0
      log.info "adding user #{model.user}"
      shell "echo '#{model.user}:x:8000:8000::#{model.home}:/bin/bash' >> /etc/passwd &&
             echo '#{model.user}:#{model.password}:15958:0:99999:7:::' >> /etc/shadow"
    else
      log.info "user #{model.user} already exists"
    end

    # create home
    log.info "create hadoop home directory: #{model.home}"
    shell "mkdir -p #{model.home}" if !::File.exist?(model.home)
    shell "chown -R #{model.user}:#{model.user} #{model.home} && rm -rf #{model.home}/*"

    # create data_dir
    shell "rm -f #{model.data_dir} && mkdir -p #{model.data_dir}" if !::File.directory?(model.data_dir)
    shell "chown -R #{model.user}:#{model.user} #{model.data_dir} && rm -rf #{model.data_dir}/*"

    # download and extract hadoop binaries
    system 'apt-get install -y axel'
    downloader = 'axel -q -o' # 'wget -O'
    source = (model.source[-7,7] == '.tar.gz' or model.source[-4,4] == '.tgz' ? model.source : "#{model.source}/hadoop-#{model.version}/hadoop-#{model.version}.tar.gz")

    log.info "download and install hadoop binaries"
    file = source.split('/').last.to_s
    basename = (::File.extname(file) == '.gz' ? ::File.basename(file, '.tar.gz') : ::File.basename(file, ::File.extname(file)))
    shell "cd #{model.home} &&
           #{downloader} #{file} #{source} &&
           tar xvzf #{file} && rm -f #{file} &&
           bash -c 'cd #{model.home}/#{basename} && shopt -s dotglob && mv * .. && cd .. && rm -rf #{basename}'"

    map = {
      'user' => model.user,
      'master' => resolve(model.master + '.parent.sfpAddress'),
      'java_home' => java_home,
      'replication' => resolve(model.master + '.replication')
    }

    # copy and process template configuration files
    log.info "copy and process template configuration files: core-site.xml, hadoop-env.sh, mapred-site.xml"
    dir = File.expand_path(File.dirname(__FILE__))
    ['hadoop-env.sh', 'core-site.xml', 'mapred-site.xml', 'hdfs-site.xml'].each do |file|
      copy "#{dir}/#{file}", "#{model.home}/conf/"
      render_file "#{model.home}/conf/#{file}", map
    end
    shell "chown -R #{model.user}:#{model.user} #{model.home}"

    # export hadoop home to root
    log.info "export hadoop home directory to root"
    shell "sed -i '/^export HADOOP_HOME/d' /root/.bashrc"
    shell "echo 'export HADOOP_HOME=#{model.home}' >> /root/.bashrc"

    installed?
  end

  def uninstall(p={})
    model = OpenStruct.new(@model)
    # remove hadoop user and group, and then delete hadoop's home directory
    shell "sed -i '/^export HADOOP_HOME/d' /root/.bash_profile"
    shell "sed -i '/^#{model.user}/d' /etc/passwd &&
           sed -i '/^#{model.user}/d' /etc/shadow &&
           sed -i '/^#{model.user}/d' /etc/group &&
           rm -rf #{model.home} &&
           rm -rf /tmp/#{model.user}*"

    not installed?
  end

  def start(p={})
    model = OpenStruct.new(@model)
    pids.each { |name,pid|
      if pid <= 0
        cmd = "#{model.home}/bin/hadoop-daemon.sh start #{name}"
        log.info `su -c '#{cmd} && sleep 3' #{model.user}`
      end
    }

    running?
  end

  def stop(p={})
    model = OpenStruct.new(@model)
    pids.each { |name,pid|
      if pid > 0
        cmd = "#{model.home}/bin/hadoop-daemon.sh stop #{name}"
        log.info `su -c '#{cmd}' #{model.user}`
      end
    }

    # force-kill any daemon that survived the graceful stop; the pid > 0
    # guard avoids signalling the whole process group when no pid was found
    pids.each { |name,pid|
      begin
        Process.kill 9, pid if pid > 0
      rescue
      end
    }

    not running?
  end


  ##############################
  #
  # Helper methods
  #
  ##############################

  def installed?
    ['bin/hadoop', 'conf/hadoop-env.sh', 'conf/core-site.xml', 'conf/mapred-site.xml', 'conf/hdfs-site.xml'].each { |file|
      return false if !::File.exist?("#{@model['home']}/#{file}")
    }
    true
  end

  def resolve_link(link)
    begin
      link = ::File.readlink(link)
      link = resolve_link(link)
    rescue
      link
    end
  end

  def java_home
    return @model['java_home'] if @model['java_home'].to_s.strip.length > 0
    java = resolve_link(`which java`.strip)
    return '' if java.length <= 0
    ::File.expand_path(java + '/../../')
  end

  def running?
    status = false
    pids.each { |name,pid|
      log.warn "#{name} is not running!" if pid <= 0
      status = true if pid > 0
    }
    status
  end

  def pids
    data = {}
    Services.each { |name|
      data[name] = `ps axf | grep java | grep -v grep | grep hadoop | grep Dproc_#{name}`.to_s.strip.split(' ', 2)[0].to_i
    }
    data
  end
end
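For orientation, a hypothetical driver for the master module, again assuming the Sfp::Resource stub from stresstest.rb plus the agent-supplied helpers; the model keys mirror the ones install and start read through OpenStruct, and every value below is made up:

master = Sfp::Module::Hadoop1Master.new
master.init(
  'home'        => '/opt/hadoop',
  'user'        => 'hadoop',
  'group'       => 'hadoop',
  'password'    => '!',     # written verbatim into /etc/shadow by install
  'data_dir'    => '/data/hadoop',
  'source'      => 'http://archive.apache.org/dist/hadoop/core',
  'version'     => '1.2.1',
  'replication' => 2
)
master.install   # provisions the user, unpacks the tarball, formats HDFS
master.start     # brings up namenode, secondarynamenode and jobtracker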