nuri 0.5.1 → 0.5.2

@@ -1,9 +1,9 @@
- include "../modules/node/node.sfp"
- include "../modules/vm/vm.sfp"
- include "../modules/apache/apache.sfp"
- include "../modules/mysql/mysql.sfp"
- include "../modules/wordpresscluster/wordpresscluster.sfp"
- include "../modules/bonfire/bonfire.sfp"
+ include "modules/node/node.sfp"
+ include "modules/vm/vm.sfp"
+ include "modules/apache/apache.sfp"
+ include "modules/mysql/mysql.sfp"
+ include "modules/wordpresscluster/wordpresscluster.sfp"
+ include "modules/bonfire/bonfire.sfp"
 
  proxy isa Node {
  sfpAddress is "localhost"
@@ -85,11 +85,3 @@ global {
  if vmapp4.apache.running = true then vmdb.mysql.running = true
  if vmapp5.apache.running = true then vmdb.mysql.running = true
  }
-
- /*
- vmlb isa VM {
- created is false
- }
- vmapp1 extends vmlb
- vmdb extends vmlb
- */
@@ -0,0 +1,21 @@
+ include "modules/vm/vm.sfp"
+ include "modules/hpcloud/hpcloud.sfp"
+ include "modules/hadoop1/hadoop1.sfp"
+ proxy isa Node {
+ sfpAddress is "localhost"
+ hpcloud isa HPCloud {
+ vm_ssh_key_name = "nurikey"
+ }
+ }
+ master isa VM {
+ in_cloud is proxy.hpcloud
+ hadoop isa Hadoop1Master
+ }
+ slave1 isa VM {
+ in_cloud is proxy.hpcloud
+ hadoop isa Hadoop1Slave {
+ master is master.hadoop
+ }
+ }
+ slave2 extends slave1
+ slave3 extends slave1
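
This new example models a four-node Hadoop 1.x cluster on HP Cloud. Note that slave2 and slave3 simply extend slave1, so each clones slave1's entire declaration, including in_cloud and the Hadoop1Slave component that points at master.hadoop.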
@@ -4,7 +4,7 @@ include "modules/hadoop2/hadoop2.sfp"
  proxy isa Node {
  sfpAddress is "localhost"
  hpcloud isa HPCloud {
- vm_ssh_key_name = "herrykey3"
+ vm_ssh_key_name = "nurikey"
  }
  }
  master isa VM {
@@ -18,3 +18,4 @@ slave1 isa VM {
  }
  }
  slave2 extends slave1
+ slave3 extends slave1
@@ -0,0 +1,41 @@
+ include "modules/vm/vm.sfp"
+ include "modules/hpcloud/hpcloud.sfp"
+ include "modules/apache/apache.sfp"
+ include "modules/mysql/mysql.sfp"
+ include "modules/wordpress/wordpress.sfp"
+ proxy isa Node {
+ sfpAddress is "localhost"
+ hpcloud isa HPCloud {
+ vm_ssh_key_name is "herrykey3"
+ }
+ }
+ lb isa VM {
+ apache isa Apache {
+ load_balancer is true
+ lb_members is (app1,app2,app3)
+ }
+ }
+ app1 isa VM {
+ apache isa Apache {
+ modules is ("php", "php-mysql")
+ }
+ wp isa WordpressWeb {
+ database is db.wp
+ }
+ }
+ app2 extends app1
+ app3 extends app1
+ db isa VM {
+ mysql isa Mysql
+ wp isa WordpressDB
+ }
+ global {
+ if lb.apache.running is true then {
+ app1.apache.running is true
+ app2.apache.running is true
+ app3.apache.running is true
+ }
+ if app1.apache.running is true then db.mysql.running is true
+ if app2.apache.running is true then db.mysql.running is true
+ if app3.apache.running is true then db.mysql.running is true
+ }
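
The global block in this new WordPress example states service dependencies as goal constraints rather than as an explicit execution order: if the load balancer's Apache is running then all three app servers must be running, and if any app server is running then the database must be running, so the planner can schedule the corresponding operations in a consistent order.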
@@ -55,7 +55,7 @@ end
  # internal dependencies
  libdir = File.expand_path(File.dirname(__FILE__))
  require libdir + '/nuri/constraint_helper.rb'
- require libdir + '/nuri/net_helper.rb'
+ require libdir + '/nuri/helper.rb'
  require libdir + '/nuri/orchestrator.rb'
  require libdir + '/nuri/choreographer.rb'
  #require libdir + '/nuri/server.rb'
@@ -1,7 +1,7 @@
  require 'thread'
 
  module Nuri::Choreographer
- include Nuri::Net::Helper
+ include Nuri::Helper
 
  def get_bsig(p={})
  def postprocess(bsig)
@@ -1,10 +1,40 @@
  require 'uri'
  require 'net/http'
 
- module Nuri::Net
+ module Sfp::Helper
+ Sfp2Ruby = Object.new
+ def Sfp2Ruby.visit(name, value, parent)
+ if name[0] == '_' or (value.is_a?(Hash) and (value['_context'] == 'constraint' or value['_context'] == 'procedure'))
+ parent.delete(name)
+ else
+ parent[name] = Sfp::Helper::Sfp2Ruby.val(value)
+ end
+ true
+ end
+
+ def Sfp2Ruby.val(value)
+ if value.is_a?(Hash)
+ case value['_context']
+ when 'null'
+ nil
+ when 'any_value'
+ '$.Any:' + value['_isa']
+ when 'set'
+ value['_values']
+ else
+ value
+ end
+ elsif value.is_a?(Sfp::Unknown)
+ '$.Unknown:' + value.type.to_s
+ elsif value.is_a?(Sfp::Undefined)
+ '$.Undefined:' + value.type.to_s
+ else
+ value
+ end
+ end
  end
 
- module Nuri::Net::Helper
+ module Nuri::Helper
  def post_data(address, port, path, data, open_timeout=5, read_timeout=1800)
  uri = create_uri(address, port, path)
  req = Net::HTTP::Post.new(uri.path)
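
The renamed helper also gains an Sfp2Ruby visitor that strips internal keys and unwraps SFP value objects into plain Ruby values. A few illustrative calls (not part of the diff), following the cases in the code above:

    Sfp::Helper::Sfp2Ruby.val({'_context' => 'null'})                              # => nil
    Sfp::Helper::Sfp2Ruby.val({'_context' => 'set', '_values' => [1, 2]})          # => [1, 2]
    Sfp::Helper::Sfp2Ruby.val({'_context' => 'any_value', '_isa' => '$.Service'})  # => "$.Any:$.Service"
    Sfp::Helper::Sfp2Ruby.val(42)                                                  # => 42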
@@ -1,10 +1,9 @@
  require 'thread'
 
  class Nuri::Master
- include Nuri::Net::Helper
+ include Nuri::Helper
  include Nuri::Orchestrator
  include Nuri::Choreographer
- #include Nuri::Server
 
  SfpUnknown = Sfp::Unknown.new
  SfpUndefined = Sfp::Undefined.new
@@ -157,9 +156,10 @@ class Nuri::Master
  puts "Goal state:".yellow
  goalgen.results.each { |k,v|
  next if k[0,1] == '_'
- print "- #{k}: " + Sfp::Helper.sfp_to_s(v['_value']).green
- print " #{Sfp::Helper.sfp_to_s(f1.results[k])}".red if f1.results.has_key?(k) and
- f1.results[k] != v['_value']
+ print " #{k}: " + Sfp::Helper::Sfp2Ruby.val(v['_value']).to_s.green
+ if f1.results.has_key?(k) and f1.results[k] != v['_value']
+ print " " + Sfp::Helper::Sfp2Ruby.val(f1.results[k]).to_s.red
+ end
  puts ""
  }
 
@@ -338,10 +338,12 @@ class Nuri::Master
 
  return true if list == ''
 
- if system("cd #{@modules_dir}; ./install_module #{address} #{port} #{list} 1>/dev/null 2>/tmp/install_module.error")
- puts "Push modules #{list}to #{name} [OK]".green
- else
- puts "Push modules #{list}to #{name} [Failed]".red
+ output = JSON.parse(`cd #{@modules_dir}; ./install_module #{address} #{port} #{list}`)
+ if output['installed_modules'].length > 0
+ puts ("Push modules: " + output['installed_modules'].join(" ") + " to agent #{name} [OK]").green
+ end
+ if output['missing_modules'].length > 0
+ puts ("Missing modules: " + output['missing_modules'].join(" ") + ".").red
  end
 
  return true
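
Pushing modules now relies on install_module printing a JSON summary instead of its exit status. Judging from the keys read above, the script's output presumably has this shape (illustrative values only):

    require 'json'
    # Hypothetical output of ./install_module, matching the keys the caller reads:
    output = JSON.parse('{"installed_modules":["apache","mysql"],"missing_modules":["bonfire"]}')
    output['installed_modules']  # => ["apache", "mysql"], reported in green
    output['missing_modules']    # => ["bonfire"], reported in red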
@@ -425,9 +427,6 @@ class Nuri::Master
  end
 
  def get_agents
- #@model.select { |k,v| !(k[0,1] == '_' or not v.is_a?(Hash) or
- # v['_context'] != 'object' or v['_classes'].index(AgentSchema).nil?)
- #}
  Nuri::Master.agents(@model)
  end
 
@@ -584,7 +583,7 @@ class Nuri::Master
 
  # for each not-exist state VM, add an effect
  @map[vm].each { |k,v|
- next if operator.has_key?(k) # skip if variable is exist (avoid overwrite)
+ next if operator.has_key?(k) # skip if variable is exist (avoid overwrite)
  next if k =~ /\.sfpAddress/ or k =~ /\.sfpPort/ # skip "sfpAddress" and "sfpPort"
  # because these will be assigned dynamically
  var = parser.variables[k]
@@ -592,7 +591,7 @@ class Nuri::Master
 
  if v.is_a?(Hash)
  val = parser.types[v['_value']][0] if v['_context'] == 'null'
- raise Exception, "Not implemented yet." # this may arise on Set values
+ raise Exception, "Not implemented yet." # this may arise on Set values
  else
  val = v
  end
@@ -607,19 +606,6 @@ class Nuri::Master
  }
  end
  end
-
- def self.start
- # TODO
- fork {
- while true do
- sleep 5000
- end
- }
- end
-
- def self.stop
- # TODO
- end
  end
 
  ### Helper classes/modules
@@ -1,7 +1,7 @@
  require 'thread'
 
  module Nuri::Orchestrator
- include Nuri::Net::Helper
+ include Nuri::Helper
 
  def execute_plan(p={})
  raise Exception, "Plan file is not exist!" if not File.exist?(p[:execute].to_s) and !p[:plan]
@@ -0,0 +1,97 @@
+ require 'rubygems'
+ require 'rubygems/package'
+ require 'zlib'
+ require 'fileutils'
+
+ module Nuri
+ module Util
+ module Tar
+ def targzip(path, prefix="")
+ gzip(tar(path, prefix))
+ end
+
+ # Creates a tar file in memory recursively
+ # from the given path.
+ #
+ # Returns a StringIO whose underlying String
+ # is the contents of the tar file.
+ def tar(path, prefix="")
+ prefix += "/" unless prefix[-1] == '/' or prefix.length <= 0
+ tarfile = StringIO.new("")
+ Gem::Package::TarWriter.new(tarfile) do |tar|
+ Dir[File.join(path, "**/*")].each do |file|
+ mode = File.stat(file).mode
+ relative_file = file.sub /^#{Regexp::escape path}\/?/, ''
+ relative_file = prefix + relative_file
+
+ if File.directory?(file)
+ tar.mkdir relative_file, mode
+ else
+ tar.add_file relative_file, mode do |tf|
+ File.open(file, "rb") { |f| tf.write f.read }
+ end
+ end
+ end
+ end
+
+ tarfile.rewind
+ tarfile
+ end
+
+ # gzips the underlying string in the given StringIO,
+ # returning a new StringIO representing the
+ # compressed file.
+ def gzip(tarfile)
+ gz = StringIO.new("")
+ z = Zlib::GzipWriter.new(gz)
+ z.write tarfile.string
+ z.close # this is necessary!
+
+ # z was closed to write the gzip footer, so
+ # now we need a new StringIO
+ StringIO.new gz.string
+ end
+
+ # un-gzips the given IO, returning the
+ # decompressed version as a StringIO
+ def ungzip(tarfile)
+ z = Zlib::GzipReader.new(tarfile)
+ unzipped = StringIO.new(z.read)
+ z.close
+ unzipped
+ end
+
+ # untars the given IO into the specified
+ # directory
+ def untar(io, destination)
+ Gem::Package::TarReader.new io do |tar|
+ tar.each do |tarfile|
+ destination_file = File.join destination, tarfile.full_name
+
+ if tarfile.directory?
+ FileUtils.mkdir_p destination_file
+ else
+ destination_directory = File.dirname(destination_file)
+ FileUtils.mkdir_p destination_directory unless File.directory?(destination_directory)
+ File.open destination_file, "wb" do |f|
+ #f.print tarfile.read
+ f.write tarfile.read
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+
+ ### Usage Example: ###
+ #
+ # include Util::Tar
+ #
+ # io = tar("./Desktop") # io is a TAR of files
+ # gz = gzip(io) # gz is a TGZ
+ #
+ # io = ungzip(gz) # io is a TAR
+ # untar(io, "./untarred") # files are untarred
+ #
@@ -1,3 +1,5 @@
+ require 'thread'
+
  class Sfp::Module::AptPackage
  include Sfp::Resource
 
@@ -28,6 +30,8 @@ class Sfp::Module::AptPackage
  #
  ##############################
 
+ @@lock = Mutex.new
+
  def self.installed?(package)
  package = package.to_s.strip
  return false if package.length <= 0
@@ -46,23 +50,27 @@ class Sfp::Module::AptPackage
 
  def self.install(package)
  return false if not package.is_a?(String) or package.length <= 0
- return true if Sfp::Module::Package.installed?(package)
- system("dpkg --configure -a")
- system("apt-get -y --purge autoremove")
- return true if system("apt-get -y install #{package}")
- system("dpkg --configure -a")
- system("apt-get -y update")
- !!system("apt-get -y install #{package}")
+ return true if Sfp::Module::AptPackage.installed?(package)
+ @@lock.synchronize {
+ system("dpkg --configure -a")
+ system("apt-get -y --purge autoremove")
+ return true if system("apt-get -y install #{package}")
+ system("dpkg --configure -a")
+ system("apt-get -y update")
+ !!system("apt-get -y install #{package}")
+ }
  end
 
  def self.uninstall(package)
  return false if not package.is_a?(String) or package.length <= 0
- return true if not Sfp::Module::Package.installed?(package)
- system("dpkg --configure -a")
- system("apt-get -y --purge autoremove")
- return (!!system("sudo apt-get -y --purge remove #{package}") and
- !!system("sudo apt-get -y --purge autoremove") and
- !!system("sudo apt-get -y --purge autoremove"))
+ return true if not Sfp::Module::AptPackage.installed?(package)
+ @@lock.synchronize {
+ system("dpkg --configure -a")
+ system("apt-get -y --purge autoremove")
+ return (!!system("sudo apt-get -y --purge remove #{package}") and
+ !!system("sudo apt-get -y --purge autoremove") and
+ !!system("sudo apt-get -y --purge autoremove"))
+ }
  end
 
  protected
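
The new class-level mutex serializes every apt-get/dpkg invocation across threads, since concurrent runs would otherwise collide on dpkg's own lock. A minimal sketch of the pattern (not the gem's code):

    require 'thread'

    class Example
      @@lock = Mutex.new

      # Only one thread at a time may touch the package database.
      def self.install(package)
        @@lock.synchronize { system("apt-get -y install #{package}") }
      end
    end

A `return` inside the synchronized block is safe here, because Mutex#synchronize releases the lock in an ensure clause.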
@@ -72,7 +80,7 @@ class Sfp::Module::AptPackage
 
  def version?
  package = @model['package_name'].to_s.strip
- return nil if package.length <= 0
+ return "" if package.length <= 0
  installed = `apt-cache policy #{package} | grep Installed`.strip.split(' ', 2)[1].to_s.strip
  return "" if installed.length <= 0
  candidate = `apt-cache policy #{package} | grep Candidate`.strip.split(' ', 2)[1].to_s.strip
@@ -1,10 +1,8 @@
  require 'ostruct'
 
- class Sfp::Module::Hadoop1Master
+ class Sfp::Module::Hadoop1Common
  include Sfp::Resource
 
- Services = ['namenode', 'secondarynamenode', 'jobtracker']
-
  def update_state
  to_model
 
@@ -25,11 +23,6 @@ class Sfp::Module::Hadoop1Master
  def install(p={})
  model = OpenStruct.new(@model)
 
- if java_home.length <= 0
- ### install JRE
- shell "apt-get install -y default-jre"
- end
-
  # add group hadoop
  if `grep '^#{model.group}' /etc/group`.length <= 0
  log.info "adding group #{model.group}"
@@ -50,103 +43,82 @@ class Sfp::Module::Hadoop1Master
  # create home
  log.info "create hadoop home directory: #{model.home}"
  shell "mkdir -p #{model.home}" if !::File.exist?(model.home)
- shell "chown -R #{model.user}:#{model.user} #{model.home} && rm -rf #{model.home}/*"
 
- # create data_dir
- shell "rm -f #{model.data_dir} && mkdir -p #{model.data_dir}" if !::File.directory?(model.data_dir)
- shell "chown -R #{model.user}:#{model.user} #{model.data_dir} && rm -rf #{model.data_dir}/*"
-
  # download and extract hadoop binaries
- shell 'apt-get install -y axel'
- downloader = 'axel -q -o' # 'wget -O'
+ log.info "download and install hadoop binaries"
  source = (model.source[-7,7] == '.tar.gz' or model.source[-4,4] == '.tgz' ? model.source : "#{model.source}/hadoop-#{model.version}/hadoop-#{model.version}.tar.gz")
 
- log.info "download and install hadoop binaries"
  file = source.split('/').last.to_s
  basename = (::File.extname(file) == '.gz' ? ::File.basename(file, '.tar.gz') : ::File.basename(file, ::File.extname(file)))
- shell "cd #{model.home} &&
- #{downloader} #{file} #{source} &&
- tar xvzf #{file} && rm -f #{file} &&
- bash -c 'cd #{model.home}/#{basename} && shopt -s dotglob && mv * .. && cd .. && rm -rf #{basename}'"
-
- config_dir = "#{model.home}/conf"
-
- map = {
- 'user' => model.user,
- 'master' => `hostname`.strip,
- 'java_home' => java_home,
- 'tmp_dir' => model.data_dir,
- 'replication' => model.replication,
- }
+ destination = "#{model.home}/#{file}"
+ download source, destination
+ return false if not ::File.exist?(destination)
+ shell "cd #{model.home} &&
+ tar xvzf #{file} && rm -f #{file} &&
+ bash -c 'cd #{model.home}/#{basename} && shopt -s dotglob && mv * .. && cd .. && rm -rf #{basename}'"
 
  # copy and process template configuration files
  log.info "copy and process template configuration files: core-site.xml, hadoop-env.sh, mapred-site.xml"
  dir = File.expand_path(File.dirname(__FILE__))
  ['hadoop-env.sh', 'core-site.xml', 'mapred-site.xml', 'hdfs-site.xml'].each do |file|
- shell "cp -f #{dir}/#{file} #{config_dir}"
+ copy "#{dir}/#{file}", config_dir
  render_file "#{config_dir}/#{file}", map
  end
  shell "chown -R #{model.user}:#{model.user} #{model.home}"
 
- # create HDFS directory
- if !::File.exist?(model.data_dir) && !shell("mkdir -p #{model.data_dir}")
- log.info "create scratch directory for HDFS: #{model.data_dir}"
- shell "mkdir -p #{model.data_dir}"
- end
- shell "chown -R #{model.user}:#{model.user} #{model.data_dir}"
-
- # format namenode space
- log.info "format namenode space"
- shell "su -c '#{model.home}/bin/hadoop namenode -format' hadoop"
-
- return false if not installed?
-
  # export hadoop home to root
  log.info "export hadoop home directory to root"
  shell "sed -i '/^export HADOOP_HOME/d' /root/.bashrc"
  shell "echo 'export HADOOP_HOME=#{model.home}' >> /root/.bashrc"
 
- true
+ # create data_dir
+ shell "rm -f #{model.data_dir} && mkdir -p #{model.data_dir}" if not ::File.directory?(model.data_dir)
+ shell "chown -R #{model.user}:#{model.user} #{model.data_dir} && rm -rf #{model.data_dir}/*"
+
+ installed?
  end
 
  def uninstall(p={})
- model = OpenStruct.new(@model)
  # remove hadoop user and group, and then delete hadoop's home directory
  shell "sed -i '/^export HADOOP_HOME/d' /root/.bash_profile"
- shell "sed -i '/^#{model.user}/d' /etc/passwd &&
- sed -i '/^#{model.user}/d' /etc/shadow &&
- sed -i '/^#{model.user}/d' /etc/group &&
- rm -rf #{model.home} &&
- rm -rf /tmp/#{model.user}* &&
- rm -rf #{model.data_dir}"
+ shell "rm -rf #{@model['home']} &&
+ rm -rf /tmp/#{@model['user']}*"
+
+ not installed?
  end
 
  def start(p={})
- model = OpenStruct.new(@model)
  pids = self.pids
- Services.each { |name|
+ services.each { |name|
  pid = pids[name]
  if pid <= 0
- cmd = "#{model.home}/bin/hadoop-daemon.sh start #{name}"
- log.info `su -c '#{cmd} && sleep 3' #{model.user}`
+ cmd = "#{@model['home']}/bin/hadoop-daemon.sh start #{name}"
+ log.info `su -c '#{cmd} && sleep 3' #{@model['user']}`
  end
  }
- true
+
+ running?
  end
 
  def stop(p={})
- model = OpenStruct.new(@model)
  pids = self.pids
- Services.reverse.each { |name|
+ services.reverse.each { |name|
  pid = pids[name]
  if pid > 0
- cmd = "#{model.home}/bin/hadoop-daemon.sh stop #{name}"
- log.info `su -c '#{cmd}' #{model.user}`
+ cmd = "#{@model['home']}/bin/hadoop-daemon.sh stop #{name}"
+ log.info `su -c '#{cmd}' #{@model['user']}`
  end
  }
- true
- end
 
+ self.pids.each { |name,pid|
+ begin
+ Process.kill 9, pid
+ rescue
+ end
+ }
+
+ not running?
+ end
 
  ##############################
  #
@@ -154,12 +126,8 @@ class Sfp::Module::Hadoop1Master
  #
  ##############################
 
- protected
- def installed?
- ['bin/hadoop', 'conf/hadoop-env.sh', 'conf/core-site.xml', 'conf/mapred-site.xml', 'conf/hdfs-site.xml'].each { |file|
- return false if !::File.exist?("#{@model['home']}/#{file}")
- }
- true
+ def config_dir
+ @model['home'] + '/conf'
  end
 
  def resolve_link(link)
@@ -189,30 +157,14 @@ class Sfp::Module::Hadoop1Master
 
  def pids
  data = {}
- Services.each { |name|
+ services.each { |name|
  data[name] = `ps axf | grep java | grep -v grep | grep hadoop | grep Dproc_#{name}`.to_s.strip.split(' ', 2)[0].to_i
  }
  data
  end
  end
 
-
-
- class Sfp::Module::Hadoop1Slave
- include Sfp::Resource
-
- Services = ['datanode', 'tasktracker']
-
- def update_state
- to_model
-
- @state['installed'] = installed?
- @state['running'] = running?
- @state['pids'] = pids
-
- # try to restart any stopped daemon
- start if @state['running']
- end
+ class Sfp::Module::Hadoop1Master < Sfp::Module::Hadoop1Common
 
  ##############################
  #
@@ -221,164 +173,81 @@ class Sfp::Module::Hadoop1Slave
  ##############################
 
  def install(p={})
- model = OpenStruct.new(@model)
-
- if java_home.length <= 0
- ### install JRE
- shell "apt-get install -y default-jre"
- end
-
- # add group hadoop
- if `grep '^#{model.group}' /etc/group`.length <= 0
- log.info "adding group #{model.group}"
- shell "echo '#{model.group}:x:8000:' >> /etc/group"
- else
- log.info "group #{model.group} is already exist"
- end
-
- # add user hadoop
- if `grep '^#{model.user}' /etc/passwd`.length <= 0
- log.info "adding user #{model.user}"
- shell "echo '#{model.user}:x:8000:8000::#{model.home}:/bin/bash' >> /etc/passwd &&
- echo '#{model.user}:#{model.password}:15958:0:99999:7:::' >> /etc/shadow"
- else
- log.info "user #{model.user} is already exist"
- end
-
- # create home
- log.info "create hadoop home directory: #{model.home}"
- shell "mkdir -p #{model.home}" if !::File.exist?(model.home)
- shell "chown -R #{model.user}:#{model.user} #{model.home} && rm -rf #{model.home}/*"
-
- # create data_dir
- shell "rm -f #{model.data_dir} && mkdir -p #{model.data_dir}" if !::File.directory?(model.data_dir)
- shell "chown -R #{model.user}:#{model.user} #{model.data_dir} && rm -rf #{model.data_dir}/*"
-
- # download and extract hadoop binaries
- system 'apt-get install -y axel'
- downloader = 'axel -q -o' # 'wget -O'
- source = (model.source[-7,7] == '.tar.gz' or model.source[-4,4] == '.tgz' ? model.source : "#{model.source}/hadoop-#{model.version}/hadoop-#{model.version}.tar.gz")
-
- log.info "download and install hadoop binaries"
- file = source.split('/').last.to_s
- basename = (::File.extname(file) == '.gz' ? ::File.basename(file, '.tar.gz') : ::File.basename(file, ::File.extname(file)))
- shell "cd #{model.home} &&
- #{downloader} #{file} #{source} &&
- tar xvzf #{file} && rm -f #{file} &&
- bash -c 'cd #{model.home}/#{basename} && shopt -s dotglob && mv * .. && cd .. && rm -rf #{basename}'"
-
- map = {
- 'user' => model.user,
- 'master' => resolve(model.master + '.parent.sfpAddress'),
- 'java_home' => java_home,
- 'replication' => resolve(model.master + '.replication')
- }
-
- # copy and process template configuration files
- log.info "copy and process template configuration files: core-site.xml, hadoop-env.sh, mapred-site.xml"
- dir = File.expand_path(File.dirname(__FILE__))
- ['hadoop-env.sh', 'core-site.xml', 'mapred-site.xml', 'hdfs-site.xml'].each do |file|
- copy "#{dir}/#{file}", "#{model.home}/conf/"
- render_file "#{model.home}/conf/#{file}", map
- end
- shell "chown -R #{model.user}:#{model.user} #{model.home}"
+ super
 
- # export hadoop home to root
- log.info "export hadoop home directory to root"
- shell "sed -i '/^export HADOOP_HOME/d' /root/.bashrc"
- shell "echo 'export HADOOP_HOME=#{model.home}' >> /root/.bashrc"
+ # format namenode space
+ log.info "format namenode space"
+ shell "su -c '#{@model['home']}/bin/hadoop namenode -format' hadoop"
 
  installed?
  end
 
  def uninstall(p={})
- model = OpenStruct.new(@model)
- # remove hadoop user and group, and then delete hadoop's home directory
- shell "sed -i '/^export HADOOP_HOME/d' /root/.bash_profile"
- shell "sed -i '/^#{model.user}/d' /etc/passwd &&
- sed -i '/^#{model.user}/d' /etc/shadow &&
- sed -i '/^#{model.user}/d' /etc/group &&
- rm -rf #{model.home} &&
- rm -rf /tmp/#{model.user}*"
+ super
+
+ shell "rm -rf #{@model['data_dir']}"
 
  not installed?
  end
 
- def start(p={})
- model = OpenStruct.new(@model)
- pids.each { |name,pid|
- if pid <= 0
- cmd = "#{model.home}/bin/hadoop-daemon.sh start #{name}"
- log.info `su -c '#{cmd} && sleep 3' #{model.user}`
- end
- }
+ ##############################
+ #
+ # Helper methods
+ #
+ ##############################
 
- running?
+ protected
+
+ def services
+ ['namenode', 'secondarynamenode', 'jobtracker']
  end
 
- def stop(p={})
- model = OpenStruct.new(@model)
- pids.each { |name,pid|
- if pid > 0
- cmd = "#{model.home}/bin/hadoop-daemon.sh stop #{name}"
- log.info `su -c '#{cmd}' #{model.user}`
- end
- }
 
- pids.each { |name,pid|
- begin
- Process.kill 9, pid
- rescue
- end
+ def map
+ {
+ 'user' => @model['user'],
+ 'master' => `hostname`.strip,
+ 'java_home' => java_home,
+ 'tmp_dir' => @model['data_dir'],
+ 'replication' => @model['replication'],
  }
-
- not running?
  end
 
-
- ##############################
- #
- # Helper methods
- #
- ##############################
-
  def installed?
  ['bin/hadoop', 'conf/hadoop-env.sh', 'conf/core-site.xml', 'conf/mapred-site.xml', 'conf/hdfs-site.xml'].each { |file|
  return false if !::File.exist?("#{@model['home']}/#{file}")
  }
  true
  end
+ end
 
- def resolve_link(link)
- begin
- link = ::File.readlink(link)
- link = resolve_link(link)
- rescue
- link
- end
- end
 
- def java_home
- return @model['java_home'] if @model['java_home'].to_s.strip.length > 0
- java = resolve_link(`which java`.strip)
- return '' if java.length <= 0
- ::File.expand_path(java + '/../../')
+
+ class Sfp::Module::Hadoop1Slave < Sfp::Module::Hadoop1Common
+
+ ##############################
+ #
+ # Helper methods
+ #
+ ##############################
+
+ def services
+ ['datanode', 'tasktracker']
  end
 
- def running?
- status = false
- pids.each { |name,pid|
- log.warn "#{name} is not running!" if pid <= 0
- status = true if pid > 0
+ def map
+ {
+ 'user' => @model['user'],
+ 'master' => resolve(@model['master'] + '.parent.sfpAddress'),
+ 'java_home' => java_home,
+ 'replication' => resolve(@model['master'] + '.replication')
  }
- status
  end
 
- def pids
- data = {}
- Services.each { |name|
- data[name] = `ps axf | grep java | grep -v grep | grep hadoop | grep Dproc_#{name}`.to_s.strip.split(' ', 2)[0].to_i
+ def installed?
+ ['bin/hadoop', 'conf/hadoop-env.sh', 'conf/core-site.xml', 'conf/mapred-site.xml', 'conf/hdfs-site.xml'].each { |file|
+ return false if !::File.exist?("#{@model['home']}/#{file}")
  }
- data
+ true
  end
  }