nuri 0.5.3 → 0.5.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. checksums.yaml +4 -4
  2. data/README.md +5 -1
  3. data/VERSION +1 -1
  4. data/bin/nuri +60 -14
  5. data/bin/nuri-install-module +17 -9
  6. data/examples/mockcloud/apache2.sfp +14 -0
  7. data/examples/mockcloud/ping.rb +38 -0
  8. data/examples/openstack/openstack-hadoop1-cluster.sfp +37 -0
  9. data/examples/openstack/openstack-hadoop2-cluster.sfp +39 -0
  10. data/examples/v2/apache.sfp +30 -0
  11. data/examples/v2/aptpackage.sfp +6 -0
  12. data/examples/v2/mock1.sfp +12 -0
  13. data/examples/v2/package.sfp +22 -0
  14. data/examples/v2/service.sfp +94 -0
  15. data/examples/v2/tarpackage.sfp +5 -0
  16. data/lib/nuri.rb +14 -10
  17. data/lib/nuri/choreographer.rb +3 -3
  18. data/lib/nuri/helper.rb +20 -10
  19. data/lib/nuri/master.rb +82 -54
  20. data/lib/nuri/orchestrator.rb +1 -1
  21. data/modules/.gitignore +0 -4
  22. data/modules/README.md +11 -0
  23. data/modules/apache/apache.sfp +2 -1
  24. data/modules/file/file.rb +49 -19
  25. data/modules/hadoop1/hadoop1.rb +18 -11
  26. data/modules/hadoop2/hadoop2.rb +11 -11
  27. data/modules/hadoop2/hadoop2.sfp +7 -6
  28. data/modules/hadoop2/yarn-site.xml +5 -0
  29. data/modules/machine/machine.rb +24 -14
  30. data/modules/openstack/README.md +24 -0
  31. data/modules/openstack/config.yml +5 -0
  32. data/modules/openstack/example.sfp +9 -0
  33. data/modules/openstack/openstack.rb +329 -0
  34. data/modules/openstack/openstack.sfp +24 -0
  35. data/modules/os/os.rb +1 -1
  36. data/modules/package2/apt-repo-list.sh +15 -0
  37. data/modules/package2/package2.rb +213 -43
  38. data/modules/package2/package2.sfp +3 -2
  39. data/modules/pyfile/README.md +4 -0
  40. data/modules/vm/vm.rb +3 -1
  41. data/modules/vm/vm.sfp +4 -3
  42. metadata +20 -3
  43. data/modules/hpcloud/test.sfp +0 -5
data/modules/file/file.rb

@@ -13,14 +13,14 @@ class Sfp::Module::File
 
 	def update_state
 		path = @model['path'].to_s.strip
-		if @model['exists']
-			create(path)
-		else
-			delete(path)
+
+		if not exist? or type? != @model['type']
+			create if del
 		end
 
 		@state['path'] = path
-		@state['exists'] = ::File.exist?(path)
+		@state['exists'] = exist?
+		@state['type'] = type?
 		@state['content'] = content?
 		@state['user'], @state['group'] = user_group?
 		@state['permission'] = permission?
@@ -34,28 +34,58 @@ class Sfp::Module::File
 
 	protected
 
-	def delete(file)
-		::File.delete(file) if ::File.exist?(file)
+	def del(p={})
+		return true if not exist?
+
+		file = @model['path']
+		if ::File.file?(file)
+			::File.delete(file)
+		elsif ::File.directory?(file) and file != '/'
+			shell "rm -rf #{file}"
+		end
+		not exist?
 	end
 
-	def create(file)
-		log.warn "Failed to create/update file #{file}!" if
+	def create(p={})
+		return true if exist?
+		file = @model['path']
+		if @model['type'] == 'file'
 			not set_content(file) or
 			not set_owner(file) or
 			not set_permission(file)
+		elsif @model['type'] == 'directory'
+			Dir.mkdir(file)
+		end
+		exist?
+	end
+
+	def exist?
+		::File.exist?(@model['path'])
+	end
+
+	def type?
+		if exist?
+			(::File.file?(@model['path']) ? 'file' : 'directory')
+		else
+			''
+		end
 	end
 
 	def set_content(file)
-		return true if not @model['content'].is_a?(String)
-		begin
-			current = (::File.exist?(file) ? content? : nil)
-			desired = Digest::SHA1.hexdigest(@model['content'])
-			File.open(file, 'w') { |f| f.write(@model['content']) } if current != desired
-			return true
-		rescue Exception => e
-			log.error "#{e}\n#{e.backtrace.join("\n")}"
+		if @model['type'] == 'directory'
+			shell "mkdir #{@model['path']}"
+		else
+			return true if not @model['content'].is_a?(String)
+			begin
+				current = (::File.exist?(file) ? content? : nil)
+				desired = Digest::SHA1.hexdigest(@model['content'])
+				File.open(file, 'w') { |f| f.write(@model['content']) } if current != desired
+				return true
+			rescue Exception => e
+				log.error "#{e}\n#{e.backtrace.join("\n")}"
+			end
+			false
 		end
-		false
 	end
 
 	def set_owner(file)
@@ -72,7 +102,7 @@ class Sfp::Module::File
 	end
 
 	def content?
-		(::File.exist?(@model['path']) ? Digest::SHA1.hexdigest(::File.read(@model['path'])) : '')
+		(::File.file?(@model['path']) ? Digest::SHA1.hexdigest(::File.read(@model['path'])) : '')
 	end
 
 	def user_group?
data/modules/hadoop1/hadoop1.rb

@@ -24,7 +24,7 @@ class Sfp::Module::Hadoop1Common
 		model = OpenStruct.new(@model)
 
 		# add group hadoop
-		if `grep '^#{model.group}' /etc/group`.length <= 0
+		if %x[grep '^#{model.group}' /etc/group].length <= 0
 			log.info "adding group #{model.group}"
 			shell "echo '#{model.group}:x:8000:' >> /etc/group"
 		else
@@ -32,7 +32,7 @@ class Sfp::Module::Hadoop1Common
 		end
 
 		# add user hadoop
-		if `grep '^#{model.user}' /etc/passwd`.length <= 0
+		if %x[grep '^#{model.user}' /etc/passwd].length <= 0
 			log.info "adding user #{model.user}"
 			shell "echo '#{model.user}:x:8000:8000::#{model.home}:/bin/bash' >> /etc/passwd &&
 			       echo '#{model.user}:#{model.password}:15958:0:99999:7:::' >> /etc/shadow"
@@ -46,11 +46,13 @@ class Sfp::Module::Hadoop1Common
 
 		# download and extract hadoop binaries
 		log.info "download and install hadoop binaries"
-		source = (model.source[-7,7] == '.tar.gz' or model.source[-4,4] == '.tgz' ? model.source : "#{model.source}/hadoop-#{model.version}/hadoop-#{model.version}.tar.gz")
+		#source = (model.source[-7,7] == '.tar.gz' or model.source[-4,4] == '.tgz' ? model.source : "#{model.source}/hadoop-#{model.version}/hadoop-#{model.version}.tar.gz")
+		source = (model.source[-7,7] == '.tar.gz' or model.source[-4,4] == '.tgz' ? model.source : "#{model.source}/hadoop-#{model.version}.tar.gz")
 
 		file = source.split('/').last.to_s
 		basename = (::File.extname(file) == '.gz' ? ::File.basename(file, '.tar.gz') : ::File.basename(file, ::File.extname(file)))
 		destination = "#{model.home}/#{file}"
+		log.info "download #{source} to #{destination}"
 		download source, destination
 		return false if not ::File.exist?(destination)
 		shell "cd #{model.home} &&
@@ -92,8 +94,10 @@ class Sfp::Module::Hadoop1Common
 		services.each { |name|
 			pid = pids[name]
 			if pid <= 0
-				cmd = "#{@model['home']}/bin/hadoop-daemon.sh start #{name}"
-				log.info `su -c '#{cmd} && sleep 3' #{@model['user']}`
+				cmd = "su -c '#{@model['home']}/bin/hadoop-daemon.sh start #{name} && sleep 3' #{@model['user']}"
+				pid = spawn(cmd)
+				Process.wait pid
+				#log.info %x[sudo su -c '#{cmd} && sleep 3' #{@model['user']}]
 			end
 		}
 
@@ -105,8 +109,10 @@ class Sfp::Module::Hadoop1Common
 		services.reverse.each { |name|
 			pid = pids[name]
 			if pid > 0
-				cmd = "#{@model['home']}/bin/hadoop-daemon.sh stop #{name}"
-				log.info `su -c '#{cmd}' #{@model['user']}`
+				cmd = "su -c '#{@model['home']}/bin/hadoop-daemon.sh stop #{name}' #{@model['user']}"
+				pid = spawn(cmd)
+				Process.wait pid
+				#log.info %x[sudo su -c '#{cmd}' #{@model['user']}]
 			end
 		}
 
@@ -141,7 +147,7 @@ class Sfp::Module::Hadoop1Common
 
 	def java_home
 		return @model['java_home'] if @model['java_home'].to_s.strip.length > 0
-		java = resolve_link(`which java`.strip)
+		java = resolve_link(%x[which java].strip)
 		return '' if java.length <= 0
 		::File.expand_path(java + '/../../')
 	end
@@ -158,7 +164,7 @@ class Sfp::Module::Hadoop1Common
 	def pids
 		data = {}
 		services.each { |name|
-			data[name] = `ps axf | grep java | grep -v grep | grep hadoop | grep Dproc_#{name}`.to_s.strip.split(' ', 2)[0].to_i
+			data[name] = %x[ps axf | grep java | grep -v grep | grep hadoop | grep Dproc_#{name}].to_s.strip.split(' ', 2)[0].to_i
 		}
 		data
 	end
@@ -185,7 +191,7 @@ class Sfp::Module::Hadoop1Master < Sfp::Module::Hadoop1Common
 	def uninstall(p={})
 		super
 
-		shell "rm -rf #{model.data_dir}"
+		shell "rm -rf #{@model['data_dir']}"
 
 		not installed?
 	end
@@ -206,7 +212,7 @@ class Sfp::Module::Hadoop1Master < Sfp::Module::Hadoop1Common
 	def map
 		{
 			'user' => @model['user'],
-			'master' => `hostname`.strip,
+			'master' => resolve("$." + Sfp::Agent.whoami? + ".sfpAddress"), #'master' => %x[hostname].strip,
 			'java_home' => java_home,
 			'tmp_dir' => @model['data_dir'],
 			'replication' => @model['replication'],
@@ -240,6 +246,7 @@ class Sfp::Module::Hadoop1Slave < Sfp::Module::Hadoop1Common
 			'user' => @model['user'],
 			'master' => resolve(@model['master'] + '.parent.sfpAddress'),
 			'java_home' => java_home,
+			'tmp_dir' => @model['data_dir'],
 			'replication' => resolve(@model['master'] + '.replication')
 		}
 	end
data/modules/hadoop2/hadoop2.rb

@@ -1,9 +1,7 @@
-require ::File.dirname(__FILE__) + '/../tarpackage/tarpackage.rb'
+require ::File.dirname(__FILE__) + '/../package2/package2.rb'
 
 module Sfp::Module::Hadoop2Common
 	def update_state
-		to_model
-
 		super
 
 		@state['running'] = running?
@@ -11,12 +9,12 @@ module Sfp::Module::Hadoop2Common
 		@state['pids'] = pids
 		@state['java_home'] = java_home
 
-		start if @state['running']
+		#start if @state['running']
 	end
 
 	##############################
 	#
-	# Action methods (see Hadoop.sfp)
+	# Action methods (see hadoop2.sfp)
 	#
 	##############################
 
@@ -157,6 +155,7 @@ module Sfp::Module::Hadoop2Common
 			'yarn_nodemanager_aux_services' => 'mapreduce_shuffle',
 			'yarn_log_aggregation_retain_seconds' => -1,
 			'yarn_log_aggregation_retain_check_interval_seconds' => -1,
+			'yarn_nodemanager_hostname' => local_address,
 			'yarn_nodemanager_local_dirs' => @model['data_dir'] + "/yarn_local_dir",
 			'yarn_nodemanager_log_dirs' => @model['data_dir'] + "/yarn_log_dir",
 			'yarn_web_proxy_address' => local_address,
@@ -165,8 +164,9 @@ module Sfp::Module::Hadoop2Common
 	end
 
 	def local_address
-		domain = `dnsdomainname`.to_s.strip
-		`hostname`.to_s.strip + (domain.length > 0 ? '.' + domain : '')
+		resolve("$.#{Sfp::Agent.whoami?}.sfpAddress")
+		#domain = `dnsdomainname`.to_s.strip
+		#`hostname`.to_s.strip + (domain.length > 0 ? '.' + domain : '')
 	end
 
 	# TODO -- user "useradd" and "groupadd"
@@ -216,14 +216,14 @@ module Sfp::Module::Hadoop2Common
 	end
 end
 
-class Sfp::Module::Hadoop2Master < Sfp::Module::TarPackage
+class Sfp::Module::Hadoop2Master < Sfp::Module::Package2
 	include ::Sfp::Module::Hadoop2Common
 
 	Services = ['namenode', 'resourcemanager', 'historyserver', 'proxyserver']
 
 	##############################
 	#
-	# Action methods (see TarPackage.sfp)
+	# Action methods (see hadoop2.sfp)
 	#
 	##############################
 
@@ -330,14 +330,14 @@ class Sfp::Module::Hadoop2Master < Sfp::Module::TarPackage
 	end
 end
 
-class Sfp::Module::Hadoop2Slave < Sfp::Module::TarPackage
+class Sfp::Module::Hadoop2Slave < Sfp::Module::Package2
 	include ::Sfp::Module::Hadoop2Common
 
 	Services = ['datanode', 'nodemanager']
 
 	##############################
 	#
-	# Action methods (see TarPackage.sfp)
+	# Action methods (see hadoop2.sfp)
 	#
 	##############################
 
data/modules/hadoop2/hadoop2.sfp

@@ -1,20 +1,21 @@
-include "../tarpackage/tarpackage.sfp"
+include "../package2/package2.sfp"
 
-schema Hadoop2 extends TarPackage {
+schema Hadoop2 extends Package2 {
 	installed = true
 	running = true
 	configured = true
 
-	final package_name = "hadoop"
+	final name = "hadoop"
+	final provider = "tar"
 	final version = "2.2.0"
-	final source = "http://www.mirrorservice.org/sites/ftp.apache.org/hadoop/common"
-	final home = "/opt/hadoop"
+	final source = "http://master.herry.ext9.sup.hpl.hp.com/hadoop"
+	final home = "/opt/hadoop2"
 
 	final user = "hadoop"
 	final group = "hadoop"
 	final password = "!"
 	final java_home = ""
-	final data_dir = "/opt/hadoop/data"
+	final data_dir = "/opt/hadoop2/data"
 
 	sub install {
 		condition {
data/modules/hadoop2/yarn-site.xml

@@ -44,6 +44,11 @@
 		<name>yarn.web-proxy.address</name>
 		<value><%= yarn_web_proxy_address %>:54315</value>
 	</property>
+
+	<property>
+		<name>yarn.nodemanager.hostname</name>
+		<value><%= yarn_nodemanager_hostname %></value>
+	</property>
 	<property>
 		<name>yarn.nodemanager.local-dirs</name>
 		<value><%= yarn_nodemanager_local_dirs %></value>
data/modules/machine/machine.rb

@@ -4,7 +4,9 @@ class Sfp::Module::Machine
 	def update_state
 		to_model
 
-		load_kernel_modules(['acpiphp'])
+		if platform.include?('linux')
+			load_kernel_modules(['acpiphp'])
+		end
 
 		@state['sfpAddress'] = @model['sfpAddress']
 		@state['sfpPort'] = @model['sfpPort']
@@ -20,7 +22,11 @@ class Sfp::Module::Machine
 		@state["cpus"] = (File.exist?('/proc/cpuinfo') ? `cat /proc/cpuinfo | grep processor | wc -l`.strip.to_i : -1)
 		@state['memory'] = (`which free`.strip != '' ? `free`.split("\n")[1].split(" ")[1] : -1)
 
-		@state['disk'] = get_disk_state
+		if platform.include?('linux')
+			@state['disks'] = get_disks_state
+		else
+			@state['disks'] = {}
+		end
 	end
 
 	##############################
@@ -29,6 +35,10 @@ class Sfp::Module::Machine
 	#
 	##############################
 
+	def platform
+		RUBY_PLATFORM.downcase
+	end
+
 	protected
 
 	def load_kernel_modules(modules=[])
@@ -48,12 +58,12 @@ class Sfp::Module::Machine
 
 	# generate the disks' state, try to automatically mount the disk to target directory
 	#
-	def get_disk_state
+	def get_disks_state
 		def generate_state
-			disk = {}
+			disks = {}
 			# get disks UUID
 			uuids = {}
-			`blkid`.each_line do |line|
+			`/sbin/blkid`.each_line do |line|
 				line.strip!
 				next if line.length <= 0
 				device, info = line.split(':', 2)
@@ -69,32 +79,32 @@ class Sfp::Module::Machine
 				if data[0][0..4] == '/dev/'
 					name = 'root'
 					if data[5] != '/'
-						model = (@model['disk'].is_a?(Hash) ? @model['disk'].select { |k,v| v['mount'] == data[5] if k[0] != '_' } : {})
+						model = (@model['disks'].is_a?(Hash) ? @model['disks'].select { |k,v| v['mount'] == data[5] if k[0] != '_' } : {})
 						name = (model.length > 0 ? model.keys.first : "uuid_#{uuids[data[0]]}")
 					end
-					disk[name] = {
+					disks[name] = {
 						'size' => (data[1].to_f / 1000.0).to_i,
 						'mount' => data[5],
 						'uuid' => uuids[data[0]]
 					}
 				end
 			end
-			disk
+			disks
 		end
 
-		disk = generate_state
+		disks = generate_state
 
-		if @model['disk'].is_a?(Hash)
-			names = @model['disk'].keys.sort { |x,y| x <=> y }
+		if @model['disks'].is_a?(Hash)
+			names = @model['disks'].keys.sort { |x,y| x <=> y }
 			device = "/dev/vdb"
 			# format unformatted disks, mount unmount disks
 			names.each { |name|
-				next if name[0] == '_' or disk.has_key?(name)
-				spec = @model['disk'][name]
+				next if name[0] == '_' or disks.has_key?(name)
+				spec = @model['disks'][name]
 				status = `file -s #{device}`
 				if not (status =~ /ERROR/)
 					target = spec['mount'].to_s.strip
-					# format the disk if not yet formatted
+					# format the disks if not yet formatted
 					system "mkfs.ext4 #{device}" if not (status =~ /.+ filesystem data/)
 					# create target directory if not exist
 					system "mkdir -p #{target}" if !File.exist? target
data/modules/openstack/README.md

@@ -0,0 +1,24 @@
+OpenStack module
+----------------
+
+Required Ruby Gems:
+- fog
+- json
+
+To use:
+- Assume that your key-name for SSH is `default`.
+- Copy your VM's SSH private key to the file `default.pem` in this directory.
+- Edit file `config.yml` by filling in fields:
+  - `username` - the username of your account
+  - `password` - the password of your account
+
+  This information can be found in your OpenStack account under menu `Access`.
+- Set `key_name` value.
+- Replace any necessary default values of HPCloud's attributes such as:
+  - `auth_uri` - the URL of OpenStack's end point (append `/tokens`)
+  - `vm_image` - the UUID of the VM's image
+  - `vm_flavour` - the ID of the flavour
+  - `vm_ssh_user` - the user that will be used by the module for SSH (`root`, `ubuntu`, or other)
+  - `vm_ssh_key_name` - the SSH key name
+  - `vm_security_group` - the security group that will be applied to your VM
+  - `vm_network` - the network that will be connected to your VM
data/modules/openstack/config.yml

@@ -0,0 +1,5 @@
+access_key:
+secret_key:
+tenant_id:
+username:
+password:
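
For reference, a filled-in `config.yml` might look like the minimal sketch below. Every value is a hypothetical placeholder, not from the package; the field names are exactly those in the new template above, and per the README the `username` and `password` come from your OpenStack account's `Access` page.

    # hypothetical placeholder values -- substitute your own OpenStack credentials
    access_key: a1b2c3d4e5f6       # API access key, if your deployment issues one
    secret_key: s3cr3t-k3y-value   # the matching API secret key
    tenant_id: "1234567890"        # ID of your OpenStack tenant/project
    username: alice                # account username (from the Access page)
    password: changeme             # account password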
data/modules/openstack/example.sfp

@@ -0,0 +1,9 @@
+include "modules/vm/vm.sfp"
+include "modules/openstack/openstack.sfp"
+
+proxy isa Node {
+	sfpAddress is "localhost"
+	openstack isa OpenStack {
+		auth_uri is "http://16.25.166.21:5000/v2.0/tokens"
+	}
+}