nuri 0.5.1
- checksums.yaml +7 -0
- data/.gitignore +6 -0
- data/.travis.yml +12 -0
- data/CHANGELOG +146 -0
- data/Gemfile +3 -0
- data/LICENSE +28 -0
- data/README.md +64 -0
- data/Rakefile +15 -0
- data/VERSION +1 -0
- data/bin/delete_modules +11 -0
- data/bin/install_agent +18 -0
- data/bin/install_module +65 -0
- data/bin/nuri +519 -0
- data/bin/nuri.old +183 -0
- data/bin/push_model +16 -0
- data/examples/.gitignore +3 -0
- data/examples/bonfire.sfp +95 -0
- data/examples/bonfire/epcc.sfp +43 -0
- data/examples/bonfire/epcc0.sfp +49 -0
- data/examples/bonfire/epcc2.sfp +52 -0
- data/examples/bonfire/epcc2a.sfp +25 -0
- data/examples/bonfire/inria.sfp +72 -0
- data/examples/bonfire/inria0.sfp +49 -0
- data/examples/bonfire/inria2.sfp +71 -0
- data/examples/bonfire/inria2a.sfp +44 -0
- data/examples/bonfire/inria2b.sfp +54 -0
- data/examples/bonfire/inria2c.sfp +62 -0
- data/examples/bonfire/inria2d.sfp +71 -0
- data/examples/bonfire/inria2e.sfp +80 -0
- data/examples/bonfire/main.sfp +33 -0
- data/examples/bonfire/old/bonfire-1-1-1.sfp +76 -0
- data/examples/bonfire/old/bonfire-1-10-1.sfp +77 -0
- data/examples/bonfire/old/bonfire-1-2-1.sfp +58 -0
- data/examples/bonfire/old/bonfire-1-3-1.sfp +61 -0
- data/examples/bonfire/old/bonfire-1-4-1.sfp +64 -0
- data/examples/bonfire/old/bonfire-1-5-1.sfp +67 -0
- data/examples/bonfire/old/bonfire-1-6-1.sfp +82 -0
- data/examples/bonfire/old/bonfire-1-7-1.sfp +82 -0
- data/examples/bonfire/old/bonfire-1-8-1.sfp +79 -0
- data/examples/bonfire/old/bonfire-1-9-1.sfp +83 -0
- data/examples/bonfire/old/wp-test1a.sfp +38 -0
- data/examples/bonfire/old/wp-test1b.sfp +18 -0
- data/examples/bonfire/old/wp-test1c.sfp +7 -0
- data/examples/bonfire/old/wp-test2.sfp +47 -0
- data/examples/bonfire/old3/bonfire-epcc.sfp +57 -0
- data/examples/bonfire/old3/bonfire-inria.sfp +72 -0
- data/examples/bonfire/old3/bonfire-master.sfp +18 -0
- data/examples/bonfire/old3/bonfire.sfp +23 -0
- data/examples/bonfire/old3/bonfire2.sfp +49 -0
- data/examples/bonfire/old3/bonfire3.sfp +76 -0
- data/examples/bonfire/old3/bonfire4.sfp +78 -0
- data/examples/bonfire/old3/bonfire5.sfp +34 -0
- data/examples/bonfire/old3/bonfire5b.sfp +84 -0
- data/examples/bonfire/old3/hpvm6.sfp +22 -0
- data/examples/bonfire/old3/model.json +1 -0
- data/examples/bonfire/old3/test0.sfp +16 -0
- data/examples/bonfire/old3/test1.sfp +5 -0
- data/examples/bonfire/old3/test10.sfp +5 -0
- data/examples/bonfire/old3/test2.sfp +18 -0
- data/examples/bonfire/old3/test3.sfp +10 -0
- data/examples/bonfire/old3/test4.sfp +11 -0
- data/examples/bonfire/old3/test5.sfp +18 -0
- data/examples/bonfire/old3/test6.sfp +19 -0
- data/examples/bonfire/old3/test7.sfp +34 -0
- data/examples/bonfire/old3/test8.sfp +5 -0
- data/examples/bonfire/old3/test9.sfp +16 -0
- data/examples/bonfire/old3/wordpress-test-cluster.sfp +38 -0
- data/examples/bonfire/old3/wordpress-test.sfp +22 -0
- data/examples/bonfire/old3/wp-test-2.sfp +49 -0
- data/examples/bonfire/test.sfp +13 -0
- data/examples/generator.rb +66 -0
- data/examples/hadoop2.sfp +20 -0
- data/examples/hpcloud.sfp +18 -0
- data/examples/run.rb +17 -0
- data/examples/test.inc +0 -0
- data/examples/test.sfp +11 -0
- data/lib/naas/d3.js +5 -0
- data/lib/naas/d3.v3.min.js +5 -0
- data/lib/naas/index.css +0 -0
- data/lib/naas/index.html +18 -0
- data/lib/naas/index.js +18 -0
- data/lib/naas/jquery-1.10.2.min.js +6 -0
- data/lib/naas/jquery.js +6 -0
- data/lib/naas/naas.rb +160 -0
- data/lib/nuri.rb +62 -0
- data/lib/nuri/choreographer.rb +151 -0
- data/lib/nuri/constraint_helper.rb +9 -0
- data/lib/nuri/directory.rb +40 -0
- data/lib/nuri/master.rb +725 -0
- data/lib/nuri/net_helper.rb +65 -0
- data/lib/nuri/orchestrator.rb +224 -0
- data/lib/nuri/server.rb +212 -0
- data/modules/.gitignore +4 -0
- data/modules/apache/apache.rb +255 -0
- data/modules/apache/apache.rb.old +167 -0
- data/modules/apache/apache.sfp +146 -0
- data/modules/apache/apache.sfp.future +100 -0
- data/modules/apache/load_balancer +20 -0
- data/modules/apache/model.json +1 -0
- data/modules/apache/test.sfp +8 -0
- data/modules/aptpackage/aptpackage.rb +82 -0
- data/modules/aptpackage/aptpackage.sfp +5 -0
- data/modules/bonfire/.gitignore +2 -0
- data/modules/bonfire/README.md +12 -0
- data/modules/bonfire/bonfire.rb +60 -0
- data/modules/bonfire/bonfire.sfp +9 -0
- data/modules/bonfire/config.yml +4 -0
- data/modules/bonfire/helper.rb +149 -0
- data/modules/bonfire/stresstest.rb +144 -0
- data/modules/bonfire/test.sfp +8 -0
- data/modules/client/client.rb +22 -0
- data/modules/client/client.sfp +14 -0
- data/modules/cloud/cloud.rb +11 -0
- data/modules/cloud/cloud.sfp +26 -0
- data/modules/file/file.rb +91 -0
- data/modules/file/file.sfp +9 -0
- data/modules/hadoop1/core-site.xml +17 -0
- data/modules/hadoop1/hadoop-env.sh +55 -0
- data/modules/hadoop1/hadoop1.rb +384 -0
- data/modules/hadoop1/hadoop1.sfp +93 -0
- data/modules/hadoop1/hdfs-site.xml +16 -0
- data/modules/hadoop1/mapred-site.xml +17 -0
- data/modules/hadoop2/core-site.xml +31 -0
- data/modules/hadoop2/hadoop-env.sh +77 -0
- data/modules/hadoop2/hadoop2.rb +401 -0
- data/modules/hadoop2/hadoop2.sfp +114 -0
- data/modules/hadoop2/hdfs-site.xml +47 -0
- data/modules/hadoop2/mapred-site.xml +71 -0
- data/modules/hadoop2/ports +14 -0
- data/modules/hadoop2/yarn-env.sh +112 -0
- data/modules/hadoop2/yarn-site.xml +107 -0
- data/modules/hpcloud/.gitignore +2 -0
- data/modules/hpcloud/README.md +16 -0
- data/modules/hpcloud/config.yml +3 -0
- data/modules/hpcloud/example.sfp +18 -0
- data/modules/hpcloud/hpcloud.rb +241 -0
- data/modules/hpcloud/hpcloud.sfp +22 -0
- data/modules/hpcloud/test.sfp +5 -0
- data/modules/install_module +65 -0
- data/modules/machine/machine.rb +95 -0
- data/modules/machine/machine.sfp +9 -0
- data/modules/mockcloud/mockcloud.rb +20 -0
- data/modules/mockcloud/mockcloud.sfp +6 -0
- data/modules/mysql/mysql.rb +118 -0
- data/modules/mysql/mysql.sfp +38 -0
- data/modules/mysql/test.sfp +3 -0
- data/modules/node/node.rb +8 -0
- data/modules/node/node.sfp +7 -0
- data/modules/object/object.rb +7 -0
- data/modules/object/object.sfp +1 -0
- data/modules/os/os.rb +38 -0
- data/modules/os/os.sfp +11 -0
- data/modules/package/package.rb +26 -0
- data/modules/package/package.sfp +22 -0
- data/modules/package/test.sfp +6 -0
- data/modules/service/model.json +1 -0
- data/modules/service/service.rb +50 -0
- data/modules/service/service.sfp +46 -0
- data/modules/service/test.sfp +6 -0
- data/modules/tarpackage/tarpackage.rb +93 -0
- data/modules/tarpackage/tarpackage.sfp +5 -0
- data/modules/vm/vm.rb +8 -0
- data/modules/vm/vm.sfp +18 -0
- data/modules/wordpress/wordpress.rb +98 -0
- data/modules/wordpress/wordpress.sfp +34 -0
- data/modules/wordpresscluster/wordpresscluster.rb +150 -0
- data/modules/wordpresscluster/wordpresscluster.sfp +74 -0
- data/nuri.gemspec +26 -0
- metadata +281 -0
data/modules/hadoop1/hadoop1.sfp
@@ -0,0 +1,93 @@
include "../service/service.sfp"

/**
 * Module for managing Hadoop version 1
 *
 * - password can be generated using command:
 *   $ echo "mypassword" | makepasswd --clearfrom=- --crypt-md5 | awk '{ print $2 }'
 *
 */
schema Hadoop1 {
  installed = true
  running = true

  final home = "/opt/hadoop"
  final user = "hadoop"
  final group = "hadoop"
  final password = "!"
  final source = "http://www.mirrorservice.org/sites/ftp.apache.org/hadoop/common"
  final version = "1.2.1"
  final java_home = "" //"/usr/local/java/jre1.6.0_34"
  final data_dir = "/tmp/hadoop"
  final replication = 2

  sub install {
    condition {
      this.installed != true
    }
    effect {
      this.installed = true
      this.running = false
    }
  }

  sub uninstall {
    condition {
      this.running = false
      this.installed = true
    }
    effect {
      this.installed = false
    }
  }

  sub start {
    condition {
      this.running != true
      this.installed = true
    }
    effect {
      this.running = true
    }
  }

  sub stop {
    condition {
      this.running = true
    }
    effect {
      this.running = false
    }
  }
}

schema Hadoop1Master extends Hadoop1 { }


schema Hadoop1Slave extends Hadoop1 {
  master isref Hadoop1Master

  sub install (master : Hadoop1Master) {
    condition {
      this.installed != true
      master.parent.created = true
    }
    effect {
      this.installed = true
      this.running = false
      this.master = master
    }
  }

  sub start (master : Hadoop1Master) {
    condition {
      this.running != true
      this.installed = true
      this.master = master
      master.running = true
    }
    effect {
      this.running = true
    }
  }
}
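The schema's `password` field holds a crypt-MD5 hash of the kind produced by the makepasswd command quoted in the comment above (the default "!" leaves the account locked). As a hedged aside, the same kind of hash can be generated from Ruby; this sketch assumes a glibc crypt(3), which supports the "$1$" MD5 scheme on the Ubuntu hosts these modules target:

    # Illustrative Ruby equivalent of the makepasswd command in the schema
    # comment. With a "$1$<salt>$" prefix, String#crypt (glibc) returns an
    # MD5-crypt hash of the form written to /etc/shadow.
    salt = [*'a'..'z', *'A'..'Z', *'0'..'9'].sample(8).join
    puts 'mypassword'.crypt("$1$#{salt}$")
    # => e.g. "$1$Xh4o90Kf$..." -- usable as the schema's `password` value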
data/modules/hadoop1/hdfs-site.xml
@@ -0,0 +1,16 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
  <property>
    <name>dfs.replication</name>
    <value><%= replication %></value>
    <description>Default block replication.
    The actual number of replications can be specified when the file is created.
    The default is used if replication is not specified in create time.
    </description>
  </property>
</configuration>
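The `<%= replication %>` placeholder is filled in when the Ruby module copies this template into the Hadoop config directory and renders it with its `map` of settings (see `render_file` in hadoop2.rb later in this diff). A minimal sketch of that substitution, assuming plain ERB semantics; the gem's own render helpers may differ in detail:

    require 'erb'
    require 'ostruct'

    # Hypothetical stand-in for the gem's render helper: evaluate the
    # template against an OpenStruct so a bare name like `replication`
    # resolves to the matching entry of the map.
    def render(template, map)
      ERB.new(template).result(OpenStruct.new(map).instance_eval { binding })
    end

    puts render(File.read('hdfs-site.xml'), 'replication' => 2)
    # the <value> element now reads <value>2</value>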
data/modules/hadoop1/mapred-site.xml
@@ -0,0 +1,17 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
  <property>
    <name>mapred.job.tracker</name>
    <value><%= master %>:54311</value>
    <description>The host and port that the MapReduce job tracker runs
    at. If "local", then jobs are run in-process as a single map
    and reduce task.
    </description>
  </property>

</configuration>
data/modules/hadoop2/core-site.xml
@@ -0,0 +1,31 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://<%= master %>:54310/</value>
  </property>

  <property>
    <name>io.file.buffer.size</name>
    <value><%= io_file_buffer_size %></value>
  </property>

</configuration>
data/modules/hadoop2/hadoop-env.sh
@@ -0,0 +1,77 @@
# Copyright 2011 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Set Hadoop-specific environment variables here.

# The only required environment variable is JAVA_HOME.  All others are
# optional.  When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.

# The java implementation to use.
export JAVA_HOME="<%= java_home %>"

# The jsvc implementation to use. Jsvc is required to run secure datanodes.
#export JSVC_HOME=${JSVC_HOME}

export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}

# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
  if [ "$HADOOP_CLASSPATH" ]; then
    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
  else
    export HADOOP_CLASSPATH=$f
  fi
done

# The maximum amount of heap to use, in MB. Default is 1000.
#export HADOOP_HEAPSIZE=
#export HADOOP_NAMENODE_INIT_HEAPSIZE=""

# Extra Java runtime options.  Empty by default.
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"

# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"

export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"

# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"

# On secure datanodes, user to run the datanode as after dropping privileges
export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}

# Where log files are stored.  $HADOOP_HOME/logs by default.
#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER

# Where log files are stored in the secure data environment.
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}

# The directory where pid files are stored. /tmp by default.
# NOTE: this should be set to a directory that can only be written to by
#       the user that will run the hadoop daemons.  Otherwise there is the
#       potential for a symlink attack.
export HADOOP_PID_DIR=${HADOOP_PID_DIR}
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}

# A string representing this instance of hadoop. $USER by default.
export HADOOP_IDENT_STRING=$USER
data/modules/hadoop2/hadoop2.rb
@@ -0,0 +1,401 @@
require ::File.dirname(__FILE__) + '/../tarpackage/tarpackage.rb'

module Sfp::Module::Hadoop2Common
  def update_state
    to_model

    super

    @state['running'] = running?
    @state['configured'] = configured?
    @state['pids'] = pids
    @state['java_home'] = java_home

    start if @state['running']
  end

  ##############################
  #
  # Action methods (see Hadoop.sfp)
  #
  ##############################

  def uninstall(p={})
    model = OpenStruct.new(@model)
    # remove hadoop user and group, and then delete hadoop's home directory
    shell "sed -i '/^export HADOOP_HOME/d' /root/.bash_profile"
    shell "rm -rf #{model.home} &&
           rm -rf /tmp/#{model.user}* &&
           rm -rf #{model.data_dir}"

    not installed?
  end

  def configure(p={})
    ### configure Java runtime
    if java_home.length <= 0
      log.info "JRE is not available - installing default-jre..."
      shell "apt-get install -y default-jre"
    end

    ### generate configuration files
    log.info "copy and process template configuration files: {hadoop,yarn}-env.sh, {core,hdfs,yarn,mapred}-site.xml"
    dir = File.dirname(__FILE__)
    ['hadoop-env.sh', 'yarn-env.sh', 'core-site.xml', 'hdfs-site.xml', 'yarn-site.xml', 'mapred-site.xml'].each do |file|
      copy "#{dir}/#{file}", config_dir
      render_file "#{config_dir}/#{file}", map
    end

    ### add user and group
    add_user_group

    ### create data directory
    create_data_directory

    # export hadoop home to root
    log.info "export hadoop home directory to root"
    shell "sed -i '/^export HADOOP_HOME/d' /root/.bashrc"
    shell "echo 'export HADOOP_HOME=#{@model['home']}' >> /root/.bashrc"

    ### set ownership of home directory
    log.info "set ownership of home directory"
    shell "chown -R #{@model['user']}:#{@model['group']} #{@model['home']}"

    configured?
  end


  ##############################
  #
  # Helper methods
  #
  ##############################

  def resolve_link(link)
    begin
      link = ::File.readlink(link)
      link = resolve_link(link)
    rescue
      link
    end
  end

  def java_home
    return @model['java_home'] if @model['java_home'].to_s.strip.length > 0
    java = resolve_link(`which java`.strip)
    return '' if java.length <= 0
    ::File.expand_path(java + '/../../')
  end

  def running?
    status = false
    pids.each { |name,pid|
      log.info "#{name} is not running!" if pid <= 0
      status = true if pid > 0
    }
    status
  end

  def configured?
    return true if not installed?

    return false if Etc.getpwuid(File.stat(@model['home']).uid).name != @model['user']

    return false if java_home.length <= 0

    ### check data directory
    return false if not ::File.exist?(@model['data_dir'])

    ### check configuration files
    dir = File.dirname(__FILE__)
    map = self.map
    ['hadoop-env.sh', 'yarn-env.sh', 'core-site.xml', 'hdfs-site.xml', 'yarn-site.xml', 'mapred-site.xml'].each do |file|
      target = "#{config_dir}/#{file}"
      source = "#{dir}/#{file}"
      if not ::File.exist?(target)
        log.warn "File #{target} does not exist!"
        return false
      end
      if render(::File.read(source), map).hash != File.read(target).hash
        log.warn "File #{target} does not comply!"
        return false
      end
    end

    true
  end

  def config_dir
    @model['home'] + '/etc/hadoop'
  end

  def common_map
    {
      'java_home' => java_home,
      'dfs_namenode_name_dir' => @model['data_dir'] + "/namenode_data",
      'dfs_namenode_hosts' => '',
      'dfs_blocksize' => 268435456,
      'dfs_namenode_handler_count' => 10,
      'dfs_datanode_data_dir' => @model['data_dir'] + "/datanode_data",
      'mapreduce_framework_name' => 'yarn',
      'mapreduce_map_memory_mb' => 1536,
      'mapreduce_map_java_opts' => '-Xmx1024M',
      'mapreduce_reduce_memory_mb' => 3072,
      'mapreduce_reduce_java_opts' => '-Xmx2560M',
      'mapreduce_task_io_sort_mb' => 512,
      'mapreduce_task_io_sort_factor' => 100,
      'mapreduce_reduce_shuffle_parallelcopies' => 50,
      'yarn_acl_enable' => false,
      'yarn_admin_acl' => '*',
      'yarn_log_aggregation_enable' => false,
      'yarn_resourcemanager_scheduler_class' => 'org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler',
      'yarn_scheduler_minimum_allocation_mb' => 256,
      'yarn_scheduler_maximum_allocation_mb' => 1024,
      'yarn_nodemanager_resource_memory_mb' => 512,
      'yarn_nodemanager_vmem_pmem_ratio' => 2,
      'yarn_nodemanager_log_retain_seconds' => 10800,
      'yarn_nodemanager_aux_services' => 'mapreduce_shuffle',
      'yarn_log_aggregation_retain_seconds' => -1,
      'yarn_log_aggregation_retain_check_interval_seconds' => -1,
      'yarn_nodemanager_local_dirs' => @model['data_dir'] + "/yarn_local_dir",
      'yarn_nodemanager_log_dirs' => @model['data_dir'] + "/yarn_log_dir",
      'io_file_buffer_size' => 131072,
    }
  end

  # TODO -- use "useradd" and "groupadd"
  def add_user_group
    user = @model['user']
    group = @model['group']

    ### add group if not exist
    if `grep '^#{group}' /etc/group`.length <= 0
      log.info "adding group #{group}"
      shell "echo '#{group}:x:8000:' >> /etc/group"
    else
      log.info "group #{group} already exists"
    end

    ### add user if not exist
    if `grep '^#{user}' /etc/passwd`.length <= 0
      password = @model['password']
      home = @model['home']

      log.info "adding user #{user}"
      shell "echo '#{user}:x:8000:8000::#{home}:/bin/bash' >> /etc/passwd &&
             echo '#{user}:#{password}:15958:0:99999:7:::' >> /etc/shadow"
    else
      log.info "user #{user} already exists"
    end
  end

  def common_create_data_directory
    data_dir = @model['data_dir']
    return if ::File.exist?(data_dir)

    log.info "create data directory #{data_dir}"

    ### create data directory if not exist
    Dir.mkdir(data_dir)

    ### create data sub directory
    map = self.map
    [map['dfs_namenode_name_dir'], map['dfs_datanode_data_dir'], map['yarn_nodemanager_local_dirs'], map['yarn_nodemanager_log_dirs']].each do |dir|
      log.info "create directory #{dir}"
      Dir.mkdir(dir)
    end

    ### set ownership
    shell "chown -R #{@model['user']}:#{@model['group']} #{data_dir}"
  end
end

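# --- Illustrative aside (hypothetical paths; not part of the gem source):
# a worked example of the java_home fallback above. `which java` usually
# prints a symlink chain, e.g.
#   /usr/bin/java -> /etc/alternatives/java
#                 -> /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
# resolve_link follows that chain to the real binary, and java_home then
# strips the trailing "bin/java" by going two directories up:
#
#   File.expand_path('/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java' + '/../../')
#   # => "/usr/lib/jvm/java-7-openjdk-amd64/jre"
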
class Sfp::Module::Hadoop2Master < Sfp::Module::TarPackage
  include ::Sfp::Module::Hadoop2Common

  Services = ['namenode', 'resourcemanager', 'historyserver', 'proxyserver']

  ##############################
  #
  # Action methods (see TarPackage.sfp)
  #
  ##############################

  def start(p={})
    model = OpenStruct.new(@model)
    pids = self.pids

    if pids['namenode'] <= 0
      cmd = "#{model.home}/sbin/hadoop-daemon.sh --config #{model.home}/etc/hadoop --script hdfs start namenode"
      log.info `su -c '#{cmd}' #{model.user}`
    end

    if pids['resourcemanager'] <= 0
      cmd = "#{model.home}/sbin/yarn-daemon.sh --config #{model.home}/etc/hadoop start resourcemanager"
      log.info `su -c '#{cmd}' #{model.user}`
    end

    if pids['proxyserver'] <= 0
      cmd = "#{model.home}/sbin/yarn-daemon.sh --config #{model.home}/etc/hadoop start proxyserver"
      log.info `su -c '#{cmd}' #{model.user}`
    end

    ### wait until namenode is running
    n = 0
    until self.pids['namenode'] > 0 or n > 10
      n += 1
      sleep 1
    end

    if pids['historyserver'] <= 0
      cmd = "#{model.home}/sbin/mr-jobhistory-daemon.sh --config #{model.home}/etc/hadoop start historyserver"
      log.info `su -c '#{cmd}' #{model.user}`
    end

    running?
  end

  def stop(p={})
    model = OpenStruct.new(@model)
    pids = self.pids

    if pids['historyserver'] > 0
      cmd = "#{model.home}/sbin/mr-jobhistory-daemon.sh --config #{model.home}/etc/hadoop stop historyserver"
      log.info `su -c '#{cmd}' #{model.user}`
    end

    if pids['proxyserver'] > 0
      cmd = "#{model.home}/sbin/yarn-daemon.sh --config #{model.home}/etc/hadoop stop proxyserver"
      log.info `su -c '#{cmd}' #{model.user}`
    end

    if pids['resourcemanager'] > 0
      cmd = "#{model.home}/sbin/yarn-daemon.sh --config #{model.home}/etc/hadoop stop resourcemanager"
      log.info `su -c '#{cmd}' #{model.user}`
    end

    if pids['namenode'] > 0
      cmd = "#{model.home}/sbin/hadoop-daemon.sh --config #{model.home}/etc/hadoop --script hdfs stop namenode"
      log.info `su -c '#{cmd}' #{model.user}`
    end

    sleep 1
    pids.each { |service,pid|
      next if pid <= 0
      begin
        log.info "Killing #{service}"
        Process.kill 9, pid
      rescue
      end
    }

    not running?
  end

  ##############################
  #
  # Helper methods
  #
  ##############################

  def pids
    data = {}
    Services.each { |name|
      data[name] = `ps axf | grep java | grep -v grep | grep hadoop | grep Dproc_#{name}`.to_s.strip.split(' ', 2)[0].to_i
    }
    data
  end

  def map
    map = common_map
    map['master'] = `hostname`.to_s.strip
    map
  end

  def create_data_directory
    return if ::File.exist?(@model['data_dir'])

    common_create_data_directory

    ### format namenode
    log.info "format namenode space"
    cmd = "#{@model['home']}/bin/hdfs namenode -format #{@model['cluster_name']}"
    log.info `su -c '#{cmd}' #{@model['user']}`
  end
end

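# --- Illustrative aside (made-up ps output; not part of the gem source):
# how pids() above turns grep output into "running or not". The first
# field of a matching `ps axf` line is the PID:
#
#   "12345 ?  Sl  1:23 /usr/bin/java -Dproc_namenode ...".split(' ', 2)[0].to_i
#   # => 12345
#
# When grep matches nothing the backtick result is "", and
# "".strip.split(' ', 2)[0] is nil, so nil.to_i yields 0 -- which is why
# every caller treats `pid <= 0` as "daemon not running".
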
class Sfp::Module::Hadoop2Slave < Sfp::Module::TarPackage
  include ::Sfp::Module::Hadoop2Common

  Services = ['datanode', 'nodemanager']

  ##############################
  #
  # Action methods (see TarPackage.sfp)
  #
  ##############################

  def start(p={})
    model = OpenStruct.new(@model)
    pids = self.pids

    if pids['datanode'] <= 0
      cmd = "#{model.home}/sbin/hadoop-daemon.sh --config #{model.home}/etc/hadoop --script hdfs start datanode"
      log.info `su -c '#{cmd}' #{model.user}`
    end

    if pids['nodemanager'] <= 0
      cmd = "#{model.home}/sbin/yarn-daemon.sh --config #{model.home}/etc/hadoop start nodemanager"
      log.info `su -c '#{cmd}' #{model.user}`
    end

    running?
  end

  def stop(p={})
    model = OpenStruct.new(@model)
    pids = self.pids

    if pids['datanode'] > 0
      cmd = "#{model.home}/sbin/hadoop-daemon.sh --config #{model.home}/etc/hadoop --script hdfs stop datanode"
      log.info `su -c '#{cmd}' #{model.user}`
    end

    if pids['nodemanager'] > 0
      cmd = "#{model.home}/sbin/yarn-daemon.sh --config #{model.home}/etc/hadoop stop nodemanager"
      log.info `su -c '#{cmd}' #{model.user}`
    end

    pids.each { |service,pid|
      next if pid <= 0
      log.info "Killing #{service}"
      Process.kill 9, pid
    }

    not running?
  end

  ##############################
  #
  # Helper methods
  #
  ##############################

  def pids
    data = {}
    Services.each { |name|
      data[name] = `ps axf | grep java | grep -v grep | grep hadoop | grep Dproc_#{name}`.to_s.strip.split(' ', 2)[0].to_i
    }
    data
  end

  def map
    map = common_map
    map['master'] = resolve(@model['master'] + '.parent.sfpAddress')
    map
  end

  def create_data_directory
    common_create_data_directory
  end
end