jashmenn-poolparty-extensions 0.1.0 → 0.1.1

@@ -0,0 +1,64 @@
+ <?xml version="1.0"?>
+ <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!-- Put site-specific property overrides in this file. -->
+
+ <configuration>
+
+ <property>
+ <name>hadoop.tmp.dir</name>
+ <!--<value><%= @node[:poolparty][:hadoop_data_dir] %>/hadoop-${user.name}</value> -->
+ <value><%= @node[:poolparty][:hadoop_data_dir] %>/tmp</value>
+ <description>A base for other temporary directories.</description>
+ </property>
+
+ <property>
+ <name>fs.default.name</name>
+ <value>hdfs://<%= @node[:poolparty][:current_master] %>:54310</value>
+ <description>The name of the default file system. A URI whose
+ scheme and authority determine the FileSystem implementation. The
+ uri's scheme determines the config property (fs.SCHEME.impl) naming
+ the FileSystem implementation class. The uri's authority is used to
+ determine the host, port, etc. for a filesystem.</description>
+ </property>
+
+ <property>
+ <name>webinterface.private.actions</name>
+ <value>true</value>
+ </property>
+
+ <!--
+ String jobTrackerStr =
+ conf.get("mapred.job.tracker", "localhost:8012");
+ -->
+
+ <!-- take a look at the properties below -->
+
+ <!--
+ <property>
+ <name>topology.node.switch.mapping.impl</name>
+ <value>org.apache.hadoop.net.ScriptBasedMapping</value>
+ <description> The default implementation of the DNSToSwitchMapping. It
+ invokes a script specified in topology.script.file.name to resolve
+ node names. If the value for topology.script.file.name is not set, the
+ default value of DEFAULT_RACK is returned for all node names.
+ </description>
+ </property>
+
+ <property>
+ <name>topology.script.file.name</name>
+ <value></value>
+ <description> The script name that should be invoked to resolve DNS names to
+ NetworkTopology names. Example: the script would take host.foo.bar as an
+ argument, and return /rack1 as the output.
+ </description>
+ </property>
+ -->
+
+ <property>
+ <name>hadoop.rpc.socket.factory.class.default</name>
+ <value>org.apache.hadoop.net.StandardSocketFactory</value>
+ <final>true</final>
+ </property>
+
+ </configuration>
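
The interesting value above is fs.default.name: once the ERB variables are filled in, every client and daemon resolves HDFS paths through that URI. A quick sanity check of a rendered config, assuming "master" stands in for whatever @node[:poolparty][:current_master] expanded to and that the hadoop CLI is on the PATH:

# Point the fs shell at the rendered default filesystem and list the root.
# "master" is a placeholder hostname; substitute your rendered value.
hadoop fs -fs hdfs://master:54310 -ls /
# Or run against the generated conf directory directly:
hadoop --config /usr/local/hadoop/conf fs -ls /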
@@ -0,0 +1,3 @@
+ # The java implementation to use. Required.
+ export JAVA_HOME=/usr/lib/jvm/java-6-sun
+ export HADOOP_OPTS=-Djava.net.preferIPv4Stack=true
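
Since hadoop-daemon.sh fails with a confusing error when JAVA_HOME points nowhere, it is worth checking the path before starting any daemon. A minimal sketch, assuming the Debian-style java-6-sun location exported above:

# Verify the JVM that hadoop-env.sh points at actually exists and runs.
JAVA_HOME=/usr/lib/jvm/java-6-sun
if [ -x "$JAVA_HOME/bin/java" ]; then
  "$JAVA_HOME/bin/java" -version
else
  echo "no executable java at $JAVA_HOME/bin/java" >&2
  exit 1
fi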
@@ -0,0 +1,9 @@
+ <?xml version="1.0"?>
+ <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!-- Put site-specific property overrides in this file. -->
+
+ <configuration>
+
+
+ </configuration>
@@ -0,0 +1,47 @@
+ <?xml version="1.0"?>
+ <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!-- Put site-specific property overrides in this file. -->
+
+ <configuration>
+
+ <property>
+ <name>dfs.replication</name>
+ <value><%= @node[:poolparty][:number_of_nodes] || 1 %></value>
+ <description>Default block replication.
+ The actual number of replications can be specified when the file is created.
+ The default is used if replication is not specified at create time.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.name.dir</name>
+ <value><%= @node[:poolparty][:hadoop_data_dir] %>/dfs/name</value>
+ </property>
+
+ <property>
+ <name>dfs.data.dir</name>
+ <value><%= @node[:poolparty][:hadoop_data_dir] %>/dfs/data</value>
+ </property>
+
+ <property>
+ <name>dfs.hosts</name>
+ <value>/usr/local/hadoop/conf/slaves</value>
+ <description>
+ When we have a number of users connecting to the hadoop cloud, they may
+ accidentally run "start-all.sh" and then their local machine will try to
+ connect to the cluster. We limit the machines that can be a part of the
+ hdfs to only those listed in the slaves file. See
+ http://www.cloudera.com/blog/2008/12/03/securing-a-hadoop-cluster-through-a-gateway/
+ </description>
+ </property>
+
+
+ <!--
+ <property>
+ <name>dfs.permissions</name>
+ <value>false</value>
+ </property>
+ -->
+
+ </configuration>
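
Because dfs.hosts points at conf/slaves, the namenode only accepts datanodes named in that file; anything else is turned away, which is the gateway-style lockdown the description cites. A sketch of admitting a new worker without a restart, assuming node3.example.com is a hypothetical new host and the commands run on the master:

# Add the new datanode to the allowed-hosts file ...
echo "node3.example.com" >> /usr/local/hadoop/conf/slaves
# ... and tell the namenode to re-read dfs.hosts without restarting.
hadoop dfsadmin -refreshNodes
# The new host should now appear in the datanode report.
hadoop dfsadmin -report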
@@ -0,0 +1,119 @@
+ #! /bin/sh
+ #
+ # skeleton example file to build /etc/init.d/ scripts.
+ # This file should be used to construct scripts for /etc/init.d.
+ #
+ # Written by Miquel van Smoorenburg <miquels@cistron.nl>.
+ # Modified for Debian
+ # by Ian Murdock <imurdock@gnu.ai.mit.edu>.
+ # Further changes by Javier Fernandez-Sanguino <jfs@debian.org>
+ #
+ # Version: @(#)skeleton 1.9 26-Feb-2001 miquels@cistron.nl
+ #
+ ### BEGIN INIT INFO
+ # Provides: hadoop-datanode
+ # Required-Start: $network $local_fs
+ # Required-Stop:
+ # Should-Start: $named
+ # Should-Stop:
+ # Default-Start: 2 3 4 5
+ # Default-Stop: 0 1 6
+ # Short-Description: Hadoop datanode daemon
+ ### END INIT INFO
+
+ set -e
+
+ # Include hadoop defaults if available
+ if [ -f /etc/default/hadoop ] ; then
+ . /etc/default/hadoop
+ fi
+
+ PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+ DAEMON_SCRIPT=$HADOOP_HOME/bin/hadoop-daemon.sh
+ NAME=hadoop-datanode
+ DESC="Hadoop datanode daemon"
+
+ test -x $DAEMON_SCRIPT || exit 0
+
+ LOGDIR=$HADOOP_LOG_DIR
+ PIDFILE=/var/run/hadoop/datanode.pid
+ DODTIME=3 # Time to wait for the server to die, in seconds.
+ # If this value is set too low you might not
+ # let some servers die gracefully and
+ # 'restart' will not work.
+
+ get_running_pid() {
+ pid=$(ps axw -eo pid,command | tr 'A-Z' 'a-z' | grep org.apache.hadoop | grep datanode | grep java | awk '{print $1}')
+ }
+
+ running() {
+ get_running_pid
+ [ -z "$pid" ] && return 1
+ return 0
+ }
+
+ start() {
+ su -s /bin/sh hadoop -c "$HADOOP_HOME/bin/hadoop-daemon.sh start datanode"
+ }
+ stop() {
+ su -s /bin/sh hadoop -c "$HADOOP_HOME/bin/hadoop-daemon.sh stop datanode"
+ }
+
+
+ case "$1" in
+ start)
+ echo -n "Starting $DESC: "
+ start
+ if running ; then
+ echo "$NAME."
+ else
+ echo "ERROR."
+ fi
+ ;;
+ stop)
+ echo -n "Stopping $DESC: "
+ stop
+ if ! running ; then
+ echo "$NAME."
+ else
+ echo "ERROR."
+ fi
+ ;;
+ force-stop)
+ echo -n "Forcefully stopping $DESC: "
+ get_running_pid
+ kill -9 $pid
+ if ! running ; then
+ echo "$NAME."
+ else
+ echo " ERROR."
+ fi
+ ;;
+ force-reload)
+ # check whether $DAEMON is running. If so, restart
+ running && $0 restart
+ ;;
+ restart)
+ echo -n "Restarting $DESC: "
+ stop
+ [ -n "$DODTIME" ] && sleep $DODTIME
+ $0 start
+ ;;
+ status)
+ echo -n "$NAME is "
+ if running ; then
+ echo "running"
+ else
+ echo "not running."
+ exit 1
+ fi
+ ;;
+ *)
+ N=/etc/init.d/$NAME
+ # echo "Usage: $N {start|stop|restart|reload|force-reload}" >&2
+ echo "Usage: $N {start|stop|restart|force-reload|status|force-stop}" >&2
+ exit 1
+ ;;
+ esac
+
+ exit 0
@@ -0,0 +1,119 @@
+ #! /bin/sh
+ #
+ # skeleton example file to build /etc/init.d/ scripts.
+ # This file should be used to construct scripts for /etc/init.d.
+ #
+ # Written by Miquel van Smoorenburg <miquels@cistron.nl>.
+ # Modified for Debian
+ # by Ian Murdock <imurdock@gnu.ai.mit.edu>.
+ # Further changes by Javier Fernandez-Sanguino <jfs@debian.org>
+ #
+ # Version: @(#)skeleton 1.9 26-Feb-2001 miquels@cistron.nl
+ #
+ ### BEGIN INIT INFO
+ # Provides: hadoop-jobtracker
+ # Required-Start: $network $local_fs
+ # Required-Stop:
+ # Should-Start: $named
+ # Should-Stop:
+ # Default-Start: 2 3 4 5
+ # Default-Stop: 0 1 6
+ # Short-Description: Hadoop jobtracker daemon
+ ### END INIT INFO
+
+ set -e
+
+ # Include hadoop defaults if available
+ if [ -f /etc/default/hadoop ] ; then
+ . /etc/default/hadoop
+ fi
+
+ PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+ DAEMON_SCRIPT=$HADOOP_HOME/bin/hadoop-daemon.sh
+ NAME=hadoop-jobtracker
+ DESC="Hadoop jobtracker daemon"
+
+ test -x $DAEMON_SCRIPT || exit 0
+
+ LOGDIR=$HADOOP_LOG_DIR
+ PIDFILE=/var/run/hadoop/jobtracker.pid
+ DODTIME=3 # Time to wait for the server to die, in seconds.
+ # If this value is set too low you might not
+ # let some servers die gracefully and
+ # 'restart' will not work.
+
+ get_running_pid() {
+ pid=$(ps axw -eo pid,command | tr 'A-Z' 'a-z' | grep org.apache.hadoop | grep jobtracker | grep java | awk '{print $1}')
+ }
+
+ running() {
+ get_running_pid
+ [ -z "$pid" ] && return 1
+ return 0
+ }
+
+ start() {
+ su -s /bin/sh hadoop -c "$HADOOP_HOME/bin/hadoop-daemon.sh start jobtracker"
+ }
+ stop() {
+ su -s /bin/sh hadoop -c "$HADOOP_HOME/bin/hadoop-daemon.sh stop jobtracker"
+ }
+
+
+ case "$1" in
+ start)
+ echo -n "Starting $DESC: "
+ start
+ if running ; then
+ echo "$NAME."
+ else
+ echo "ERROR."
+ fi
+ ;;
+ stop)
+ echo -n "Stopping $DESC: "
+ stop
+ if ! running ; then
+ echo "$NAME."
+ else
+ echo "ERROR."
+ fi
+ ;;
+ force-stop)
+ echo -n "Forcefully stopping $DESC: "
+ get_running_pid
+ kill -9 $pid
+ if ! running ; then
+ echo "$NAME."
+ else
+ echo " ERROR."
+ fi
+ ;;
+ force-reload)
+ # check whether $DAEMON is running. If so, restart
+ running && $0 restart
+ ;;
+ restart)
+ echo -n "Restarting $DESC: "
+ stop
+ [ -n "$DODTIME" ] && sleep $DODTIME
+ $0 start
+ ;;
+ status)
+ echo -n "$NAME is "
+ if running ; then
+ echo "running"
+ else
+ echo "not running."
+ exit 1
+ fi
+ ;;
+ *)
+ N=/etc/init.d/$NAME
+ # echo "Usage: $N {start|stop|restart|reload|force-reload}" >&2
+ echo "Usage: $N {start|stop|restart|force-reload|status|force-stop}" >&2
+ exit 1
+ ;;
+ esac
+
+ exit 0
@@ -0,0 +1,119 @@
+ #! /bin/sh
+ #
+ # skeleton example file to build /etc/init.d/ scripts.
+ # This file should be used to construct scripts for /etc/init.d.
+ #
+ # Written by Miquel van Smoorenburg <miquels@cistron.nl>.
+ # Modified for Debian
+ # by Ian Murdock <imurdock@gnu.ai.mit.edu>.
+ # Further changes by Javier Fernandez-Sanguino <jfs@debian.org>
+ #
+ # Version: @(#)skeleton 1.9 26-Feb-2001 miquels@cistron.nl
+ #
+ ### BEGIN INIT INFO
+ # Provides: hadoop-namenode
+ # Required-Start: $network $local_fs
+ # Required-Stop:
+ # Should-Start: $named
+ # Should-Stop:
+ # Default-Start: 2 3 4 5
+ # Default-Stop: 0 1 6
+ # Short-Description: Hadoop namenode daemon
+ ### END INIT INFO
+
+ set -e
+
+ # Include hadoop defaults if available
+ if [ -f /etc/default/hadoop ] ; then
+ . /etc/default/hadoop
+ fi
+
+ PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+ DAEMON_SCRIPT=$HADOOP_HOME/bin/hadoop-daemon.sh
+ NAME=hadoop-namenode
+ DESC="Hadoop namenode daemon"
+
+ test -x $DAEMON_SCRIPT || exit 0
+
+ LOGDIR=$HADOOP_LOG_DIR
+ PIDFILE=/var/run/hadoop/namenode.pid
+ DODTIME=3 # Time to wait for the server to die, in seconds.
+ # If this value is set too low you might not
+ # let some servers die gracefully and
+ # 'restart' will not work.
+
+ get_running_pid() {
+ pid=$(ps axw -eo pid,command | tr 'A-Z' 'a-z' | grep org.apache.hadoop | grep namenode | grep java | awk '{print $1}')
+ }
+
+ running() {
+ get_running_pid
+ [ -z "$pid" ] && return 1
+ return 0
+ }
+
+ start() {
+ su -s /bin/sh hadoop -c "$HADOOP_HOME/bin/hadoop-daemon.sh start namenode"
+ }
+ stop() {
+ su -s /bin/sh hadoop -c "$HADOOP_HOME/bin/hadoop-daemon.sh stop namenode"
+ }
+
+
+ case "$1" in
+ start)
+ echo -n "Starting $DESC: "
+ start
+ if running ; then
+ echo "$NAME."
+ else
+ echo "ERROR."
+ fi
+ ;;
+ stop)
+ echo -n "Stopping $DESC: "
+ stop
+ if ! running ; then
+ echo "$NAME."
+ else
+ echo "ERROR."
+ fi
+ ;;
+ force-stop)
+ echo -n "Forcefully stopping $DESC: "
+ get_running_pid
+ kill -9 $pid
+ if ! running ; then
+ echo "$NAME."
+ else
+ echo " ERROR."
+ fi
+ ;;
+ force-reload)
+ # check whether $DAEMON is running. If so, restart
+ running && $0 restart
+ ;;
+ restart)
+ echo -n "Restarting $DESC: "
+ stop
+ [ -n "$DODTIME" ] && sleep $DODTIME
+ $0 start
+ ;;
+ status)
+ echo -n "$NAME is "
+ if running ; then
+ echo "running"
+ else
+ echo "not running."
+ exit 1
+ fi
+ ;;
+ *)
+ N=/etc/init.d/$NAME
+ # echo "Usage: $N {start|stop|restart|reload|force-reload}" >&2
+ echo "Usage: $N {start|stop|restart|force-reload|status|force-stop}" >&2
+ exit 1
+ ;;
+ esac
+
+ exit 0
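
The three init scripts above are identical apart from the daemon they manage, so they are all driven the same way. A hedged usage sketch on a Debian-flavored master node (workers would use the datanode script instead):

# Register the scripts in the default runlevels (Debian's update-rc.d).
update-rc.d hadoop-namenode defaults
update-rc.d hadoop-jobtracker defaults

# Day-to-day control. "status" exits non-zero when the daemon is down,
# which makes these scripts usable from monitoring or deploy tooling.
/etc/init.d/hadoop-namenode start
/etc/init.d/hadoop-namenode status || echo "namenode is not running"
/etc/init.d/hadoop-jobtracker restart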