auser-poolparty-extensions 0.0.7 → 0.1.1

Files changed (30)
  1. data/README.rdoc +15 -0
  2. data/Rakefile +2 -2
  3. data/VERSION.yml +2 -2
  4. data/lib/extensions/bash_alias.rb +3 -0
  5. data/lib/extensions/convenience_helpers.rb +89 -0
  6. data/lib/extensions/development_gem_package.rb +3 -2
  7. data/lib/extensions/ganglia/ganglia.rb +219 -0
  8. data/lib/extensions/ganglia/templates/bin/gmetad.erb +38 -0
  9. data/lib/extensions/ganglia/templates/bin/gmond.erb +38 -0
  10. data/lib/extensions/ganglia/templates/ganglia-web-conf.php.erb +191 -0
  11. data/lib/extensions/ganglia/templates/gmetad.conf.erb +116 -0
  12. data/lib/extensions/ganglia/templates/gmond.conf.erb +426 -0
  13. data/lib/extensions/ganglia/templates/hadoop-metrics.properties.erb +65 -0
  14. data/lib/extensions/hadoop/hadoop.rb +427 -0
  15. data/lib/extensions/hadoop/templates/core-site.xml.erb +62 -0
  16. data/lib/extensions/hadoop/templates/hadoop-env.sh +4 -0
  17. data/lib/extensions/hadoop/templates/hadoop-site.xml.erb +9 -0
  18. data/lib/extensions/hadoop/templates/hadoop_hosts.erb +0 -0
  19. data/lib/extensions/hadoop/templates/hdfs-site.xml.erb +48 -0
  20. data/lib/extensions/hadoop/templates/init.d/hadoop-datanode +125 -0
  21. data/lib/extensions/hadoop/templates/init.d/hadoop-jobtracker +125 -0
  22. data/lib/extensions/hadoop/templates/init.d/hadoop-namenode +124 -0
  23. data/lib/extensions/hadoop/templates/init.d/hadoop-secondarynamenode +124 -0
  24. data/lib/extensions/hadoop/templates/init.d/hadoop-tasktracker +124 -0
  25. data/lib/extensions/hadoop/templates/jvm.conf +12 -0
  26. data/lib/extensions/hadoop/templates/log4j.properties.erb +94 -0
  27. data/lib/extensions/hadoop/templates/mapred-site.xml.erb +70 -0
  28. data/lib/extensions/hive/hive.rb +138 -0
  29. data/lib/poolparty-extensions.rb +5 -2
  30. metadata +28 -4
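
The headline additions in this release are three new PoolParty plugins — ganglia (lib/extensions/ganglia/ganglia.rb), hadoop (lib/extensions/hadoop/hadoop.rb) and hive (lib/extensions/hive/hive.rb) — together with the ERB templates and init.d scripts they install. As rough orientation only, here is a minimal clouds.rb sketch; the plugin method names are assumptions based on the file names in this diff, not something this diff confirms:

    # Hypothetical usage sketch -- the `ganglia` and `hadoop` calls are assumed
    # from the plugin file names above; check lib/extensions/*/*.rb for the
    # DSL methods the plugins actually register.
    require "poolparty"
    require "poolparty-extensions"

    pool :data do
      cloud :hadoop_master do
        instances 1
        ganglia   # assumed: installs gmond/gmetad from the templates listed above
        hadoop    # assumed: installs the namenode/jobtracker/datanode init.d scripts
      end
    end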
data/lib/extensions/ganglia/templates/ganglia-web-conf.php.erb
@@ -0,0 +1,191 @@
+ <?php
+ # $Id: conf.php.in 1831 2008-09-26 12:18:54Z carenas $
+ #
+ # Gmetad-webfrontend version. Used to check for updates.
+ #
+ include_once "./version.php";
+
+ #
+ # The name of the directory in "./templates" which contains the
+ # templates that you want to use. Templates are like a skin for the
+ # site that can alter its look and feel.
+ #
+ $template_name = "default";
+
+ #
+ # If you installed gmetad in a directory other than the default
+ # make sure you change it here.
+ #
+
+ # Where gmetad stores the rrd archives.
+ $gmetad_root = "/var/lib/ganglia";
+ $rrds = "$gmetad_root/rrds";
+
+ # Leave this alone if rrdtool is installed in $gmetad_root,
+ # otherwise, change it if it is installed elsewhere (like /usr/bin)
+ define("RRDTOOL", "/usr/bin/rrdtool");
+
+ # Location for modular-graph files.
+ $graphdir='./graph.d';
+
+ #
+ # If you want to grab data from a different ganglia source specify it here.
+ # Although, it would be strange to alter the IP since the Round-Robin
+ # databases need to be local to be read.
+ #
+ $ganglia_ip = "127.0.0.1";
+ $ganglia_port = 8652;
+
+ #
+ # The maximum number of dynamic graphs to display. If you set this
+ # to 0 (the default) all graphs will be shown. This option is
+ # helpful if you are viewing the web pages from a browser with a
+ # small pipe.
+ #
+ $max_graphs = 0;
+
+ #
+ # In the Cluster View this sets the default number of columns used to
+ # display the host grid below the summary graphs.
+ #
+ $hostcols = 4;
+
+ #
+ # In the Host View this sets the default number of columns used to
+ # display the metric grid below the summary graphs.
+ #
+ $metriccols = 2;
+
+ #
+ # Turn on and off the Grid Snapshot. Now that we have a
+ # hierarchical snapshot (per-cluster instead of per-node) on
+ # the meta page this makes more sense. Most people will want this
+ # on.
+ #
+ $show_meta_snapshot = "yes";
+
+ #
+ # The default refresh frequency on pages.
+ #
+ $default_refresh = 300;
+
+ #
+ # Colors for the CPU report graph
+ #
+ $cpu_user_color = "3333bb";
+ $cpu_nice_color = "ffea00";
+ $cpu_system_color = "dd0000";
+ $cpu_wio_color = "ff8a60";
+ $cpu_idle_color = "e2e2f2";
+
+ #
+ # Colors for the MEMORY report graph
+ #
+ $mem_used_color = "5555cc";
+ $mem_shared_color = "0000aa";
+ $mem_cached_color = "33cc33";
+ $mem_buffered_color = "99ff33";
+ $mem_free_color = "00ff00";
+ $mem_swapped_color = "9900CC";
+
+ #
+ # Colors for the LOAD report graph
+ #
+ $load_one_color = "CCCCCC";
+ $proc_run_color = "0000FF";
+ $cpu_num_color = "FF0000";
+ $num_nodes_color = "00FF00";
+
+ # Other colors
+ $jobstart_color = "ff3300";
+
+ #
+ # Colors for the load ranks.
+ #
+ $load_colors = array(
+ "100+" => "ff634f",
+ "75-100" =>"ffa15e",
+ "50-75" => "ffde5e",
+ "25-50" => "caff98",
+ "0-25" => "e2ecff",
+ "down" => "515151"
+ );
+
+ #
+ # Load scaling
+ #
+ $load_scale = 1.0;
+
+ #
+ # Default color for single metric graphs
+ #
+ $default_metric_color = "555555";
+
+ #
+ # Default metric
+ #
+ $default_metric = "load_one";
+
+ #
+ # remove the domainname from the FQDN hostnames in graphs
+ # (to help with long hostnames in small charts)
+ #
+ $strip_domainname = false;
+
+ #
+ # Optional summary graphs
+ #
+ #$optional_graphs = array('packet');
+
+ #
+ # Time ranges
+ # Each value is the # of seconds in that range.
+ #
+ $time_ranges = array(
+ 'hour'=>3600,
+ 'day'=>86400,
+ 'week'=>604800,
+ 'month'=>2419200,
+ 'year'=>31449600
+ );
+
+ # this key must exist in $time_ranges
+ $default_time_range = 'hour';
+
+ #
+ # Graph sizes
+ #
+ $graph_sizes = array(
+ 'small'=>array(
+ 'height'=>40,
+ 'width'=>130,
+ 'fudge_0'=>0,
+ 'fudge_1'=>0,
+ 'fudge_2'=>0
+ ),
+ 'medium'=>array(
+ 'height'=>75,
+ 'width'=>300,
+ 'fudge_0'=>0,
+ 'fudge_1'=>14,
+ 'fudge_2'=>28
+ ),
+ 'large'=>array(
+ 'height'=>600,
+ 'width'=>800,
+ 'fudge_0'=>0,
+ 'fudge_1'=>0,
+ 'fudge_2'=>0
+ ),
+ # this was the default value when no other size was provided.
+ 'default'=>array(
+ 'height'=>100,
+ 'width'=>400,
+ 'fudge_0'=>0,
+ 'fudge_1'=>0,
+ 'fudge_2'=>0
+ )
+ );
+ $default_graph_size = 'default';
+ $graph_sizes_keys = array_keys( $graph_sizes );
+ ?>
data/lib/extensions/ganglia/templates/gmetad.conf.erb
@@ -0,0 +1,116 @@
+ # This is an example of a Ganglia Meta Daemon configuration file
+ # http://ganglia.sourceforge.net/
+ #
+ # $Id: gmetad.conf.in 1639 2008-08-09 23:30:32Z carenas $
+ #
+ #-------------------------------------------------------------------------------
+ # Setting the debug_level to 1 will keep daemon in the foreground and
+ # show only error messages. Setting this value higher than 1 will make
+ # gmetad output debugging information and stay in the foreground.
+ # default: 0
+ # debug_level 10
+ #
+ #-------------------------------------------------------------------------------
+ # What to monitor. The most important section of this file.
+ #
+ # The data_source tag specifies either a cluster or a grid to
+ # monitor. If we detect the source is a cluster, we will maintain a complete
+ # set of RRD databases for it, which can be used to create historical
+ # graphs of the metrics. If the source is a grid (it comes from another gmetad),
+ # we will only maintain summary RRDs for it.
+ #
+ # Format:
+ # data_source "my cluster" [polling interval] address1:port address2:port ...
+ #
+ # The keyword 'data_source' must immediately be followed by a unique
+ # string which identifies the source, then an optional polling interval in
+ # seconds. The source will be polled at this interval on average.
+ # If the polling interval is omitted, 15sec is assumed.
+ #
+ # A list of machines which service the data source follows, in the
+ # format ip:port, or name:port. If a port is not specified then 8649
+ # (the default gmond port) is assumed.
+ # default: There is no default value
+ #
+ # data_source "my cluster" 10 localhost my.machine.edu:8649 1.2.3.5:8655
+ # data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
+ # data_source "another source" 1.3.4.7:8655 1.3.4.8
+ # data_source "my cluster" localhost
+ #
+ #
+ <%= @node[:poolparty][:ganglia_gmetad_data_sources] %>
+
+ #
+ # Round-Robin Archives
+ # You can specify custom Round-Robin archives here (defaults are listed below)
+ #
+ # RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+ # "RRA:AVERAGE:0.5:5760:374"
+ #
+
+ #
+ #-------------------------------------------------------------------------------
+ # Scalability mode. If on, we summarize over downstream grids, and respect
+ # authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
+ # in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
+ # we are the "authority" on data source feeds. This approach does not scale to
+ # large groups of clusters, but is provided for backwards compatibility.
+ # default: on
+ # scalable off
+ #
+ #-------------------------------------------------------------------------------
+ # The name of this Grid. All the data sources above will be wrapped in a GRID
+ # tag with this name.
+ # default: unspecified
+ gridname "<%= @node[:poolparty][:ganglia_pool_name] %>"
+ #
+ #-------------------------------------------------------------------------------
+ # The authority URL for this grid. Used by other gmetads to locate graphs
+ # for our data sources. Generally points to a ganglia/
+ # website on this machine.
+ # default: "http://hostname/ganglia/",
+ # where hostname is the name of this machine, as defined by gethostname().
+ # authority "http://mycluster.org/newprefix/"
+ #
+ #-------------------------------------------------------------------------------
+ # List of machines this gmetad will share XML with. Localhost
+ # is always trusted.
+ # default: There is no default value
+ # trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
+ #
+ #-------------------------------------------------------------------------------
+ # If you want any host which connects to the gmetad XML to receive
+ # data, then set this value to "on"
+ # default: off
+ # all_trusted on
+ #
+ #-------------------------------------------------------------------------------
+ # If you don't want gmetad to setuid then set this to off
+ # default: on
+ # setuid off
+ #
+ #-------------------------------------------------------------------------------
+ # User gmetad will setuid to (defaults to "nobody")
+ # default: "nobody"
+ setuid_username "ganglia"
+ #
+ #-------------------------------------------------------------------------------
+ # The port gmetad will answer requests for XML
+ # default: 8651
+ # xml_port 8651
+ #
+ #-------------------------------------------------------------------------------
+ # The port gmetad will answer queries for XML. This facility allows
+ # simple subtree and summation views of the XML tree.
+ # default: 8652
+ interactive_port 8652
+ #
+ #-------------------------------------------------------------------------------
+ # The number of threads answering XML requests
+ # default: 4
+ # server_threads 10
+ #
+ #-------------------------------------------------------------------------------
+ # Where gmetad stores its round-robin databases
+ # default: "/var/lib/ganglia/rrds"
+ # rrd_rootdir "/some/other/place"
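
The gmetad.conf template above is stock gmetad configuration except for two ERB interpolations: the data_source list and the gridname come from @node[:poolparty][:ganglia_gmetad_data_sources] and @node[:poolparty][:ganglia_pool_name]. A minimal sketch of that rendering step follows; ERB is Ruby's standard library and the attribute keys are taken from the template itself, but the context class and sample values are illustrative, not this gem's actual rendering code:

    # Illustrative only: renders an @node-driven ERB template the way the
    # templates in this release expect. PoolParty's provisioning layer does
    # the real rendering; this just shows the mechanics.
    require "erb"

    class NodeContext
      def initialize(node)
        @node = node                      # templates read @node[:poolparty][...]
      end

      def render(template_path)
        ERB.new(File.read(template_path)).result(binding)
      end
    end

    node = {
      :poolparty => {
        :ganglia_gmetad_data_sources => 'data_source "my cluster" 10 localhost',
        :ganglia_pool_name           => "example_pool"
      }
    }

    puts NodeContext.new(node).render("gmetad.conf.erb")

With those two keys set, the <%= ... %> lines expand into a concrete data_source entry and a quoted gridname.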
data/lib/extensions/ganglia/templates/gmond.conf.erb
@@ -0,0 +1,426 @@
+ /* This configuration is as close to 2.5.x default behavior as possible
+ The values closely match ./gmond/metric.h definitions in 2.5.x */
+ /* some comments borrowed from http://www-user.tu-chemnitz.de/~aas/wiki/lib/exe/fetch.php?id=projekte%3Aganglia&cache=cache&media=gmond.conf.template */
+ globals {
+ daemonize = yes
+ setuid = yes
+ user = ganglia
+ debug_level = 0
+ max_udp_msg_len = 1472
+ mute = no
+ deaf = no
+ allow_extra_data = yes
+ host_dmax = 0 /*secs */
+ cleanup_threshold = 300 /*secs */
+ gexec = no
+ send_metadata_interval = 10
+ }
+
+ /*
+ * The cluster attributes specified will be used as part of the <CLUSTER>
+ * tag that will wrap all hosts collected by this instance.
+ */
+ cluster {
+ name = "<%= @node[:poolparty][:ganglia_cloud_name] %>"
+ owner = "unspecified"
+ latlong = "unspecified"
+ url = "unspecified"
+ }
+
+ /* The host section describes attributes of the host, like the location */
+ host {
+ location = "unspecified"
+ }
+
+ /*
+ UDP send channels
+
+ Gmond will use UDP send channel(s) for sending monitoring
+ data it collects from _the local node_ to other nodes.
+
+ There are two different kinds of UDP send channels:
+ - unicast channel:
+ specifies a single host that is supposed to receive monitoring
+ data from other client nodes
+
+ - multicast channel:
+ specifies a multicast domain that will be used to spread monitoring data
+
+ Note:
+ - the type of channel that should be used depends on the kind of monitoring
+ architecture you want to implement
+ - make sure to set 'mute = no' in the global configuration section!
+ - multiple UDP send channels can be configured
+ - gmond will send its data through all channels simultaneously
+
+ See "Monitoringsoftware Ganglia - Installation, Konfiguration und Bewertung",
+ section "Architektur der Monitoringsoftware Ganglia" for further details!
+
+ Unicast UDP send channels:
+ - the target host has to be a node running gmond
+ - the gmond running on the target host has to be configured to
+ accept data from other gmonds
+ */
+
+ /* Feel free to specify as many udp_send_channels as you like. Gmond
+ used to only support having a single channel */
+ udp_send_channel {
+ /* mcast_join = <%= @node[:poolparty][:ganglia_masters_ip] %> */
+ /* host = <%= @node[:poolparty][:ganglia_masters_ip] %> */
+ host = <%= @node[:poolparty][:ganglia_first_node_in_clusters_ip] %>
+ port = 8649
+ ttl = 1
+ }
+
+ /*
+ UDP receive channels
+
+ Gmond will 'listen' on all UDP receive channels for incoming
+ monitoring data. Incoming data from other gmonds will be added
+ to the cluster state maintained by the local node.
+
+ There are two different kinds of UDP receive channels:
+ - unicast channel:
+ specifies a single UDP port that will accept incoming data
+
+ - multicast channel:
+ tells gmond to join a multicast domain and accept incoming
+ data sent by other members of the same domain
+
+ Note:
+ - the type of channel that should be used depends on the kind of monitoring
+ architecture you want to implement
+ - make sure to set 'deaf = no' in the global configuration section!
+ - multiple UDP receive channels can be configured
+ - gmond will accept data on all channels simultaneously
+
+ See "Monitoringsoftware Ganglia - Installation, Konfiguration und Bewertung",
+ section "Architektur der Monitoringsoftware Ganglia" for further details!
+
+ You can specify as many udp_recv_channels as you like. */
+ udp_recv_channel {
+ /* mcast_join = <%= @node[:poolparty][:ganglia_this_nodes_private_ip] %> */
+ port = 8649
+ bind = <%= @node[:poolparty][:ganglia_this_nodes_private_ip] %>
+ /* bind = 0.0.0.0 */
+ family = inet4
+
+ acl {
+ default = "allow"
+ }
+ }
+ <%# if @node[:poolparty][:ganglia_gmond_is_master] %>
+ <%# end %>
+ /*
+ TCP accept channels
+
+ Gmond will listen on all TCP accept channels for incoming connections
+ from nodes running gmetad. When a gmetad connects, gmond will transmit
+ _all monitoring data_ it has collected so far (i.e. the state of the
+ _whole cluster_ as far as gmond knows about it).
+
+ Note:
+ - make sure to set 'mute = no' in the global configuration section
+ - multiple TCP accept channels can be configured
+ - gmond will accept requests on all channels simultaneously
+
+ You can specify as many tcp_accept_channels as you like to share
+ an xml description of the state of the cluster */
+ tcp_accept_channel {
+ port = 8649
+ bind = <%= @node[:poolparty][:ganglia_this_nodes_private_ip] %>
+ acl {
+ default = "allow"
+ }
+ }
+
+ /* Each metrics module that is referenced by gmond must be specified and
+ loaded. If the module has been statically linked with gmond, it does
+ not require a load path. However all dynamically loadable modules must
+ include a load path. */
+ modules {
+ module {
+ name = "core_metrics"
+ }
+ module {
+ name = "cpu_module"
+ path = "modcpu.so"
+ }
+ module {
+ name = "disk_module"
+ path = "moddisk.so"
+ }
+ module {
+ name = "load_module"
+ path = "modload.so"
+ }
+ module {
+ name = "mem_module"
+ path = "modmem.so"
+ }
+ module {
+ name = "net_module"
+ path = "modnet.so"
+ }
+ module {
+ name = "proc_module"
+ path = "modproc.so"
+ }
+ module {
+ name = "sys_module"
+ path = "modsys.so"
+ }
+ }
+
+ include ('/etc/ganglia/conf.d/*.conf')
+
+ /* The old internal 2.5.x metric array has been replaced by the following
+ collection_group directives. What follows is the default behavior for
+ collecting and sending metrics that is as close to 2.5.x behavior as
+ possible. */
+
+ /* This collection group will cause a heartbeat (or beacon) to be sent every
+ 20 seconds. In the heartbeat is the GMOND_STARTED data which expresses
+ the age of the running gmond. */
+ collection_group {
+ collect_once = yes
+ time_threshold = 20
+ metric {
+ name = "heartbeat"
+ }
+ }
+
+ /* This collection group will send general info about this host every
+ 1200 secs.
+ This information doesn't change between reboots and is only collected
+ once. */
+ collection_group {
+ collect_once = yes
+ time_threshold = 1200
+ metric {
+ name = "cpu_num"
+ title = "CPU Count"
+ }
+ metric {
+ name = "cpu_speed"
+ title = "CPU Speed"
+ }
+ metric {
+ name = "mem_total"
+ title = "Memory Total"
+ }
+ /* Should this be here? Swap can be added/removed between reboots. */
+ metric {
+ name = "swap_total"
+ title = "Swap Space Total"
+ }
+ metric {
+ name = "boottime"
+ title = "Last Boot Time"
+ }
+ metric {
+ name = "machine_type"
+ title = "Machine Type"
+ }
+ metric {
+ name = "os_name"
+ title = "Operating System"
+ }
+ metric {
+ name = "os_release"
+ title = "Operating System Release"
+ }
+ metric {
+ name = "location"
+ title = "Location"
+ }
+ }
+
+ /* This collection group will send the status of gexecd for this host
+ every 300 secs.*/
+ /* Unlike 2.5.x the default behavior is to report gexecd OFF. */
+ collection_group {
+ collect_once = yes
+ time_threshold = 300
+ metric {
+ name = "gexec"
+ title = "Gexec Status"
+ }
+ }
+
+ /* This collection group will collect the CPU status info every 20 secs.
+ The time threshold is set to 90 seconds. In honesty, this
+ time_threshold could be set significantly higher to reduce
+ unnecessary network chatter. */
+ collection_group {
+ collect_every = 20
+ time_threshold = 90
+ /* CPU status */
+ metric {
+ name = "cpu_user"
+ value_threshold = "1.0"
+ title = "CPU User"
+ }
+ metric {
+ name = "cpu_system"
+ value_threshold = "1.0"
+ title = "CPU System"
+ }
+ metric {
+ name = "cpu_idle"
+ value_threshold = "5.0"
+ title = "CPU Idle"
+ }
+ metric {
+ name = "cpu_nice"
+ value_threshold = "1.0"
+ title = "CPU Nice"
+ }
+ metric {
+ name = "cpu_aidle"
+ value_threshold = "5.0"
+ title = "CPU aidle"
+ }
+ metric {
+ name = "cpu_wio"
+ value_threshold = "1.0"
+ title = "CPU wio"
+ }
+ /* The next two metrics are optional if you want more detail...
+ ... since they are accounted for in cpu_system.
+ metric {
+ name = "cpu_intr"
+ value_threshold = "1.0"
+ title = "CPU intr"
+ }
+ metric {
+ name = "cpu_sintr"
+ value_threshold = "1.0"
+ title = "CPU sintr"
+ }
+ */
+ }
+
+ collection_group {
+ collect_every = 20
+ time_threshold = 90
+ /* Load Averages */
+ metric {
+ name = "load_one"
+ value_threshold = "1.0"
+ title = "One Minute Load Average"
+ }
+ metric {
+ name = "load_five"
+ value_threshold = "1.0"
+ title = "Five Minute Load Average"
+ }
+ metric {
+ name = "load_fifteen"
+ value_threshold = "1.0"
+ title = "Fifteen Minute Load Average"
+ }
+ }
+
+ /* This group collects the number of running and total processes */
+ collection_group {
+ collect_every = 80
+ time_threshold = 950
+ metric {
+ name = "proc_run"
+ value_threshold = "1.0"
+ title = "Total Running Processes"
+ }
+ metric {
+ name = "proc_total"
+ value_threshold = "1.0"
+ title = "Total Processes"
+ }
+ }
+
+ /* This collection group grabs the volatile memory metrics every 40 secs and
+ sends them at least every 180 secs. This time_threshold can be increased
+ significantly to reduce unneeded network traffic. */
+ collection_group {
+ collect_every = 40
+ time_threshold = 180
+ metric {
+ name = "mem_free"
+ value_threshold = "1024.0"
+ title = "Free Memory"
+ }
+ metric {
+ name = "mem_shared"
+ value_threshold = "1024.0"
+ title = "Shared Memory"
+ }
+ metric {
+ name = "mem_buffers"
+ value_threshold = "1024.0"
+ title = "Memory Buffers"
+ }
+ metric {
+ name = "mem_cached"
+ value_threshold = "1024.0"
+ title = "Cached Memory"
+ }
+ metric {
+ name = "swap_free"
+ value_threshold = "1024.0"
+ title = "Free Swap Space"
+ }
+ }
+
+ collection_group {
+ collect_every = 40
+ time_threshold = 300
+ metric {
+ name = "bytes_out"
+ value_threshold = 4096
+ title = "Bytes Sent"
+ }
+ metric {
+ name = "bytes_in"
+ value_threshold = 4096
+ title = "Bytes Received"
+ }
+ metric {
+ name = "pkts_in"
+ value_threshold = 256
+ title = "Packets Received"
+ }
+ metric {
+ name = "pkts_out"
+ value_threshold = 256
+ title = "Packets Sent"
+ }
+ }
+
+ /* Different than 2.5.x default since the old config made no sense */
+ collection_group {
+ collect_every = 1800
+ time_threshold = 3600
+ metric {
+ name = "disk_total"
+ value_threshold = 1.0
+ title = "Total Disk Space"
+ }
+ }
+
+ collection_group {
+ collect_every = 40
+ time_threshold = 180
+ metric {
+ name = "disk_free"
+ value_threshold = 1.0
+ title = "Disk Space Available"
+ }
+ metric {
+ name = "part_max_used"
+ value_threshold = 1.0
+ title = "Maximum Disk Space Used"
+ }
+ }
+
+
+ /* vim: ft=javascript */
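
Taken together, the channels above wire each gmond into a unicast star: every node sends its samples to the first node in the cluster (ganglia_first_node_in_clusters_ip) over UDP 8649, listens for peers on its own private IP, and serves the cluster XML to gmetad over TCP 8649. For a hypothetical worker node whose first-node and private IPs render to 10.0.0.1 and 10.0.0.2 (values chosen only for illustration), the interpolated blocks reduce to:

    /* Illustrative rendering only: 10.0.0.1 and 10.0.0.2 are placeholder
       values for the two ERB attributes used above. */
    udp_send_channel {
      host = 10.0.0.1
      port = 8649
      ttl = 1
    }

    udp_recv_channel {
      port = 8649
      bind = 10.0.0.2
      family = inet4
      acl {
        default = "allow"
      }
    }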