rubber 2.6.5 → 2.7.0

Files changed (71)
  1. checksums.yaml +4 -4
  2. data/.travis.yml +2 -0
  3. data/CHANGELOG +37 -0
  4. data/lib/rubber/cloud/vsphere.rb +4 -0
  5. data/lib/rubber/generator.rb +37 -19
  6. data/lib/rubber/recipes/rubber/security_groups.rb +1 -1
  7. data/lib/rubber/recipes/rubber/volumes.rb +1 -1
  8. data/lib/rubber/version.rb +1 -1
  9. data/templates/delayed_job/config/rubber/deploy-delayed_job.rb +44 -0
  10. data/templates/delayed_job/config/rubber/role/delayed_job/monit-delayed_job.conf +18 -0
  11. data/templates/delayed_job/config/rubber/rubber-delayed_job.yml +1 -0
  12. data/templates/delayed_job/templates.yml +1 -0
  13. data/templates/discourse/config/rubber/deploy-discourse.rb +16 -0
  14. data/templates/discourse/config/rubber/role/nginx/nginx.conf +43 -0
  15. data/templates/discourse/config/rubber/role/nginx/unicorn_nginx.conf +102 -0
  16. data/templates/discourse/config/rubber/role/unicorn/unicorn.rb +99 -0
  17. data/templates/discourse/config/rubber/rubber-discourse.yml +3 -0
  18. data/templates/discourse/templates.yml +5 -0
  19. data/templates/elasticsearch/config/rubber/deploy-elasticsearch.rb +7 -7
  20. data/templates/elasticsearch/config/rubber/role/elasticsearch/elasticsearch-default.conf +47 -0
  21. data/templates/elasticsearch/config/rubber/role/elasticsearch/elasticsearch.yml +49 -30
  22. data/templates/elasticsearch/config/rubber/role/elasticsearch/logging.yml +14 -3
  23. data/templates/elasticsearch/config/rubber/role/elasticsearch/monit-elasticsearch.conf +8 -0
  24. data/templates/elasticsearch/config/rubber/rubber-elasticsearch.yml +1 -4
  25. data/templates/ffmpeg/config/rubber/deploy-ffmpeg.rb +13 -0
  26. data/templates/ffmpeg/config/rubber/rubber-ffmpeg.yml +5 -0
  27. data/templates/ffmpeg/templates.yml +1 -0
  28. data/templates/graylog/config/rubber/deploy-graylog.rb +17 -20
  29. data/templates/graylog/config/rubber/role/graylog_server/graylog2.conf +157 -43
  30. data/templates/graylog/config/rubber/role/graylog_server/monit-graylog_server.conf +8 -0
  31. data/templates/graylog/config/rubber/role/graylog_web/graylog2-web-interface.conf +19 -0
  32. data/templates/graylog/config/rubber/role/graylog_web/graylog_web-upstart.conf +1 -1
  33. data/templates/graylog/config/rubber/role/graylog_web/monit-graylog_web.conf +8 -0
  34. data/templates/graylog/config/rubber/rubber-graylog.yml +5 -12
  35. data/templates/graylog/templates.rb +13 -5
  36. data/templates/memcached/config/rubber/deploy-memcached.rb +16 -0
  37. data/templates/memcached/config/rubber/role/memcached/dalli.rb +20 -0
  38. data/templates/memcached/config/rubber/role/memcached/memcached.conf +17 -8
  39. data/templates/memcached/config/rubber/rubber-memcached.yml +25 -1
  40. data/templates/monit/config/rubber/role/memcached/monit-memcached.conf +9 -0
  41. data/templates/monit/config/rubber/role/mongodb/monit-mongodb.conf +1 -0
  42. data/templates/mysql/config/rubber/deploy-mysql.rb +1 -2
  43. data/templates/newrelic/config/rubber/deploy-newrelic.rb +49 -0
  44. data/templates/newrelic/config/rubber/role/newrelic/monit-newrelic.conf +9 -0
  45. data/templates/newrelic/config/rubber/rubber-newrelic.yml +5 -0
  46. data/templates/newrelic/templates.yml +1 -0
  47. data/templates/passenger_nginx/config/rubber/deploy-passenger_nginx.rb +7 -4
  48. data/templates/passenger_nginx/config/rubber/rubber-passenger_nginx.yml +1 -0
  49. data/templates/redis/config/rubber/role/redis/redis.conf +120 -16
  50. data/templates/redis/config/rubber/rubber-redis.yml +1 -1
  51. data/templates/solr/config/rubber/deploy-solr.rb +8 -8
  52. data/templates/solr/config/rubber/rubber-solr.yml +6 -6
  53. data/templates/solr/templates.yml +1 -3
  54. data/templates/solr_sunspot/config/rubber/common/solr_sunspot.yml +10 -0
  55. data/templates/solr_sunspot/config/rubber/deploy-solr_sunspot.rb +30 -0
  56. data/templates/solr_sunspot/config/rubber/rubber-solr_sunspot.yml +5 -0
  57. data/templates/solr_sunspot/templates.yml +1 -0
  58. data/templates/unicorn/config/rubber/deploy-unicorn.rb +16 -11
  59. data/templates/unicorn/config/rubber/role/unicorn/unicorn +91 -0
  60. data/test/generator_test.rb +54 -0
  61. data/test/test_helper.rb +2 -0
  62. metadata +31 -11
  63. data/templates/elasticsearch/config/rubber/role/elasticsearch/elasticsearch-upstart.conf +0 -23
  64. data/templates/graylog/config/initializers/graylog.rb +0 -53
  65. data/templates/graylog/config/rubber/role/graylog_web/crontab +0 -9
  66. data/templates/graylog/config/rubber/role/graylog_web/email.yml +0 -16
  67. data/templates/graylog/config/rubber/role/graylog_web/general.yml +0 -27
  68. data/templates/graylog/config/rubber/role/graylog_web/mongoid.yml +0 -28
  69. data/templates/memcached/config/memcached.yml +0 -28
  70. data/templates/memcached/config/rubber/common/memcached.yml +0 -14
  71. data/templates/unicorn/config/rubber/role/unicorn/unicorn-upstart.conf +0 -13
data/templates/discourse/config/rubber/rubber-discourse.yml
@@ -0,0 +1,3 @@
+roles:
+  discourse:
+    packages: [libtool, libpq-dev, gawk, pngcrush, imagemagick, python-software-properties, postgresql-contrib]
data/templates/discourse/templates.yml
@@ -0,0 +1,5 @@
+description: A fairly complete and scalable discourse deployment setup using unicorn/nginx
+dependent_templates:
+  - complete_unicorn_nginx
+  - postgresql
+  - sidekiq
data/templates/elasticsearch/config/rubber/deploy-elasticsearch.rb
@@ -8,13 +8,13 @@ namespace :rubber do
 
     task :install, :roles => :elasticsearch do
       rubber.sudo_script 'install_elasticsearch', <<-ENDSCRIPT
-        if [[ ! -d "#{rubber_env.elasticsearch_dir}" ]]; then
-          wget --no-check-certificate -qNP /tmp http://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-#{rubber_env.elasticsearch_version}.zip
-          unzip -d #{rubber_env.elasticsearch_prefix} /tmp/elasticsearch-#{rubber_env.elasticsearch_version}.zip
-          rm /tmp/elasticsearch-#{rubber_env.elasticsearch_version}.zip
+        if [[ ! -f /usr/share/elasticsearch/lib/elasticsearch-#{rubber_env.elasticsearch_version}.jar ]]; then
+          wget -qNP /tmp https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-#{rubber_env.elasticsearch_version}.deb
+          dpkg -i /tmp/elasticsearch-#{rubber_env.elasticsearch_version}.deb
+          rm /tmp/elasticsearch-#{rubber_env.elasticsearch_version}.deb
 
-          #{rubber_env.elasticsearch_dir}/bin/plugin -install mobz/elasticsearch-head
-
+          rm -rf /usr/share/elasticsearch/plugins/head
+          /usr/share/elasticsearch/bin/plugin -install mobz/elasticsearch-head
         fi
       ENDSCRIPT
     end
@@ -22,7 +22,7 @@ namespace :rubber do
     after "rubber:bootstrap", "rubber:elasticsearch:bootstrap"
 
     task :bootstrap, :roles => :elasticsearch do
-      exists = capture("echo $(ls /etc/init/elasticsearch.conf 2> /dev/null)")
+      exists = capture("echo $(ls #{rubber_env.elasticsearch_data_dir} 2> /dev/null)")
       if exists.strip.size == 0
         # After everything installed on machines, we need the source tree
         # on hosts in order to run rubber:config for bootstrapping the db
data/templates/elasticsearch/config/rubber/role/elasticsearch/elasticsearch-default.conf
@@ -0,0 +1,47 @@
+<%
+  @path = '/etc/default/elasticsearch'
+%>
+# Run ElasticSearch as this user ID and group ID
+#ES_USER=elasticsearch
+#ES_GROUP=elasticsearch
+
+# Heap Size (defaults to 256m min, 1g max)
+#ES_HEAP_SIZE=2g
+
+# Heap new generation
+#ES_HEAP_NEWSIZE=
+
+# max direct memory
+#ES_DIRECT_SIZE=
+
+# Maximum number of open files, defaults to 65535.
+#MAX_OPEN_FILES=65535
+
+# Maximum locked memory size. Set to "unlimited" if you use the
+# bootstrap.mlockall option in elasticsearch.yml. You must also set
+# ES_HEAP_SIZE.
+#MAX_LOCKED_MEMORY=unlimited
+
+# Maximum number of VMA (Virtual Memory Areas) a process can own
+#MAX_MAP_COUNT=262144
+
+# ElasticSearch log directory
+LOG_DIR=<%= rubber_env.elasticsearch_log_dir %>
+
+# ElasticSearch data directory
+DATA_DIR=<%= rubber_env.elasticsearch_data_dir %>
+
+# ElasticSearch work directory
+WORK_DIR=<%= rubber_env.elasticsearch_work_dir %>
+
+# ElasticSearch configuration directory
+#CONF_DIR=/etc/elasticsearch
+
+# ElasticSearch configuration file (elasticsearch.yml)
+#CONF_FILE=/etc/elasticsearch/elasticsearch.yml
+
+# Additional Java OPTS
+#ES_JAVA_OPTS=
+
+# Configure restart on package upgrade (true, every other setting will lead to not restarting)
+#RESTART_ON_UPGRADE=true
data/templates/elasticsearch/config/rubber/role/elasticsearch/elasticsearch.yml
@@ -1,12 +1,6 @@
 <%
-  @path = "#{rubber_env.elasticsearch_dir}/config/elasticsearch.yml"
-  @post = <<-POST
-    mkdir -p #{rubber_env.elasticsearch_work_dir}
-    mkdir -p #{rubber_env.elasticsearch_data_dir}
-    mkdir -p #{rubber_env.elasticsearch_log_dir}
-  POST
+  @path = '/etc/elasticsearch/elasticsearch.yml'
 -%>
-
 ##################### ElasticSearch Configuration Example #####################
 
 # This file contains an overview of various configuration settings,
@@ -14,7 +8,7 @@
 # consult the guide at <http://elasticsearch.org/guide>.
 #
 # The installation procedure is covered at
-# <http://elasticsearch.org/guide/reference/setup/installation.html>.
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
 #
 # ElasticSearch comes with reasonable defaults for most settings,
 # so you can try it out without bothering with configuration.
@@ -29,8 +23,8 @@
 #
 # node.rack: ${RACK_ENV_VAR}
 
-# See <http://elasticsearch.org/guide/reference/setup/configuration.html>
-# for information on supported formats and syntax for the configuration file.
+# For information on supported formats and syntax for the config file, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
 
 
 ################################### Cluster ###################################
@@ -38,7 +32,7 @@
 # Cluster name identifies your cluster for auto-discovery. If you're running
 # multiple clusters on the same network, make sure you're using unique names.
 #
-cluster.name: <%= rubber_env.app_name %>
+cluster.name: <%= rubber_env.graylog_elasticsearch_index %>
 
 
 #################################### Node #####################################
@@ -91,6 +85,10 @@ cluster.name: <%= rubber_env.app_name %>
 #
 # node.rack: rack314
 
+# By default, multiple nodes are allowed to start from the same installation location
+# to disable it, set the following:
+# node.max_local_storage_nodes: 1
+
 
 #################################### Index ####################################
 
@@ -101,8 +99,8 @@ cluster.name: <%= rubber_env.app_name %>
 # Note, that it makes more sense to configure index settings specifically for
 # a certain index, either when creating it or by using the index templates API.
 #
-# See <http://elasticsearch.org/guide/reference/index-modules/> and
-# <http://elasticsearch.org/guide/reference/api/admin-indices-create-index.html>
+# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
 # for more information.
 
 # Set the number of shards (splits) of an index (5 by default):
@@ -150,10 +148,9 @@ cluster.name: <%= rubber_env.app_name %>
 # Path to directory where to store index data allocated for this node.
 #
 path.data: <%= rubber_env.elasticsearch_data_dir %>
-
 #
 # Can optionally include more than one location, causing data to be striped across
-# the locations (à la RAID 0) on a file level, favouring locations with most free
+# the locations (a la RAID 0) on a file level, favouring locations with most free
 # space on creation. For example:
 #
 # path.data: /path/to/data1,/path/to/data2
@@ -171,6 +168,13 @@ path.logs: <%= rubber_env.elasticsearch_log_dir %>
 # path.plugins: /path/to/plugins
 
 
+#################################### Plugin ###################################
+
+# If a plugin listed here is not installed for current node, the node will not start.
+#
+# plugin.mandatory: mapper-attachments,lang-groovy
+
+
 ################################### Memory ####################################
 
 # ElasticSearch performs poorly when JVM starts swapping: you should ensure that
@@ -236,15 +240,16 @@ http.port: <%= rubber_env.elasticsearch_http_port %>
 # in the gateway, and when the cluster starts up for the first time,
 # it will read its state from the gateway.
 
-# There are several types of gateway implementations. For more information,
-# see <http://elasticsearch.org/guide/reference/modules/gateway>.
+# There are several types of gateway implementations. For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
 
 # The default gateway type is the "local" gateway (recommended):
 #
 # gateway.type: local
 
 # Settings below control how and when to start the initial recovery process on
-# a full cluster restart (to reuse as much local data as possible).
+# a full cluster restart (to reuse as much local data as possible when using shared
+# gateway).
 
 # Allow recovery process after N nodes in a cluster are up:
 #
@@ -256,7 +261,8 @@ http.port: <%= rubber_env.elasticsearch_http_port %>
 # gateway.recover_after_time: 5m
 
 # Set how many nodes are expected in this cluster. Once these N nodes
-# are up, begin recovery process immediately:
+# are up (and recover_after_nodes is met), begin recovery process immediately
+# (without waiting for recover_after_time to expire):
 #
 # gateway.expected_nodes: 2
 
@@ -277,9 +283,9 @@ http.port: <%= rubber_env.elasticsearch_http_port %>
 #
 # cluster.routing.allocation.node_concurrent_recoveries: 2
 
-# Set to throttle throughput when recovering (eg. 100mb, by default unlimited):
+# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
 #
-# indices.recovery.max_size_per_sec: 0
+# indices.recovery.max_bytes_per_sec: 20mb
 
 # Set to limit the number of open concurrent streams when
 # recovering a shard from a peer:
@@ -293,8 +299,8 @@ http.port: <%= rubber_env.elasticsearch_http_port %>
 # and master node is elected. Multicast discovery is the default.
 
 # Set to ensure a node sees N other master eligible nodes to be considered
-# operational within the cluster. Set this option to a higher value (2-4)
-# for large clusters:
+# operational within the cluster. Its recommended to set it to a higher value
+# than 1 when running more than 2 nodes in the cluster.
 #
 # discovery.zen.minimum_master_nodes: 1
 
@@ -304,8 +310,8 @@ http.port: <%= rubber_env.elasticsearch_http_port %>
 #
 # discovery.zen.ping.timeout: 3s
 
-# See <http://elasticsearch.org/guide/reference/modules/discovery/zen.html>
-# for more information.
+# For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
 
 # Unicast discovery allows to explicitly control which nodes will be used
 # to discover the cluster. It can be used when multicast is not present,
@@ -318,16 +324,16 @@ http.port: <%= rubber_env.elasticsearch_http_port %>
 # 2. Configure an initial list of master nodes in the cluster
 #    to perform discovery when new nodes (master or data) are started:
 #
-# discovery.zen.ping.unicast.hosts: ["host1", "host2:port", "host3[portX-portY]"]
+# discovery.zen.ping.unicast.hosts: ["host1", "host2:port"]
 
 # EC2 discovery allows to use AWS EC2 API in order to perform discovery.
 #
 # You have to install the cloud-aws plugin for enabling the EC2 discovery.
 #
-# See <http://elasticsearch.org/guide/reference/modules/discovery/ec2.html>
-# for more information.
+# For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
 #
-# See <http://elasticsearch.org/tutorials/2011/08/22/elasticsearch-on-ec2.html>
+# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
 # for a step-by-step tutorial.
 
 
@@ -335,7 +341,6 @@ http.port: <%= rubber_env.elasticsearch_http_port %>
 
 # Shard level query and fetch threshold logging.
 
-#index.search.slowlog.level: TRACE
 #index.search.slowlog.threshold.query.warn: 10s
 #index.search.slowlog.threshold.query.info: 5s
 #index.search.slowlog.threshold.query.debug: 2s
@@ -346,3 +351,17 @@ http.port: <%= rubber_env.elasticsearch_http_port %>
 #index.search.slowlog.threshold.fetch.debug: 500ms
 #index.search.slowlog.threshold.fetch.trace: 200ms
 
+#index.indexing.slowlog.threshold.index.warn: 10s
+#index.indexing.slowlog.threshold.index.info: 5s
+#index.indexing.slowlog.threshold.index.debug: 2s
+#index.indexing.slowlog.threshold.index.trace: 500ms
+
+################################## GC Logging ################################
+
+#monitor.jvm.gc.young.warn: 1000ms
+#monitor.jvm.gc.young.info: 700ms
+#monitor.jvm.gc.young.debug: 400ms
+
+#monitor.jvm.gc.old.warn: 10s
+#monitor.jvm.gc.old.info: 5s
+#monitor.jvm.gc.old.debug: 2s
data/templates/elasticsearch/config/rubber/role/elasticsearch/logging.yml
@@ -1,8 +1,9 @@
 <%
-  @path = "#{rubber_env.elasticsearch_dir}/config/logging.yml"
+  @path = '/etc/elasticsearch/logging.yml'
 %>
-
-rootLogger: INFO, console, file
+# you can override this using by setting a system property, for example -Des.logger.level=DEBUG
+es.logger.level: INFO
+rootLogger: ${es.logger.level}, console, file
 logger:
   # log action execution errors for easier debugging
   action: DEBUG
@@ -20,9 +21,11 @@ logger:
   #discovery: TRACE
 
   index.search.slowlog: TRACE, index_search_slow_log_file
+  index.indexing.slowlog: TRACE, index_indexing_slow_log_file
 
 additivity:
   index.search.slowlog: false
+  index.indexing.slowlog: false
 
 appender:
   console:
@@ -46,3 +49,11 @@ appender:
     layout:
       type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  index_indexing_slow_log_file:
+    type: dailyRollingFile
+    file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
data/templates/elasticsearch/config/rubber/role/elasticsearch/monit-elasticsearch.conf
@@ -0,0 +1,8 @@
+<%
+  @path = '/etc/monit/monit.d/monit-elasticsearch.conf'
+%>
+check process elasticsearch with pidfile /var/run/elasticsearch.pid
+  group elasticsearch-<%= Rubber.env %>
+  start program = "/usr/bin/env service elasticsearch start"
+  stop program = "/usr/bin/env service elasticsearch stop"
+  if failed host <%= rubber_env.host %> port <%= rubber_env.elasticsearch_http_port %> with timeout 10 seconds for 10 cycles then restart
data/templates/elasticsearch/config/rubber/rubber-elasticsearch.yml
@@ -1,10 +1,7 @@
-elasticsearch_version: 0.19.12
-elasticsearch_prefix: /usr/local
-elasticsearch_dir: "#{elasticsearch_prefix}/elasticsearch-#{elasticsearch_version}"
+elasticsearch_version: 0.90.10
 elasticsearch_data_dir: "/mnt/elasticsearch/data"
 elasticsearch_work_dir: "/mnt/elasticsearch/work"
 elasticsearch_log_dir: "/mnt/elasticsearch/logs"
-elasticsearch_pid_file: "/var/run/elasticsearch.pid"
 elasticsearch_http_port: 9200
 
 roles:
data/templates/ffmpeg/config/rubber/deploy-ffmpeg.rb
@@ -0,0 +1,13 @@
+namespace :rubber do
+  namespace :ffmpeg do
+
+    rubber.allow_optional_tasks(self)
+
+    # Use Jon Severinsson's FFmpeg PPA so we can install the "real" ffmpeg (Ubuntu uses libav / avconv)
+    before "rubber:install_packages", "rubber:ffmpeg:setup_apt_sources"
+    task :setup_apt_sources do
+      run "add-apt-repository -y ppa:jon-severinsson/ffmpeg"
+    end
+  end
+end
+
data/templates/ffmpeg/config/rubber/rubber-ffmpeg.yml
@@ -0,0 +1,5 @@
+roles:
+  ffmpeg:
+    # NOTE: In order to get the correct ffmpeg package, we must use Jon Severinsson's FFmpeg PPA
+    # See deploy-ffmpeg.rb. libavcodec-extra-53 is needed to encode AAC audio
+    packages: [ffmpeg, libavcodec-extra-53]
data/templates/ffmpeg/templates.yml
@@ -0,0 +1 @@
+description: The FFmpeg module
data/templates/graylog/config/rubber/deploy-graylog.rb
@@ -11,8 +11,9 @@ namespace :rubber do
     task :install, :roles => :graylog_server do
       rubber.sudo_script 'install_graylog_server', <<-ENDSCRIPT
         if [[ ! -d "#{rubber_env.graylog_server_dir}" ]]; then
-          wget --no-check-certificate -qNP /tmp #{rubber_env.graylog_server_pkg_url}
-          tar -C #{rubber_env.graylog_server_prefix} -zxf /tmp/graylog2-server-#{rubber_env.graylog_server_version}.tar.gz
+          wget --no-check-certificate -qNP /tmp https://github.com/Graylog2/graylog2-server/releases/download/#{rubber_env.graylog_server_version}/graylog2-server-#{rubber_env.graylog_server_version}.tgz
+          tar -C #{rubber_env.graylog_server_prefix} -zxf /tmp/graylog2-server-#{rubber_env.graylog_server_version}.tgz
+          rm /tmp/graylog2-server-#{rubber_env.graylog_server_version}.tgz
         fi
       ENDSCRIPT
     end
@@ -27,6 +28,8 @@ namespace :rubber do
       end
     end
 
+    before "rubber:graylog:server:bootstrap", "rubber:mongodb:restart"
+
     task :bootstrap, :roles => :graylog_server do
       exists = capture("echo $(ls /etc/graylog2.conf 2> /dev/null)")
       if exists.strip.size == 0
@@ -64,11 +67,9 @@ namespace :rubber do
     task :install, :roles => :graylog_web do
       rubber.sudo_script 'install_graylog_web', <<-ENDSCRIPT
         if [[ ! -d "#{rubber_env.graylog_web_dir}" ]]; then
-          wget --no-check-certificate -qNP /tmp #{rubber_env.graylog_web_pkg_url}
-          tar -C #{rubber_env.graylog_web_prefix} -zxf /tmp/graylog2-web-interface-#{rubber_env.graylog_web_version}.tar.gz
-
-          mkdir #{rubber_env.graylog_web_dir}/log
-          mkdir #{rubber_env.graylog_web_dir}/tmp
+          wget --no-check-certificate -qNP /tmp https://github.com/Graylog2/graylog2-web-interface/releases/download/#{rubber_env.graylog_web_version}/graylog2-web-interface-#{rubber_env.graylog_web_version}.tgz
+          tar -C #{rubber_env.graylog_web_prefix} -zxf /tmp/graylog2-web-interface-#{rubber_env.graylog_web_version}.tgz
+          rm /tmp/graylog2-web-interface-#{rubber_env.graylog_web_version}.tgz
         fi
       ENDSCRIPT
     end
@@ -82,23 +83,19 @@ namespace :rubber do
 
       rubber.run_config(:file => "role/graylog_web/", :force => true, :deploy_path => release_path)
 
-      rubber.sudo_script 'bootstrap_graylog_web', <<-ENDSCRIPT
-        cd #{rubber_env.graylog_web_dir}
-
-        # Add puma to the Gemfile so we can run the server.
-        echo "gem 'puma'" >> Gemfile
-
-        export RAILS_ENV=production
-        bundle install
-
-        # Create the Graylog Web admin account.
-        ./script/rails runner "User.create(:login => '#{rubber_env.graylog_web_username}', :email => '#{rubber_env.graylog_web_email}', :password => '#{rubber_env.graylog_web_password}', :password_confirmation => '#{rubber_env.graylog_web_password}', :role => 'admin') if User.count == 0"
-      ENDSCRIPT
-
       restart
     end
   end
 
+  after "rubber:graylog:web:bootstrap", "rubber:graylog:web:create_inputs"
+
+  task :create_inputs, :roles => :graylog_web do
+    rubber.sudo_script 'create_inputs', <<-ENDSCRIPT
+      curl --user #{rubber_env.graylog_web_username}:#{rubber_env.graylog_web_password} -XPOST http://localhost:12900/system/inputs -H "Content-Type: application/json" -d '{"type": "org.graylog2.inputs.gelf.udp.GELFUDPInput", "creator_user_id": "admin", "title": "gelf-udp", "global": true, "configuration": { "port": #{rubber_env.graylog_server_port}, "bind_address": "0.0.0.0" } }'
+      curl --user #{rubber_env.graylog_web_username}:#{rubber_env.graylog_web_password} -XPOST http://localhost:12900/system/inputs -H "Content-Type: application/json" -d '{"type": "org.graylog2.inputs.syslog.udp.SyslogUDPInput", "creator_user_id": "admin", "title": "syslog-udp", "global": true, "configuration": { "port": #{rubber_env.graylog_server_syslog_port}, "bind_address": "0.0.0.0" } }'
+    ENDSCRIPT
+  end
+
   desc "Stops the graylog web"
   task :stop, :roles => :graylog_web, :on_error => :continue do
     rsudo "service graylog-web stop || true"
data/templates/graylog/config/rubber/role/graylog_server/graylog2.conf
@@ -1,42 +1,131 @@
 <%
   @path = "/etc/graylog2.conf"
+
+  require 'digest/sha2'
+  root_password_sha2 = (Digest::SHA2.new << rubber_env.graylog_web_password).to_s
+
+  es_servers = rubber_instances.for_role('graylog_elasticsearch').collect { |i| "#{i.internal_ip}:9300" }.join(',')
 %>
+# If you are running more than one instances of graylog2-server you have to select one of these
+# instances as master. The master will perform some periodical tasks that non-masters won't perform.
+is_master = true
+
+# The auto-generated node ID will be stored in this file and read after restarts. It is a good idea
+# to use an absolute file path here if you are starting graylog2-server from init scripts or similar.
+node_id_file = /etc/graylog2-server-node-id
+
+# You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters.
+# Generate one by using for example: pwgen -s 96
+password_secret = <%= rubber_env.graylog_server_secret %>
+
+# the default root user is named 'admin'
+root_username = <%= rubber_env.graylog_web_username %>
+
+# You MUST specify a hash password for the root user (which you only need to initially set up the
+# system and in case you lose connectivity to your authentication backend)
+# Create one by using for example: echo -n yourpassword | shasum -a 256
+# and put the resulting hash value into the following line
+root_password_sha2 = <%= root_password_sha2 %>
+
+# Set plugin directory here (relative or absolute)
+plugin_dir = plugin
+
+# REST API listen URI. Must be reachable by other graylog2-server nodes if you run a cluster.
+rest_listen_uri = http://<%= rubber_instances[rubber_env.host].internal_ip %>:12900/
+
+# REST API transport address. Defaults to first non-loopback IPv4 system address and port 12900.
+# This will be promoted in the cluster discovery APIs and other nodes may try to connect on this
+# address. (see rest_listen_uri)
+rest_transport_uri = http://<%= rubber_instances[rubber_env.host].internal_ip %>:12900/
+
+# Embedded elasticsearch configuration file
+# pay attention to the working directory of the server, maybe use an absolute path here
+elasticsearch_config_file = /etc/graylog2-elasticsearch.yml
+
+elasticsearch_max_docs_per_index = 20000000
+
+# How many indices do you want to keep?
+# elasticsearch_max_number_of_indices*elasticsearch_max_docs_per_index=total number of messages in your setup
+elasticsearch_max_number_of_indices = 20
+
+# Decide what happens with the oldest indices when the maximum number of indices is reached.
+# The following strategies are availble:
+#   - delete # Deletes the index completely (Default)
+#   - close # Closes the index and hides it from the system. Can be re-opened later.
+retention_strategy = delete
+
+# How many ElasticSearch shards and replicas should be used per index? Note that this only applies to newly created indices.
+elasticsearch_shards = <%= [rubber_instances.for_role('graylog_elasticsearch').size, 1].max %>
+elasticsearch_replicas = 0
 
-# On which port (UDP) should we listen for Syslog messages? (Standard: 514)
-syslog_listen_port = <%= rubber_env.graylog_server_syslog_port %>
-syslog_protocol = udp
+elasticsearch_index_prefix = graylog2
 
-# ElasticSearch URL (default: http://localhost:9200/)
-elasticsearch_url = http://<%= rubber_instances.for_role('graylog_elasticsearch').first.full_name %>:9200/
-elasticsearch_index_name = <%= rubber_env.graylog_elasticsearch_index %>
+# settings to be passed to elasticsearch's client (overriding those in the provided elasticsearch_config_file)
+# all these
+# this must be the same as for your elasticsearch cluster
+elasticsearch_cluster_name = <%= rubber_env.graylog_elasticsearch_index %>
 
-# Always try a reverse DNS lookup instead of parsing hostname from syslog message?
-force_syslog_rdns = false
+# you could also leave this out, but makes it easier to identify the graylog2 client instance
+elasticsearch_node_name = <%= rubber_env.host %>
+
+# we don't want the graylog2 server to store any data, or be master node
+#elasticsearch_node_master = false
+#elasticsearch_node_data = false
+
+# use a different port if you run multiple elasticsearch nodes on one machine
+#elasticsearch_transport_tcp_port = 9350
+# we don't need to run the embedded HTTP server here
+#elasticsearch_http_enabled = false
+
+elasticsearch_discovery_zen_ping_multicast_enabled = false
+elasticsearch_discovery_zen_ping_unicast_hosts = <%= es_servers %>
+
+
+# Analyzer (tokenizer) to use for message and full_message field. The "standard" filter usually is a good idea.
+# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
+# ElasticSearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
+# Note that this setting only takes effect on newly created indices.
+elasticsearch_analyzer = standard
+
+# Batch size for all outputs. This is the maximum (!) number of messages an output module will get at once.
+# For example, if this is set to 5000 (default), the ElasticSearch Output will not index more than 5000 messages
+# at once. After that index operation is performed, the next batch will be indexed. If there is only 1 message
+# waiting, it will only index that single message. It is important to raise this parameter if you send in so
+# many messages that it is not enough to index 5000 messages at once. (Only at *really* high message rates)
+output_batch_size = 5000
+
+# The number of parallel running processors.
+# Raise this number if your buffers are filling up.
+processbuffer_processors = 5
+outputbuffer_processors = 5
+
+# Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping)
+# Possible types:
+#  - yielding
+#    Compromise between performance and CPU usage.
+#  - sleeping
+#    Compromise between performance and CPU usage. Latency spikes can occur after quiet periods.
+#  - blocking
+#    High throughput, low latency, higher CPU usage.
+#  - busy_spinning
+#    Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores.
+processor_wait_strategy = blocking
+
+# Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore.
+# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
+# Start server with --statistics flag to see buffer utilization.
+# Must be a power of 2. (512, 1024, 2048, ...)
+ring_size = 1024
 
 # MongoDB Configuration
 mongodb_useauth = false
-mongodb_user = grayloguser
-mongodb_password = 123
+#mongodb_user = grayloguser
+#mongodb_password = 123
 mongodb_host = <%= rubber_instances.for_role('graylog_mongodb').first.full_name %>
 #mongodb_replica_set = localhost:27017,localhost:27018,localhost:27019
 mongodb_database = <%= rubber_env.graylog_mongo_database %>
 mongodb_port = 27017
 
-# Graylog2 uses an internal message queue that holds all received messages until they are indexed. The mq_batch_size parameter defines how many messages are sent
-# to ElasticSearch at once (using a _bulk update: http://www.elasticsearch.org/guide/reference/api/bulk.html). The mq_poll_freq parameter controls in which
-# interval (in seconds) the message batch is sent. Example: If you leave the standard values (mq_batch_size = 4000, mq_poll_freq = 1), Graylog2 will index 4000 messages
-# every second. If you have spikes with more than 4000 messages per second, the queue will start growing until you get under 4000 messages/second again. The queue is
-# FIFO and can grow until you run out of RAM. Note that the queue *only* resists in RAM, so if you set the mq_poll_freq to a high value, you may lose a lot of not yet
-# indexed messages when the server crashes. Run the server in debug mode (java -jar graylog2-server.jar --debug) with a |grep '^INFO' to see debug information about
-# the queue and it's size. (INFO : org.graylog2.periodical.BulkIndexerThread - About to index max 4000 messages. You have a total of 103 messages in the queue. [freq:1s])
-# You can also monitor the queue size in your graylog2-web-interface.
-mq_batch_size = 4000
-mq_poll_freq = 1
-
-# You can set a maximum size of the message queue. If this size is reached, all new messages will be rejected until messages are removed/indexed from the queue.
-# 0 = unlimited queue size (default)
-mq_max_size = 0
-
 # Raise this according to the maximum connections your MongoDB server can handle if you encounter MongoDB connection problems.
 mongodb_max_connections = 100
 
@@ -45,23 +134,48 @@ mongodb_max_connections = 100
 # http://api.mongodb.org/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier
 mongodb_threads_allowed_to_block_multiplier = 5
 
-# Graylog Extended Log Format (GELF)
-use_gelf = true
-gelf_listen_address = 0.0.0.0
-gelf_listen_port = <%= rubber_env.graylog_server_port %>
 
 # Drools Rule File (Use to rewrite incoming log messages)
-# rules_file = /etc/graylog2.d/rules/graylog2.drl
-
-# AMQP
-amqp_enabled = false
-amqp_subscribed_queues = somequeue1:gelf,somequeue2:gelf,somequeue3:syslog
-amqp_host = localhost
-amqp_port = 5672
-amqp_username = guest
-amqp_password = guest
-amqp_virtualhost = /
-
-# Forwarders
-# Timeout in seconds for each connection and read of Logg.ly API when forwarding messages. Default: 3
-forwarder_loggly_timeout = 3
+# See: http://support.torch.sh/help/kb/graylog2-server/custom-message-rewritingprocessing
+# rules_file = /etc/graylog2.drl
+
+# Email transport
+transport_email_enabled = false
+transport_email_protocol = smtp
+transport_email_hostname = mail.example.com
+transport_email_port = 587
+transport_email_use_auth = true
+transport_email_use_tls = true
+transport_email_auth_username = you@example.com
+transport_email_auth_password = secret
+transport_email_subject_prefix = [graylog2]
+transport_email_from_email = graylog2@example.com
+transport_email_from_name = Graylog2
+transport_email_web_interface_url = http://your-graylog2.example.org
+
+# Jabber/XMPP transport
+transport_jabber_enabled = false
+transport_jabber_hostname = jabber.example.com
+transport_jabber_port = 5222
+transport_jabber_use_sasl_auth = true
+transport_jabber_allow_selfsigned_certs = false
+transport_jabber_auth_username = your_user
+transport_jabber_auth_password = secret
+transport_jabber_message_prefix = [graylog2]
+
+# Additional modules
+# Graphite
+#enable_graphite_output = false
+#graphite_carbon_host = 127.0.0.1
+#graphite_carbon_tcp_port = 2003
+#graphite_prefix = logs
+
+# Librato Metrics (http://support.torch.sh/help/kb/graylog2-server/using-librato-metrics-with-graylog2)
+#enable_libratometrics_output = false
+#enable_libratometrics_system_metrics = false
+#libratometrics_api_user = you@example.com
+#libratometrics_api_token = abcdefg12345
+#libratometrics_prefix = gl2-
+#libratometrics_interval = 60
+#libratometrics_stream_filter =
+#libratometrics_host_filter =