rubber 2.13.1 → 2.14.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 5a6f5d68d0e3d8fc7463dbf87fc649e57b1fc60d
- data.tar.gz: fe895924133250ad65d6c63d4b2bd517b26cbd8f
+ metadata.gz: 30a59a4d64e36a68db212a6981a372bca1c068dd
+ data.tar.gz: 7ceb0e3669077e1c233cfe4591070138bf34950f
  SHA512:
- metadata.gz: c2a26ce7c18c61d14d580283b8ca6aea53c78ab9779ed1d767f5443463b0d82bc41d50fc168cafa4e9393a412926968b96e7f84e61da461f4225a42b203c9058
- data.tar.gz: b4296c2d1ca778c49a91dba788353331add723eb0b55d79823adb0235edd6d12741d935be0db71b3c8917fb457e923032a43675311fd213d8126acfadf514391
+ metadata.gz: 16199697b6e470ef26c5408a628abee2ac085fb72db8ce240d37b544aae62d32923988b188d292472cf38e141392c181eaa29f479c16d9b77e4eb551cbb1668d
+ data.tar.gz: 74e1de3912d38cb50150d6c80d40e4c82ad3f66423c70770b424b75de7bbdde5a40e9e76eb81a403c162826b2312ccdca113a8011b3f6af72d378b6d24475f37
data/CHANGELOG CHANGED
@@ -1,3 +1,23 @@
+ 2.14.0 (10/13/2014)
+
+ Improvements:
+ ============
+
+ [base] Updated the base template to play a bit nicer when reading the rubber secret file at runtime. <dc0663c>
+ [collectd] Updated the collectd runner scripts to play nicer with JRuby. <283ba21>
+ [graylog] Upgraded from Graylog2 0.20.2 to 0.90.0. <6b3d47e>
+ [postgresql] Upgraded PostgreSQL from 9.1 to 9.3. <3b2b322>
+ [redis] Upgraded Redis from 2.8.8 to 2.8.17. <15c8040>
+
+ Bug Fixes:
+ =========
+
+ [core] Make sure we follow the full after_refresh_instance chain for vSphere (fixes a bug with not creating iptables rules introduced in 2.13.0). <46c8506>
+ [core] Make sure we always normalize hostnames with underscores in them. <4b4b4c7>
+ [postgresql] Make sure the PostgreSQL client version matches the server version. <f7b5223>
+ [postgresql] Reference the correct Ubuntu release in the PostgreSQL apt line. <82127cd>
+
+
  2.13.1 (10/01/2014)

  Improvements:
@@ -103,6 +103,8 @@ module Rubber
    end

    def after_refresh_instance(instance)
+     super
+
      rubber_cfg = Rubber::Configuration.get_configuration(Rubber.env)
      host_env = rubber_cfg.environment.bind(nil, instance.name)

@@ -312,4 +314,4 @@ module Rubber
      end
    end
  end
- end
+ end
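The `super` call above is the iptables fix listed in the changelog: a Ruby override that omits `super` silently drops everything the parent hook did. A minimal, self-contained sketch of that failure mode (class names here are illustrative, not rubber's actual ones):

    class BaseProvider
      def after_refresh_instance(instance)
        puts "creating iptables rules for #{instance}"   # base-class side effect
      end
    end

    class VsphereProvider < BaseProvider
      def after_refresh_instance(instance)
        super  # without this line, the iptables step above never runs
        puts "vSphere-specific refresh for #{instance}"
      end
    end

    VsphereProvider.new.after_refresh_instance("web01")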
@@ -166,7 +166,7 @@ namespace :rubber do

  if ic.role_names.include?('web_tools')
    Array(rubber_env.web_tools_proxies).each do |name, settings|
-     hosts_data << "#{name}-#{ic.full_name}"
+     hosts_data << "#{name.gsub('_', '-')}-#{ic.full_name}"
    end
  end

@@ -185,7 +185,7 @@ namespace :rubber do
  # graphite web app)
  if ic.role_names.include?('web_tools')
    Array(rubber_env.web_tools_proxies).each do |name, settings|
-     hosts_data << "#{name}-#{ic.full_name}"
+     hosts_data << "#{name.gsub('_', '-')}-#{ic.full_name}"
    end
  end

@@ -245,7 +245,7 @@ namespace :rubber do
  # graphite web app)
  if ic.role_names.include?('web_tools')
    Array(rubber_env.web_tools_proxies).each do |name, settings|
-     hosts_data << "#{name}-#{ic.full_name}"
+     hosts_data << "#{name.gsub('_', '-')}-#{ic.full_name}"
    end
  end

@@ -597,8 +597,7 @@ namespace :rubber do
  # graphite web app)
  if instance_item.role_names.include?('web_tools')
    Array(rubber_env.web_tools_proxies).each do |name, settings|
-     name = name.gsub('_', '-')
-     provider.update("#{name}-#{instance_item.name}", instance_item.external_ip)
+     provider.update("#{name.gsub('_', '-')}-#{instance_item.name}", instance_item.external_ip)
    end
  end
  end
@@ -616,7 +615,7 @@ namespace :rubber do
  # graphite web app)
  if instance_item.role_names.include?('web_tools')
    Array(rubber_env.web_tools_proxies).each do |name, settings|
-     provider.destroy("#{name}-#{instance_item.name}")
+     provider.destroy("#{name.gsub('_', '-')}-#{instance_item.name}")
    end
  end
  end
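All five hunks above apply the same hostname-normalization fix: web_tools proxy names such as graylog_web contain underscores, which are not valid in DNS host labels, so they are converted to hyphens before being used to build hostnames and DNS records. The transformation in isolation (values are stand-ins for a proxy name and ic.full_name):

    name = "graylog_web"                        # a typical web_tools proxy name
    full_name = "web01.foo.com"                 # stand-in for ic.full_name
    puts "#{name.gsub('_', '-')}-#{full_name}"  # => "graylog-web-web01.foo.com"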
@@ -1,3 +1,3 @@
  module Rubber
-   VERSION = '2.13.1'.freeze
+   VERSION = '2.14.0'.freeze
  end
@@ -28,7 +28,7 @@ domain: foo.com
  # OPTIONAL: Additional rubber file to pull config from if it exists. This file will
  # also be pushed to remote host at Rubber.root/config/rubber/rubber-secret.yml
  #
- # rubber_secret: "#{File.expand_path('~') + '/.ec2' + (Rubber.env == 'production' ? '' : '_dev') + '/rubber-secret.yml' rescue ''}"
+ # rubber_secret: "#{File.expand_path('~') + '/.ec2' + (Rubber.env == 'production' ? '' : '_dev') + '/rubber-secret.yml' rescue 'rubber-secret.yml'}"

  # OPTIONAL: Encryption key that was used to obfuscate the contents of rubber-secret.yml with "rubber util:obfuscation"
  # Not that much better when stored in here, but you could use a ruby snippet in here to fetch it from a key server or something
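The rubber_secret entry embeds Ruby inside a YAML string, and the inline rescue modifier is what changed: expansion can raise (for example, File.expand_path('~') fails when HOME is unset), and 2.14.0 swaps the empty-string fallback for a usable relative filename so the base template still finds a secret file at runtime. A sketch of the same pattern reduced to plain Ruby:

    path = ("#{File.expand_path('~')}/.ec2/rubber-secret.yml" rescue 'rubber-secret.yml')
    puts path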
@@ -77,16 +77,34 @@ STDERR.puts "#{Time.now}: Starting rubber-collectd execution loop"
  loop do
    start_time = Time.now.to_i

-   scripts.each do |script|
-     fork do
-       begin
-         load script
-       rescue Exception => e
-         STDERR.puts("#{script}: #{e}")
+   if defined?(JRUBY_VERSION)
+     threads = []
+
+     scripts.each do |script|
+       threads << Thread.new do
+         begin
+           load script
+         rescue Exception => e
+           STDERR.puts("#{script}: #{e}")
+         end
        end
      end
+
+     threads.each(&:join)
+
+   else
+     scripts.each do |script|
+       fork do
+         begin
+           load script
+         rescue Exception => e
+           STDERR.puts("#{script}: #{e}")
+         end
+       end
+     end
+
+     Process.waitall
    end
-   Process.waitall

    run_time = Time.now.to_i - start_time
    begin
@@ -103,5 +121,4 @@ loop do
    end

    sleep sleep_time
-
  end
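The platform split above exists because JRuby does not implement fork(2); the runner keeps per-script process isolation on MRI and falls back to threads on the JVM. The shape of the dispatch, reduced to a hedged sketch (run_all is a hypothetical name, not rubber's):

    def run_all(scripts)
      if defined?(JRUBY_VERSION)
        scripts.map { |s| Thread.new { load s } }.each(&:join)  # no fork on the JVM
      else
        scripts.each { |s| fork { load s } }                    # isolate each script in a child process
        Process.waitall
      end
    end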
@@ -1,31 +1,33 @@
- require 'pg'
+ unless defined?(JRUBY_VERSION)
+   require 'pg'

- master = Rubber.instances.for_role('postgresql_master').first.full_name
- slave = "localhost"
- opts = { :dbname => Rubber.config.db_name,
-          :user => Rubber.config.db_user,
-          :password => Rubber.config.db_pass }
- mconn = PGconn.open(opts.merge({:host => master}))
- sconn = PGconn.open(opts.merge({:host => slave}))
+   master = Rubber.instances.for_role('postgresql_master').first.full_name
+   slave = "localhost"
+   opts = { :dbname => Rubber.config.db_name,
+            :user => Rubber.config.db_user,
+            :password => Rubber.config.db_pass }
+   mconn = PGconn.open(opts.merge({:host => master}))
+   sconn = PGconn.open(opts.merge({:host => slave}))

- mval = mconn.exec("select pg_current_xlog_location()")[0]["pg_current_xlog_location"]
- sresult = sconn.exec("select pg_last_xlog_receive_location(), pg_last_xlog_replay_location()")[0]
- sval_receive = sresult["pg_last_xlog_receive_location"]
- sval_replay = sresult["pg_last_xlog_replay_location"]
+   mval = mconn.exec("select pg_current_xlog_location()")[0]["pg_current_xlog_location"]
+   sresult = sconn.exec("select pg_last_xlog_receive_location(), pg_last_xlog_replay_location()")[0]
+   sval_receive = sresult["pg_last_xlog_receive_location"]
+   sval_replay = sresult["pg_last_xlog_replay_location"]


- def numeric(val)
-   # First part is logid, second part is record offset
-   parts = val.split("/")
-   raise "Invalid location" if parts.size != 2 && parts.any {|p| p.to_s.strip.size == 0}
-   result = (0xFFFFFFFF * parts[0].to_i) + parts[1].to_i(16)
-   return result
- end
+   def numeric(val)
+     # First part is logid, second part is record offset
+     parts = val.split("/")
+     raise "Invalid location" if parts.size != 2 && parts.any {|p| p.to_s.strip.size == 0}
+     result = (0xFFFFFFFF * parts[0].to_i) + parts[1].to_i(16)
+     return result
+   end


- master_offset = numeric(mval)
- receive_offset = numeric(sval_receive)
- replay_offset = numeric(sval_replay)
+   master_offset = numeric(mval)
+   receive_offset = numeric(sval_receive)
+   replay_offset = numeric(sval_replay)

- puts "PUTVAL #{HOSTNAME}/postgresql/gauge-replication_receive_delay interval=#{INTERVAL} N:#{master_offset - receive_offset}"
- puts "PUTVAL #{HOSTNAME}/postgresql/gauge-replication_replay_delay interval=#{INTERVAL} N:#{master_offset - replay_offset}"
+   puts "PUTVAL #{HOSTNAME}/postgresql/gauge-replication_receive_delay interval=#{INTERVAL} N:#{master_offset - receive_offset}"
+   puts "PUTVAL #{HOSTNAME}/postgresql/gauge-replication_replay_delay interval=#{INTERVAL} N:#{master_offset - replay_offset}"
+ end
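The numeric helper folds a PostgreSQL xlog location of the form "logid/offset" into a single integer so the master and slave positions can be subtracted for the delay gauges. Following the shipped code exactly (decimal logid, hex offset; the location string is a made-up example):

    logid, offset = "2/15D68D0".split("/")
    puts (0xFFFFFFFF * logid.to_i) + offset.to_i(16)   # => 8612833486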
@@ -11,7 +11,7 @@ namespace :rubber do
  task :install, :roles => :graylog_server do
    rubber.sudo_script 'install_graylog_server', <<-ENDSCRIPT
      if [[ ! -d "#{rubber_env.graylog_server_dir}" ]]; then
-       wget --no-check-certificate -qNP /tmp https://github.com/Graylog2/graylog2-server/releases/download/#{rubber_env.graylog_server_version}/graylog2-server-#{rubber_env.graylog_server_version}.tgz
+       wget -qNP /tmp http://packages.graylog2.org/releases/graylog2-server/graylog2-server-#{rubber_env.graylog_server_version}.tgz
        tar -C #{rubber_env.graylog_server_prefix} -zxf /tmp/graylog2-server-#{rubber_env.graylog_server_version}.tgz
        rm /tmp/graylog2-server-#{rubber_env.graylog_server_version}.tgz
      fi
@@ -37,6 +37,7 @@ namespace :rubber do
    rubber.run_config(:file => "role/graylog_server/", :force => true, :deploy_path => release_path)

    restart
+   sleep 15 # Give graylog-server a bit of time to start up.
  end
  end

@@ -46,12 +47,12 @@ namespace :rubber do
    rubber.sudo_script 'create_inputs', <<-ENDSCRIPT
      # Only create inputs if the system has 0 inputs. It's a bit of a rough hack, but graylog currently (v0.20.2)
      # doesn't prevent the creation of duplicate conflicting inputs.
-     if ! curl -s --user #{rubber_env.graylog_web_username}:#{rubber_env.graylog_web_password} -XGET http://localhost:12900/system/inputs | grep "GELFUDPInput" &> /dev/null; then
-       curl --user #{rubber_env.graylog_web_username}:#{rubber_env.graylog_web_password} -XPOST http://localhost:12900/system/inputs -H "Content-Type: application/json" -d '{"type": "org.graylog2.inputs.gelf.udp.GELFUDPInput", "creator_user_id": "admin", "title": "gelf-udp", "global": true, "configuration": { "port": #{rubber_env.graylog_server_port}, "bind_address": "0.0.0.0" } }'
+     if ! curl -s --user #{rubber_env.graylog_web_username}:#{rubber_env.graylog_web_password} -XGET http://#{rubber_instance.internal_ip}:12900/system/inputs | grep "GELFUDPInput" &> /dev/null; then
+       curl --user #{rubber_env.graylog_web_username}:#{rubber_env.graylog_web_password} -XPOST http://#{rubber_instance.internal_ip}:12900/system/inputs -H "Content-Type: application/json" -d '{"type": "org.graylog2.inputs.gelf.udp.GELFUDPInput", "creator_user_id": "admin", "title": "gelf-udp", "global": true, "configuration": { "port": #{rubber_env.graylog_server_port}, "bind_address": "0.0.0.0" } }'
      fi

-     if ! curl -s --user #{rubber_env.graylog_web_username}:#{rubber_env.graylog_web_password} -XGET http://localhost:12900/system/inputs | grep "SyslogUDPInput" &> /dev/null; then
-       curl --user #{rubber_env.graylog_web_username}:#{rubber_env.graylog_web_password} -XPOST http://localhost:12900/system/inputs -H "Content-Type: application/json" -d '{"type": "org.graylog2.inputs.syslog.udp.SyslogUDPInput", "creator_user_id": "admin", "title": "syslog-udp", "global": true, "configuration": { "port": #{rubber_env.graylog_server_syslog_port}, "bind_address": "0.0.0.0" } }'
+     if ! curl -s --user #{rubber_env.graylog_web_username}:#{rubber_env.graylog_web_password} -XGET http://#{rubber_instance.internal_ip}:12900/system/inputs | grep "SyslogUDPInput" &> /dev/null; then
+       curl --user #{rubber_env.graylog_web_username}:#{rubber_env.graylog_web_password} -XPOST http://#{rubber_instance.internal_ip}:12900/system/inputs -H "Content-Type: application/json" -d '{"type": "org.graylog2.inputs.syslog.udp.SyslogUDPInput", "creator_user_id": "admin", "title": "syslog-udp", "global": true, "configuration": { "port": #{rubber_env.graylog_server_syslog_port}, "bind_address": "0.0.0.0" } }'
      fi
    ENDSCRIPT
  end
@@ -83,7 +84,7 @@ namespace :rubber do
  task :install, :roles => :graylog_web do
    rubber.sudo_script 'install_graylog_web', <<-ENDSCRIPT
      if [[ ! -d "#{rubber_env.graylog_web_dir}" ]]; then
-       wget --no-check-certificate -qNP /tmp https://github.com/Graylog2/graylog2-web-interface/releases/download/#{rubber_env.graylog_web_version}/graylog2-web-interface-#{rubber_env.graylog_web_version}.tgz
+       wget -qNP /tmp http://packages.graylog2.org/releases/graylog2-web-interface/graylog2-web-interface-#{rubber_env.graylog_web_version}.tgz
        tar -C #{rubber_env.graylog_web_prefix} -zxf /tmp/graylog2-web-interface-#{rubber_env.graylog_web_version}.tgz
        rm /tmp/graylog2-web-interface-#{rubber_env.graylog_web_version}.tgz
      fi
@@ -100,7 +101,6 @@ namespace :rubber do
    rubber.run_config(:file => "role/graylog_web/", :force => true, :deploy_path => release_path)

    restart
-   sleep 5 # Give graylog-web a bit of time to start up.
  end
  end

@@ -16,7 +16,7 @@ is_master = true
  node_id_file = /etc/graylog2-server-node-id

  # You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters.
- # Generate one by using for example: pwgen -s 96
+ # Generate one by using for example: pwgen -N 1 -s 96
  password_secret = <%= rubber_env.graylog_server_secret %>

  # the default root user is named 'admin'
@@ -36,7 +36,8 @@ plugin_dir = plugin
  # REST API listen URI. Must be reachable by other graylog2-server nodes if you run a cluster.
  rest_listen_uri = http://<%= rubber_instance.internal_ip %>:12900/

- # REST API transport address. Defaults to first non-loopback IPv4 system address and port 12900.
+ # REST API transport address. Defaults to the value of rest_listen_uri. Exception: If rest_listen_uri
+ # is set to a wildcard IP address (0.0.0.0) the first non-loopback IPv4 system address is used.
  # This will be promoted in the cluster discovery APIs and other nodes may try to connect on this
  # address. (see rest_listen_uri)
  rest_transport_uri = http://<%= rubber_instance.internal_ip %>:12900/
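This rest_listen_uri value is also why the create_inputs curl calls earlier switched from localhost to the internal IP: graylog2-server only answers its REST API on the address it binds. A hypothetical Ruby probe of the same endpoint (the address is a stand-in for rubber_instance.internal_ip; authentication is omitted):

    require 'net/http'
    require 'uri'

    internal_ip = '10.0.0.5'  # stand-in for rubber_instance.internal_ip
    uri = URI("http://#{internal_ip}:12900/system/inputs")
    response = Net::HTTP.get_response(uri)  # unauthenticated, so expect 401 rather than a refused connection
    puts response.code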
@@ -50,12 +51,17 @@ rest_transport_uri = http://<%= rubber_instance.internal_ip %>:12900/
  # overall round trip times. This is disabled by default. Uncomment the next line to enable it.
  #rest_enable_gzip = true

- # Embedded elasticsearch configuration file
+ # Embedded Elasticsearch configuration file
  # pay attention to the working directory of the server, maybe use an absolute path here
  #elasticsearch_config_file = /etc/graylog2-elasticsearch.yml

+ # (Approximate) maximum number of documents in an Elasticsearch index before a new index
+ # is being created, also see no_retention and elasticsearch_max_number_of_indices.
  elasticsearch_max_docs_per_index = 20000000

+ # Disable message retention on this node, i. e. disable Elasticsearch index rotation.
+ #no_retention = false
+
  # How many indices do you want to keep?
  # elasticsearch_max_number_of_indices*elasticsearch_max_docs_per_index=total number of messages in your setup
  elasticsearch_max_number_of_indices = 20
@@ -66,23 +72,24 @@ elasticsearch_max_number_of_indices = 20
  # - close # Closes the index and hides it from the system. Can be re-opened later.
  retention_strategy = delete

- # How many ElasticSearch shards and replicas should be used per index? Note that this only applies to newly created indices.
+ # How many Elasticsearch shards and replicas should be used per index? Note that this only applies to newly created indices.
  elasticsearch_shards = <%= [rubber_instances.for_role('graylog_elasticsearch').size, 1].max %>
  elasticsearch_replicas = 0

+ # Prefix for all Elasticsearch indices and index aliases managed by Graylog2.
  elasticsearch_index_prefix = graylog2

  # Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only
- # be enabled with care. See also: http://support.torch.sh/help/kb/graylog2-web-interface/the-search-bar-explained
+ # be enabled with care. See also: http://graylog2.org/resources/documentation/general/queries
  allow_leading_wildcard_searches = false

  # Do you want to allow searches to be highlighted? Depending on the size of your messages this can be memory hungry and
- # should only be enabled after making sure your elasticsearch cluster has enough memory.
+ # should only be enabled after making sure your Elasticsearch cluster has enough memory.
  allow_highlighting = false

  # settings to be passed to elasticsearch's client (overriding those in the provided elasticsearch_config_file)
  # all these
- # this must be the same as for your elasticsearch cluster
+ # this must be the same as for your Elasticsearch cluster
  elasticsearch_cluster_name = <%= rubber_env.graylog_elasticsearch_index %>

  # you could also leave this out, but makes it easier to identify the graylog2 client instance
@@ -92,7 +99,7 @@ elasticsearch_node_name = <%= rubber_env.host %>
  #elasticsearch_node_master = false
  #elasticsearch_node_data = false

- # use a different port if you run multiple elasticsearch nodes on one machine
+ # use a different port if you run multiple Elasticsearch nodes on one machine
  #elasticsearch_transport_tcp_port = 9350
  # we don't need to run the embedded HTTP server here
  #elasticsearch_http_enabled = false
@@ -100,31 +107,52 @@ elasticsearch_node_name = <%= rubber_env.host %>
  elasticsearch_discovery_zen_ping_multicast_enabled = false
  elasticsearch_discovery_zen_ping_unicast_hosts = <%= es_servers %>

- # the following settings allow to change the bind addresses for the elasticsearch client in graylog2
- # these settings are empty by default, letting elasticsearch choose automatically,
+ # Change the following setting if you are running into problems with timeouts during Elasticsearch cluster discovery.
+ # The setting is specified in milliseconds, the default is 5000ms (5 seconds).
+ # elasticsearch_cluster_discovery_timeout = 5000
+
+ # the following settings allow to change the bind addresses for the Elasticsearch client in graylog2
+ # these settings are empty by default, letting Elasticsearch choose automatically,
  # override them here or in the 'elasticsearch_config_file' if you need to bind to a special address
- # refer to http://www.elasticsearch.org/guide/en/elasticsearch/reference/0.90/modules-network.html for special values here
+ # refer to http://www.elasticsearch.org/guide/en/elasticsearch/reference/0.90/modules-network.html
+ # for special values here
  # elasticsearch_network_host =
  # elasticsearch_network_bind_host =
  # elasticsearch_network_publish_host =

+ # The total amount of time discovery will look for other Elasticsearch nodes in the cluster
+ # before giving up and declaring the current node master.
+ #elasticsearch_discovery_initial_state_timeout = 3s
+
  # Analyzer (tokenizer) to use for message and full_message field. The "standard" filter usually is a good idea.
  # All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
- # ElasticSearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
+ # Elasticsearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
  # Note that this setting only takes effect on newly created indices.
  elasticsearch_analyzer = standard

- # Batch size for all outputs. This is the maximum (!) number of messages an output module will get at once.
- # For example, if this is set to 5000 (default), the ElasticSearch Output will not index more than 5000 messages
- # at once. After that index operation is performed, the next batch will be indexed. If there is only 1 message
- # waiting, it will only index that single message. It is important to raise this parameter if you send in so
- # many messages that it is not enough to index 5000 messages at once. (Only at *really* high message rates)
- output_batch_size = 5000
+ # Batch size for the Elasticsearch output. This is the maximum (!) number of messages the Elasticsearch output
+ # module will get at once and write to Elasticsearch in a batch call. If the configured batch size has not been
+ # reached within output_flush_interval seconds, everything that is available will be flushed at once. Remember
+ # that every outputbuffer processor manages its own batch and performs its own batch write calls.
+ # ("outputbuffer_processors" variable)
+ output_batch_size = 25
+
+ # Flush interval (in seconds) for the Elasticsearch output. This is the maximum amount of time between two
+ # batches of messages written to Elasticsearch. It is only effective at all if your minimum number of messages
+ # for this time period is less than output_batch_size * outputbuffer_processors.
+ output_flush_interval = 1

  # The number of parallel running processors.
  # Raise this number if your buffers are filling up.
  processbuffer_processors = 5
- outputbuffer_processors = 5
+ outputbuffer_processors = 3
+
+ #outputbuffer_processor_keep_alive_time = 5000
+ #outputbuffer_processor_threads_core_pool_size = 3
+ #outputbuffer_processor_threads_max_pool_size = 30
+
+ # UDP receive buffer size for all message inputs (e. g. SyslogUDPInput).
+ #udp_recvbuffer_sizes = 1048576

  # Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping)
  # Possible types:
@@ -156,6 +184,29 @@ dead_letters_enabled = false
  # shutdown process. Set to 0 if you have no status checking load balancers in front.
  lb_recognition_period_seconds = 3

+ # Every message is matched against the configured streams and it can happen that a stream contains rules which
+ # take an unusual amount of time to run, for example if its using regular expressions that perform excessive backtracking.
+ # This will impact the processing of the entire server. To keep such misbehaving stream rules from impacting other
+ # streams, Graylog2 limits the execution time for each stream.
+ # The default values are noted below, the timeout is in milliseconds.
+ # If the stream matching for one stream took longer than the timeout value, and this happened more than "max_faults" times
+ # that stream is disabled and a notification is shown in the web interface.
+ # stream_processing_timeout = 2000
+ # stream_processing_max_faults = 3
+
+ # Since 0.21 the graylog2 server supports pluggable output modules. This means a single message can be written to multiple
+ # outputs. The next setting defines the timeout for a single output module, including the default output module where all
+ # messages end up. This setting is specified in milliseconds.
+
+ # Time in milliseconds to wait for all message outputs to finish writing a single message.
+ #output_module_timeout = 10000
+
+ # Time in milliseconds after which a detected stale master node is being rechecked on startup.
+ #stale_master_timeout = 2000
+
+ # Time in milliseconds which Graylog2 is waiting for all threads to stop on shutdown.
+ #shutdown_timeout = 30000
+
  # MongoDB Configuration
  mongodb_useauth = false
  #mongodb_user = grayloguser
@@ -174,8 +225,8 @@ mongodb_max_connections = 100
  mongodb_threads_allowed_to_block_multiplier = 5

  # Drools Rule File (Use to rewrite incoming log messages)
- # See: http://support.torch.sh/help/kb/graylog2-server/custom-message-rewritingprocessing
- # rules_file = /etc/graylog2.drl
+ # See: http://graylog2.org/resources/documentation/general/rewriting
+ #rules_file = /etc/graylog2.drl

  # Email transport
  transport_email_enabled = true
@@ -197,4 +248,43 @@ transport_email_from_name = Graylog2
  transport_email_web_interface_url = https://graylog-<%= rubber_env.full_host %>:<%= rubber_env.web_tools_ssl_port %>/

  # HTTP proxy for outgoing HTTP calls
- #http_proxy_uri =
+ #http_proxy_uri =
+
+ # Switch to enable/disable the off-heap message cache. Stores cached messages in the spool directory if set to true.
+ # Stores the messages in an in-memory data structure if set to false.
+ #message_cache_off_heap = true
+
+ # Directory for the off-heap message cache data. (absolute or relative)
+ #message_cache_spool_dir = spool
+
+ # The commit interval for the message cache in milliseconds. Only affects message cache implementations that need to commit data.
+ #message_cache_commit_interval = 1000
+
+ # When more messages are coming in as we can process, incoming messages will be cached in memory until
+ # they are ready to be processed. Per default this data structure is unbounded, so in situations of
+ # constant high load, it will grow endlessly until all allocatable memory has been consumed and the
+ # process dies.
+ # To prevent this, the next setting allows you to define an upper bound for this memory cache, if the
+ # number of messages in the cache has reached this limit, incoming messages will be dropped until it
+ # has shrunk again.
+ #
+ # The default is 0, which means no upper bound.
+ #
+ #input_cache_max_size = 0
+
+ # Connection timeout for a configured LDAP server (e. g. ActiveDirectory) in milliseconds.
+ #ldap_connection_timeout = 2000
+
+ # Version checks settings. All timeouts are in milliseconds.
+ #versionchecks = true
+ #versionchecks_uri = http://versioncheck.torch.sh/check
+ #versionchecks_connection_request_timeout = 10000
+ #versionchecks_connect_timeout = 10000
+ #versionchecks_socket_timeout = 10000
+
+ # https://github.com/bazhenov/groovy-shell-server
+ #groovy_shell_enable = false
+ #groovy_shell_port = 6789
+
+ # Enable collection of Graylog2-related metrics into MongoDB
+ #enable_metrics_collection = false
@@ -13,7 +13,7 @@ graylog2-server.uris="<%= graylog_server_uris %>"
  # ~~~~~
  # The secret key is used to secure cryptographics functions. Set this to a long and randomly generated string.
  # If you deploy your application to several instances be sure to use the same key!
- # Generate for example with: pwgen -s 96
+ # Generate for example with: pwgen -N 1 -s 96
  application.secret="<%= rubber_env.graylog_web_secret %>"

  # Web interface timezone
@@ -1,11 +1,11 @@
- graylog_server_version: "0.20.2"
+ graylog_server_version: "0.90.0"
  graylog_server_prefix: "/usr/local"
  graylog_server_dir: "#{graylog_server_prefix}/graylog2-server-#{graylog_server_version}"
  graylog_server_pid_file: "/var/run/graylog-server.pid"
  graylog_server_port: 12201
  graylog_server_syslog_port: 12514

- graylog_web_version: "0.20.2"
+ graylog_web_version: "0.90.0"
  graylog_web_prefix: "/usr/local"
  graylog_web_dir: "#{graylog_web_prefix}/graylog2-web-interface-#{graylog_web_version}"
  graylog_web_pid_file: "/var/run/graylog-web.pid"
@@ -8,7 +8,7 @@ namespace :rubber do

  task :setup_apt_sources do
    rubber.sudo_script 'configure_postgresql_repository', <<-ENDSCRIPT
-     echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
+     echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -sc`-pgdg main" > /etc/apt/sources.list.d/pgdg.list
      wget --quiet -O - http://apt.postgresql.org/pub/repos/apt/ACCC4CF8.asc | sudo apt-key add -
    ENDSCRIPT
  end
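Hardcoding precise-pgdg broke installs on any Ubuntu release other than 12.04; interpolating `lsb_release -sc` picks up the running system's codename instead. The same line built in Ruby rather than shell, as a sketch:

    codename = `lsb_release -sc`.strip   # e.g. "trusty" on Ubuntu 14.04
    puts "deb http://apt.postgresql.org/pub/repos/apt/ #{codename}-pgdg main"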
@@ -85,9 +85,9 @@
  # maintenance (custom daily cronjobs, replication, and similar tasks).
  #
  # Database administrative login by Unix domain socket
- local   all   postgres   ident
+ local   all   postgres   peer

- # TYPE  DATABASE  USER  CIDR-ADDRESS  METHOD
+ # TYPE  DATABASE  USER  ADDRESS  METHOD
  <%
    scheme = rubber_env.db_pass ? 'md5' : 'trust'
  %>
@@ -61,14 +61,15 @@ external_pid_file = '<%= rubber_env.postgresql_pid_file %>' # write an extra PI

  listen_addresses = '*' # what IP address(es) to listen on;
                         # comma-separated list of addresses;
-                        # defaults to 'localhost', '*' = all
+                        # defaults to 'localhost'; use '*' for all
                         # (change requires restart)
  port = 5432 # (change requires restart)
  max_connections = 100 # (change requires restart)
  # Note: Increasing max_connections costs ~400 bytes of shared memory per
  # connection slot, plus lock space (see max_locks_per_transaction).
  #superuser_reserved_connections = 3 # (change requires restart)
- unix_socket_directory = '/var/run/postgresql' # (change requires restart)
+ unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
+                                                 # (change requires restart)
  #unix_socket_group = '' # (change requires restart)
  #unix_socket_permissions = 0777 # begin with 0 to use octal notation
                                  # (change requires restart)
@@ -80,10 +81,14 @@ unix_socket_directory = '/var/run/postgresql' # (change requires restart)
  # - Security and Authentication -

  #authentication_timeout = 1min # 1s-600s
- #ssl = off # (change requires restart)
- #ssl_ciphers = 'ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH' # allowed SSL ciphers
+ ssl = true # (change requires restart)
+ #ssl_ciphers = 'DEFAULT:!LOW:!EXP:!MD5:@STRENGTH' # allowed SSL ciphers
                                                    # (change requires restart)
  #ssl_renegotiation_limit = 512MB # amount of data between renegotiations
+ ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem' # (change requires restart)
+ ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key' # (change requires restart)
+ #ssl_ca_file = '' # (change requires restart)
+ #ssl_crl_file = '' # (change requires restart)
  #password_encryption = on
  #db_user_namespace = off

@@ -109,7 +114,7 @@ unix_socket_directory = '/var/run/postgresql' # (change requires restart)

  # - Memory -

- shared_buffers = 28MB # min 128kB
+ shared_buffers = 128MB # min 128kB
                        # (change requires restart)
  #temp_buffers = 8MB # min 800kB
  #max_prepared_transactions = 0 # zero disables the feature
@@ -122,6 +127,11 @@ shared_buffers = 28MB # min 128kB
  #maintenance_work_mem = 16MB # min 1MB
  #max_stack_depth = 2MB # min 100kB

+ # - Disk -
+
+ #temp_file_limit = -1 # limits per-session temp file space
+                       # in kB, or -1 for no limit
+
  # - Kernel Resource Usage -

  #max_files_per_process = 1000 # min 25
@@ -130,7 +140,7 @@ shared_buffers = 28MB # min 128kB

  # - Cost-Based Vacuum Delay -

- #vacuum_cost_delay = 0ms # 0-100 milliseconds
+ #vacuum_cost_delay = 0 # 0-100 milliseconds
  #vacuum_cost_page_hit = 1 # 0-10000 credits
  #vacuum_cost_page_miss = 10 # 0-10000 credits
  #vacuum_cost_page_dirty = 20 # 0-10000 credits
@@ -144,7 +154,7 @@ shared_buffers = 28MB # min 128kB

  # - Asynchronous Behavior -

- #effective_io_concurrency = 1 # 1-1000. 0 disables prefetching
+ #effective_io_concurrency = 1 # 1-1000; 0 disables prefetching


  #------------------------------------------------------------------------------
@@ -156,7 +166,8 @@ shared_buffers = 28MB # min 128kB
  wal_level = hot_standby # minimal, archive, or hot_standby
                          # (change requires restart)
  #fsync = on # turns forced synchronization on or off
- #synchronous_commit = on # synchronization level; on, off, or local
+ #synchronous_commit = on # synchronization level;
+                          # off, local, remote_write, or on
  #wal_sync_method = fsync # the default is the first option
                           # supported by the operating system:
                           # open_datasync
@@ -184,6 +195,9 @@ wal_level = hot_standby # minimal, archive, or hot_standby
  #archive_mode = off # allows archiving to be done
                      # (change requires restart)
  #archive_command = '' # command to use to archive a logfile segment
+                       # placeholders: %p = path of file to archive
+                       # %f = file name only
+                       # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
  #archive_timeout = 0 # force a logfile segment switch after this
                       # number of seconds; 0 disables

@@ -194,36 +208,32 @@ wal_level = hot_standby # minimal, archive, or hot_standby
  # REPLICATION
  #------------------------------------------------------------------------------

- # - Master Server -
+ # - Sending Server(s) -

- # These settings are ignored on a standby server
+ # Set these on the master and on any standby that will send replication data.

  max_wal_senders = 5 # max number of walsender processes
                      # (change requires restart)
- #wal_sender_delay = 1s # walsender cycle time, 1-10000 milliseconds
-
- # To prevent the primary server from removing the WAL segments required for
- # the standby server before shipping them, set the minimum number of segments
- # retained in the pg_xlog directory. At least wal_keep_segments should be
- # larger than the number of segments generated between the beginning of
- # online-backup and the startup of streaming replication. If you enable WAL
- # archiving to an archive directory accessible from the standby, this may
- # not be necessary.
  wal_keep_segments = 128 # in logfile segments, 16MB each; 0 disables
- #vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
- #replication_timeout = 60s # in milliseconds; 0 disables
- #synchronous_standby_names = '' # standby servers that provide sync rep
-                                 # comma-separated list of application_name
-                                 # from standby(s); '*' = all
+ #wal_sender_timeout = 60s # in milliseconds; 0 disables
+
+ # - Master Server -
+
+ # These settings are ignored on a standby server.

  <%- if rubber_env.postgresql_synchronous_replication %>
  synchronous_standby_names = '<%= rubber_env.app_name %>'
  <%- end %>
+ #synchronous_standby_names = '' # standby servers that provide sync rep
+                                 # comma-separated list of application_name
+                                 # from standby(s); '*' = all
+ #vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
+

  <%- else %>
  # - Standby Servers -

- # These settings are ignored on a master server
+ # These settings are ignored on a master server.

  hot_standby = on # "on" allows queries during recovery
                   # (change requires restart)
@@ -237,6 +247,9 @@ max_standby_streaming_delay = 10min # max delay before canceling queries
                                       # 0 disables
  hot_standby_feedback = on # send info from standby to prevent
                            # query conflicts
+ #wal_receiver_timeout = 60s # time that receiver waits for
+                             # communication from master
+                             # in milliseconds; 0 disables
  <%- end %>

  #------------------------------------------------------------------------------
@@ -249,6 +262,7 @@ hot_standby_feedback = on # send info from standby to prevent
  #enable_hashagg = on
  #enable_hashjoin = on
  #enable_indexscan = on
+ #enable_indexonlyscan = on
  #enable_material = on
  #enable_mergejoin = on
  #enable_nestloop = on
@@ -327,11 +341,8 @@ hot_standby_feedback = on # send info from standby to prevent
  #syslog_facility = 'LOCAL0'
  #syslog_ident = 'postgres'

- #silent_mode = off # Run server silently.
-                    # DO NOT USE without syslog or
-                    # logging_collector
-                    # (change requires restart)
-
+ # This is only relevant when logging to eventlog (win32):
+ #event_source = 'PostgreSQL'

  # - When to Log -

@@ -361,12 +372,12 @@ hot_standby_feedback = on # send info from standby to prevent
                                   # panic

  #log_min_error_statement = error # values in order of decreasing detail:
-                                  # debug5
+                                  # debug5
                                   # debug4
                                   # debug3
                                   # debug2
                                   # debug1
-                                  # info
+                                  # info
                                   # notice
                                   # warning
                                   # error
@@ -417,7 +428,7 @@ log_line_prefix = '%t ' # special values:
  log_temp_files = 0 # log temporary files equal or larger
                     # than the specified size in kilobytes;
                     # -1 disables, 0 logs all temp files
- #log_timezone = '(defaults to server environment setting)'
+ log_timezone = 'localtime'


  #------------------------------------------------------------------------------
@@ -427,9 +438,10 @@ log_temp_files = 0 # log temporary files equal or larger
  # - Query/Index Statistics Collector -

  #track_activities = on
- track_counts = on
+ #track_counts = on
+ #track_io_timing = off
  #track_functions = none # none, pl, all
- #track_activity_query_size = 1024 # (change requires restart)
+ #track_activity_query_size = 1024 # (change requires restart)
  #update_process_title = on
  #stats_temp_directory = 'pg_stat_tmp'

@@ -446,7 +458,7 @@ track_counts = on
  # AUTOVACUUM PARAMETERS
  #------------------------------------------------------------------------------

- autovacuum = on # Enable autovacuum subprocess? 'on'
+ #autovacuum = on # Enable autovacuum subprocess? 'on'
                  # requires track_counts to also be on.
  #log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
                                    # their durations, > 0 logs only
@@ -463,6 +475,9 @@ autovacuum = on # Enable autovacuum subprocess? 'on'
  #autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
  #autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
                                         # (change requires restart)
+ #autovacuum_multixact_freeze_max_age = 400000000 # maximum Multixact age
+                                                  # before forced vacuum
+                                                  # (change requires restart)
  #autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for
                                       # autovacuum, in milliseconds;
                                       # -1 means use vacuum_cost_delay
@@ -487,8 +502,11 @@ autovacuum = on # Enable autovacuum subprocess? 'on'
  #default_transaction_deferrable = off
  #session_replication_role = 'origin'
  #statement_timeout = 0 # in milliseconds, 0 is disabled
+ #lock_timeout = 0 # in milliseconds, 0 is disabled
  #vacuum_freeze_min_age = 50000000
  #vacuum_freeze_table_age = 150000000
+ #vacuum_multixact_freeze_min_age = 5000000
+ #vacuum_multixact_freeze_table_age = 150000000
  #bytea_output = 'hex' # hex, escape
  #xmlbinary = 'base64'
  #xmloption = 'content'
@@ -497,7 +515,7 @@ autovacuum = on # Enable autovacuum subprocess? 'on'

  datestyle = 'iso, mdy'
  #intervalstyle = 'postgres'
- #timezone = '(defaults to server environment setting)'
+ timezone = 'localtime'
  #timezone_abbreviations = 'Default' # Select the set of available time zone
                                      # abbreviations. Currently, there are
                                      # Default
@@ -538,6 +556,7 @@ default_text_search_config = 'pg_catalog.english'
  #max_pred_locks_per_transaction = 64 # min 10
                                       # (change requires restart)

+
  #------------------------------------------------------------------------------
  # VERSION/PLATFORM COMPATIBILITY
  #------------------------------------------------------------------------------
@@ -551,7 +570,7 @@ default_text_search_config = 'pg_catalog.english'
  #lo_compat_privileges = off
  #quote_all_identifiers = off
  #sql_inheritance = on
- standard_conforming_strings = off
+ #standard_conforming_strings = on
  #synchronize_seqscans = on

  # - Other Platforms and Clients -
@@ -563,12 +582,25 @@ standard_conforming_strings = off
  # ERROR HANDLING
  #------------------------------------------------------------------------------

- #exit_on_error = off # terminate session on any error?
- #restart_after_crash = on # reinitialize after backend crash?
+ #exit_on_error = off # terminate session on any error?
+ #restart_after_crash = on # reinitialize after backend crash?
+
+
+ #------------------------------------------------------------------------------
+ # CONFIG FILE INCLUDES
+ #------------------------------------------------------------------------------
+
+ # These options allow settings to be loaded from files other than the
+ # default postgresql.conf.
+
+ #include_dir = 'conf.d' # include files ending in '.conf' from
+                         # directory 'conf.d'
+ #include_if_exists = 'exists.conf' # include file only if it exists
+ #include = 'special.conf' # include file


  #------------------------------------------------------------------------------
  # CUSTOMIZED OPTIONS
  #------------------------------------------------------------------------------

- #custom_variable_classes = '' # list of custom variable class names
+ # Add settings for extensions here
@@ -75,7 +75,7 @@
  # just after or just before the given target, respectively).
  #
  #
- #recovery_target_name = '' # e.g. 'daily backup 2011-01-26'
+ #recovery_target_name = '' # e.g. 'daily backup 2011-01-26'
  #
  #recovery_target_time = '' # e.g. '2004-07-14 22:39:00 EST'
  #
@@ -132,4 +132,4 @@ trigger_file = '<%= rubber_env.postgresql_data_dir %>/trigger_file'
  #
  # Hot Standby related parameters are listed in postgresql.conf
  #
- #---------------------------------------------------------------------------
+ #---------------------------------------------------------------------------
@@ -18,7 +18,7 @@ db_backup_cmd: "set -e -o pipefail; nice sudo -u postgres pg_dumpall -U postgres
  # its standard input
  db_restore_cmd: "zcat - | psql -U %user% -h %host% %name%"

- postgresql_ver: 9.1
+ postgresql_ver: 9.3
  postgresql_conf_dir: "/etc/postgresql/#{postgresql_ver}/main"
  postgresql_data_dir: "/mnt/postgresql/#{postgresql_ver}/data"
  postgresql_archive_dir: "/mnt/postgresql/#{postgresql_ver}/archive"
@@ -33,7 +33,7 @@ role_dependencies:
  "db:primary=true": [postgresql, postgresql_master]
  db: [postgresql, postgresql_slave]

- packages: [postgresql-client, libpq-dev]
+ packages: ["postgresql-client-#{postgresql_ver}", libpq-dev]
  gems: [pg]

  roles:
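Pinning the client package to postgresql_ver is the companion to the 9.3 upgrade: the unversioned postgresql-client package could lag behind the server, leaving pg_dump and psql mismatched. A sketch of how the interpolated package list expands:

    postgresql_ver = 9.3
    puts ["postgresql-client-#{postgresql_ver}", "libpq-dev"].inspect
    # => ["postgresql-client-9.3", "libpq-dev"]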
@@ -76,7 +76,7 @@ tcp-backlog 511
  # on a unix socket when not specified.
  #
  # unixsocket /tmp/redis.sock
- # unixsocketperm 755
+ # unixsocketperm 700

  # Close the connection after a client is idle for N seconds (0 to disable)
  timeout 300
@@ -202,9 +202,18 @@ dir <%= rubber_env.redis_db_dir %>
  ################################# REPLICATION #################################

  # Master-Slave replication. Use slaveof to make a Redis instance a copy of
- # another Redis server. Note that the configuration is local to the slave
- # so for example it is possible to configure the slave to save the DB with a
- # different interval, or to listen to another port, and so on.
+ # another Redis server. A few things to understand ASAP about Redis replication.
+ #
+ # 1) Redis replication is asynchronous, but you can configure a master to
+ #    stop accepting writes if it appears to be not connected with at least
+ #    a given number of slaves.
+ # 2) Redis slaves are able to perform a partial resynchronization with the
+ #    master if the replication link is lost for a relatively small amount of
+ #    time. You may want to configure the replication backlog size (see the next
+ #    sections of this file) with a sensible value depending on your needs.
+ # 3) Replication is automatic and does not need user intervention. After a
+ #    network partition slaves automatically try to reconnect to masters
+ #    and resynchronize with them.
  #
  # slaveof <masterip> <masterport>

@@ -536,6 +545,30 @@ no-appendfsync-on-rewrite no
  auto-aof-rewrite-percentage 100
  auto-aof-rewrite-min-size 64mb

+ # An AOF file may be found to be truncated at the end during the Redis
+ # startup process, when the AOF data gets loaded back into memory.
+ # This may happen when the system where Redis is running
+ # crashes, especially when an ext4 filesystem is mounted without the
+ # data=ordered option (however this can't happen when Redis itself
+ # crashes or aborts but the operating system still works correctly).
+ #
+ # Redis can either exit with an error when this happens, or load as much
+ # data as possible (the default now) and start if the AOF file is found
+ # to be truncated at the end. The following option controls this behavior.
+ #
+ # If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+ # the Redis server starts emitting a log to inform the user of the event.
+ # Otherwise if the option is set to no, the server aborts with an error
+ # and refuses to start. When the option is set to no, the user requires
+ # to fix the AOF file using the "redis-check-aof" utility before to restart
+ # the server.
+ #
+ # Note that if the AOF file will be found to be corrupted in the middle
+ # the server will still exit with an error. This option only applies when
+ # Redis will try to read more data from the AOF file but not enough bytes
+ # will be found.
+ aof-load-truncated no
+
  ################################ LUA SCRIPTING ###############################

  # Max execution time of a Lua script in milliseconds.
@@ -578,10 +611,31 @@ slowlog-log-slower-than 10000
  # You can reclaim memory used by the slow log with SLOWLOG RESET.
  slowlog-max-len 128

+ ################################ LATENCY MONITOR ##############################
+
+ # The Redis latency monitoring subsystem samples different operations
+ # at runtime in order to collect data related to possible sources of
+ # latency of a Redis instance.
+ #
+ # Via the LATENCY command this information is available to the user that can
+ # print graphs and obtain reports.
+ #
+ # The system only logs operations that were performed in a time equal or
+ # greater than the amount of milliseconds specified via the
+ # latency-monitor-threshold configuration directive. When its value is set
+ # to zero, the latency monitor is turned off.
+ #
+ # By default latency monitoring is disabled since it is mostly not needed
+ # if you don't have latency issues, and collecting data has a performance
+ # impact, that while very small, can be measured under big load. Latency
+ # monitoring can easily be enalbed at runtime using the command
+ # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+ latency-monitor-threshold 0
+
  ############################# Event notification ##############################

  # Redis can notify Pub/Sub clients about events happening in the key space.
- # This feature is documented at http://redis.io/topics/keyspace-events
+ # This feature is documented at http://redis.io/topics/notifications
  #
  # For instance if keyspace events notification is enabled, and a client
  # performs a DEL operation on key "foo" stored in the Database 0, two
@@ -651,6 +705,20 @@ set-max-intset-entries 512
  zset-max-ziplist-entries 128
  zset-max-ziplist-value 64

+ # HyperLogLog sparse representation bytes limit. The limit includes the
+ # 16 bytes header. When an HyperLogLog using the sparse representation crosses
+ # this limit, it is converted into the dense representation.
+ #
+ # A value greater than 16000 is totally useless, since at that point the
+ # dense representation is more memory efficient.
+ #
+ # The suggested value is ~ 3000 in order to have the benefits of
+ # the space efficient encoding without slowing down too much PFADD,
+ # which is O(N) with the sparse encoding. The value can be raised to
+ # ~ 10000 when CPU is not a concern, but space is, and the data set is
+ # composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+ hll-sparse-max-bytes 3000
+
  # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
  # order to help rehashing the main Redis hash table (the one mapping top-level
  # keys to values). The hash table implementation Redis uses (see dict.c)
@@ -678,8 +746,8 @@ activerehashing yes
  #
  # The limit can be set differently for the three different classes of clients:
  #
- # normal -> normal clients
- # slave  -> slave clients and MONITOR clients
+ # normal -> normal clients including MONITOR clients
+ # slave  -> slave clients
  # pubsub -> clients subscribed to at least one pubsub channel or pattern
  #
  # The syntax of every client-output-buffer-limit directive is the following:
@@ -1,4 +1,4 @@
- redis_server_version: 2.8.8
+ redis_server_version: 2.8.17
  redis_server_pid_file: /var/run/redis-server.pid
  redis_server_conf_file: /etc/redis.conf
  redis_server_log_file: /var/log/redis-server.log
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: rubber
  version: !ruby/object:Gem::Version
-   version: 2.13.1
+   version: 2.14.0
  platform: ruby
  authors:
  - Matt Conway
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2014-10-02 00:00:00.000000000 Z
+ date: 2014-10-14 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    requirement: !ruby/object:Gem::Requirement
@@ -560,7 +560,6 @@ files:
  - templates/postgresql/config/rubber/deploy-postgresql.rb
  - templates/postgresql/config/rubber/role/postgresql/crontab
  - templates/postgresql/config/rubber/role/postgresql/pg_hba.conf
- - templates/postgresql/config/rubber/role/postgresql/postgresql-sysctl.conf
  - templates/postgresql/config/rubber/role/postgresql/postgresql.conf
  - templates/postgresql/config/rubber/role/postgresql_slave/recovery.conf
  - templates/postgresql/config/rubber/rubber-postgresql.yml
@@ -1,7 +0,0 @@
- <%
-   @path = "/etc/sysctl.d/30-postgresql.conf"
-   @post = "service procps start"
- %>
- kernel.shmmax=1610612736
- kernel.shmall=393216
- kernel.sem=2500 320000 320 1280