autocompl 0.2.1 → 0.2.2
- checksums.yaml +4 -4
- data/lib/autocompl/repository.rb +11 -1
- data/lib/autocompl/version.rb +1 -1
- data/test/dummy/log/development.log +467 -0
- data/test/dummy/log/test.log +3 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/bin/console +23 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/cache/pg-0.19.0.gem +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/cache/pq-0.0.1.gem +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/extensions/x86_64-darwin-15/2.3.0-static/pg-0.19.0/gem.build_complete +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/extensions/x86_64-darwin-15/2.3.0-static/pg-0.19.0/gem_make.out +78 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/extensions/x86_64-darwin-15/2.3.0-static/pg-0.19.0/mkmf.log +1346 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/extensions/x86_64-darwin-15/2.3.0-static/pg-0.19.0/pg_ext.bundle +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/BSDL +22 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ChangeLog +6378 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/Contributors.rdoc +46 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/History.rdoc +363 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/LICENSE +56 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/Manifest.txt +85 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/POSTGRES +23 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/README-OS_X.rdoc +68 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/README-Windows.rdoc +56 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/README.ja.rdoc +14 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/README.rdoc +168 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/Rakefile +216 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/Rakefile.cross +301 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/Makefile +261 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/errorcodes.def +947 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/errorcodes.rb +45 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/errorcodes.txt +467 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/extconf.h +38 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/extconf.rb +112 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/gvl_wrappers.c +13 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/gvl_wrappers.h +257 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/gvl_wrappers.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg.c +667 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg.h +395 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_binary_decoder.c +162 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_binary_decoder.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_binary_encoder.c +162 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_binary_encoder.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_coder.c +500 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_coder.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_connection.c +4102 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_connection.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_copy_coder.c +591 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_copy_coder.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_errors.c +95 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_errors.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_ext.bundle +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_result.c +1271 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_result.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_text_decoder.c +421 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_text_decoder.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_text_encoder.c +683 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_text_encoder.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map.c +159 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map_all_strings.c +116 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map_all_strings.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map_by_class.c +239 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map_by_class.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map_by_column.c +312 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map_by_column.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map_by_mri_type.c +284 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map_by_mri_type.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map_by_oid.c +355 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map_by_oid.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map_in_ruby.c +299 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/pg_type_map_in_ruby.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/util.c +149 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/util.h +65 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/util.o +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/vc/pg.sln +26 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/vc/pg_18/pg.vcproj +216 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/ext/vc/pg_19/pg_19.vcproj +209 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/lib/pg.rb +64 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/lib/pg/basic_type_mapping.rb +426 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/lib/pg/coder.rb +83 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/lib/pg/connection.rb +271 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/lib/pg/constants.rb +11 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/lib/pg/exceptions.rb +11 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/lib/pg/result.rb +30 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/lib/pg/text_decoder.rb +51 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/lib/pg/text_encoder.rb +35 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/lib/pg/type_map_by_column.rb +15 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/lib/pg_ext.bundle +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/array_insert.rb +20 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/async_api.rb +106 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/async_copyto.rb +39 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/async_mixed.rb +56 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/check_conn.rb +21 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/copyfrom.rb +81 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/copyto.rb +19 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/cursor.rb +21 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/disk_usage_report.rb +186 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/issue-119.rb +94 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/losample.rb +69 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/minimal-testcase.rb +17 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/notify_wait.rb +72 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/pg_statistics.rb +294 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/replication_monitor.rb +231 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/test_binary_values.rb +33 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/wal_shipper.rb +434 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/warehouse_partitions.rb +320 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/data/expected_trace.out +26 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/data/random_binary_data +0 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/helpers.rb +352 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/pg/basic_type_mapping_spec.rb +305 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/pg/connection_spec.rb +1676 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/pg/result_spec.rb +449 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/pg/type_map_by_class_spec.rb +138 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/pg/type_map_by_column_spec.rb +222 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/pg/type_map_by_mri_type_spec.rb +136 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/pg/type_map_by_oid_spec.rb +149 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/pg/type_map_in_ruby_spec.rb +164 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/pg/type_map_spec.rb +22 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/pg/type_spec.rb +777 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/spec/pg_spec.rb +50 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pq-0.0.1/Gemfile +4 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pq-0.0.1/LICENSE.txt +22 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pq-0.0.1/README.md +76 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pq-0.0.1/Rakefile +1 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pq-0.0.1/bin/console +7 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pq-0.0.1/lib/pq.rb +99 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pq-0.0.1/pq.gemspec +29 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pq-0.0.1/spec/helpers.rb +10 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pq-0.0.1/spec/queue_spec.rb +84 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/specifications/pg-0.19.0.gemspec +63 -0
- data/test/dummy/vendor/bundle/ruby/2.3.0/specifications/pq-0.0.1.gemspec +49 -0
- metadata +253 -1
data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/replication_monitor.rb
@@ -0,0 +1,231 @@
#!/usr/bin/env ruby
# vim: set noet nosta sw=4 ts=4 :
#
# Get the current WAL segment and offset from a master postgresql
# server, and compare slave servers to see how far behind they
# are in MB. This script should be easily modified for use with
# Nagios/Mon/Monit/Zabbix/whatever, or wrapping it in a display loop,
# and is suitable for both WAL shipping or streaming forms of replication.
#
# Mahlon E. Smith <mahlon@martini.nu>
#
# First argument is the master server, all other arguments are treated
# as slave machines.
#
#   db_replication.monitor db-master.example.com ...
#

begin
    require 'ostruct'
    require 'optparse'
    require 'pathname'
    require 'etc'
    require 'pg'
    require 'pp'

rescue LoadError # 1.8 support
    unless Object.const_defined?( :Gem )
        require 'rubygems'
        retry
    end
    raise
end


### A class to encapsulate the PG handles.
###
class PGMonitor

    VERSION = %q$Id: replication_monitor.rb,v 36ca5b412583 2012/04/17 23:32:25 mahlon $

    # When to consider a slave as 'behind', measured in WAL segments.
    # The default WAL segment size is 16, so we'll alert after
    # missing two WAL files worth of data.
    #
    LAG_ALERT = 32

    ### Create a new PGMonitor object.
    ###
    def initialize( opts, hosts )
        @opts        = opts
        @master      = hosts.shift
        @slaves      = hosts
        @current_wal = {}
        @failures    = []
    end

    attr_reader :opts, :current_wal, :master, :slaves, :failures


    ### Perform the connections and check the lag.
    ###
    def check
        # clear prior failures, get current xlog info
        @failures = []
        return unless self.get_current_wal

        # check all slaves
        self.slaves.each do |slave|
            begin
                slave_db = PG.connect(
                    :dbname   => self.opts.database,
                    :host     => slave,
                    :port     => self.opts.port,
                    :user     => self.opts.user,
                    :password => self.opts.pass,
                    :sslmode  => 'prefer'
                )

                xlog = slave_db.exec( 'SELECT pg_last_xlog_receive_location()' ).getvalue( 0, 0 )
                slave_db.close

                lag_in_megs = ( self.find_lag( xlog ).to_f / 1024 / 1024 ).abs
                if lag_in_megs >= LAG_ALERT
                    failures << { :host => slave,
                        :error => "%0.2fMB behind the master." % [ lag_in_megs ] }
                end
            rescue => err
                failures << { :host => slave, :error => err.message }
            end
        end
    end


    #########
    protected
    #########

    ### Ask the master for the current xlog information, to compare
    ### to slaves. Returns true on success. On failure, populates
    ### the failures array and returns false.
    ###
    def get_current_wal
        master_db = PG.connect(
            :dbname   => self.opts.database,
            :host     => self.master,
            :port     => self.opts.port,
            :user     => self.opts.user,
            :password => self.opts.pass,
            :sslmode  => 'prefer'
        )

        self.current_wal[ :segbytes ] = master_db.exec( 'SHOW wal_segment_size' ).
            getvalue( 0, 0 ).sub( /\D+/, '' ).to_i << 20

        current = master_db.exec( 'SELECT pg_current_xlog_location()' ).getvalue( 0, 0 )
        self.current_wal[ :segment ], self.current_wal[ :offset ] = current.split( /\// )

        master_db.close
        return true

    # If we can't get any of the info from the master, then there is no
    # point in a comparison with slaves.
    #
    rescue => err
        self.failures << { :host => self.master,
            :error => 'Unable to retrieve required info from the master (%s)' % [ err.message ] }

        return false
    end


    ### Given an +xlog+ position from a slave server, return
    ### the number of bytes the slave needs to replay before it
    ### is caught up to the master.
    ###
    def find_lag( xlog )
        s_segment, s_offset = xlog.split( /\// )
        m_segment  = self.current_wal[ :segment ]
        m_offset   = self.current_wal[ :offset ]
        m_segbytes = self.current_wal[ :segbytes ]

        return (( m_segment.hex - s_segment.hex ) * m_segbytes) + ( m_offset.hex - s_offset.hex )
    end

end


### Parse command line arguments. Return a struct of global options.
###
def parse_args( args )
    options          = OpenStruct.new
    options.database = 'postgres'
    options.port     = 5432
    options.user     = Etc.getpwuid( Process.uid ).name
    options.sslmode  = 'prefer'

    opts = OptionParser.new do |opts|
        opts.banner = "Usage: #{$0} [options] <master> <slave> [slave2, slave3...]"

        opts.separator ''
        opts.separator 'Connection options:'

        opts.on( '-d', '--database DBNAME',
                "specify the database to connect to (default: \"#{options.database}\")" ) do |db|
            options.database = db
        end

        opts.on( '-h', '--host HOSTNAME', 'database server host' ) do |host|
            options.host = host
        end

        opts.on( '-p', '--port PORT', Integer,
                "database server port (default: \"#{options.port}\")" ) do |port|
            options.port = port
        end

        opts.on( '-U', '--user NAME',
                "database user name (default: \"#{options.user}\")" ) do |user|
            options.user = user
        end

        opts.on( '-W', 'force password prompt' ) do |pw|
            print 'Password: '
            begin
                system 'stty -echo'
                options.pass = $stdin.gets.chomp
            ensure
                system 'stty echo'
                puts
            end
        end

        opts.separator ''
        opts.separator 'Other options:'

        opts.on_tail( '--help', 'show this help, then exit' ) do
            $stderr.puts opts
            exit
        end

        opts.on_tail( '--version', 'output version information, then exit' ) do
            puts PGMonitor::VERSION
            exit
        end
    end

    opts.parse!( args )
    return options
end


if __FILE__ == $0
    opts = parse_args( ARGV )
    raise ArgumentError, "At least two PostgreSQL servers are required." if ARGV.length < 2
    mon = PGMonitor.new( opts, ARGV )

    mon.check
    if mon.failures.empty?
        puts "All is well!"
        exit 0
    else
        puts "Database replication delayed or broken."
        mon.failures.each do |bad|
            puts "%s: %s" % [ bad[ :host ], bad[ :error ] ]
        end
        exit 1
    end
end
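As a rough illustration of the arithmetic in PGMonitor#find_lag above, here is a minimal standalone sketch with hypothetical xlog positions; the 16MB segment size and the two positions are made up for the example, while the real script reads the segment size from SHOW wal_segment_size.

# Standalone sketch of the lag arithmetic in PGMonitor#find_lag, using
# hypothetical xlog positions. An xlog location such as "2/A0000000" is
# "<segment>/<offset>" in hex; the byte lag is the segment delta times the
# WAL segment size plus the offset delta.
wal_segment_size = 16 << 20                            # 16MB, the PostgreSQL default

m_segment, m_offset = '2/A0000000'.split( '/' )        # hypothetical master position
s_segment, s_offset = '2/90000000'.split( '/' )        # hypothetical slave position

lag_bytes = ( m_segment.hex - s_segment.hex ) * wal_segment_size +
            ( m_offset.hex - s_offset.hex )
puts "%0.2fMB behind the master." % [ lag_bytes.to_f / 1024 / 1024 ]   # => 256.00MB behind the master.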
data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/test_binary_values.rb
@@ -0,0 +1,33 @@
#!/usr/bin/env ruby1.9.1

require 'pg'

db = PG.connect( :dbname => 'test' )
db.exec "DROP TABLE IF EXISTS test"
db.exec "CREATE TABLE test (a INTEGER, b BYTEA)"

a = 42
b = [1, 2, 3]
db.exec "INSERT INTO test(a, b) VALUES($1::int, $2::bytea)",
    [a, {:value => b.pack('N*'), :format => 1}]

db.exec( "SELECT a::int, b::bytea FROM test LIMIT 1", [], 1 ) do |res|

    res.nfields.times do |i|
        puts "Field %d is: %s, a %s (%s) column from table %p" % [
            i,
            res.fname( i ),
            db.exec( "SELECT format_type($1,$2)", [res.ftype(i), res.fmod(1)] ).getvalue(0,0),
            res.fformat( i ).zero? ? "string" : "binary",
            res.ftable( i ),
        ]
    end

    res.each do |row|
        puts "a = #{row['a'].inspect}"
        puts "a (unpacked) = #{row['a'].unpack('N*').inspect}"
        puts "b = #{row['b'].unpack('N*').inspect}"
    end
end
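The sample above hinges on Ruby's pack/unpack round-trip for the binary BYTEA parameter; a minimal sketch of just that step, with no database connection:

# Sketch of the pack/unpack round-trip test_binary_values.rb relies on:
# the integers are packed into a big-endian binary string before being
# bound to the $2::bytea parameter (:format => 1), and recovered the same way.
values = [1, 2, 3]
packed = values.pack('N*')     # 12 bytes of 32-bit big-endian integers
packed.unpack('N*')            # => [1, 2, 3]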
data/test/dummy/vendor/bundle/ruby/2.3.0/gems/pg-0.19.0/sample/wal_shipper.rb
@@ -0,0 +1,434 @@
#!/usr/bin/env ruby
#
# A script to wrap ssh and rsync for PostgreSQL WAL files shipping.
# Mahlon E. Smith <mahlon@martini.nu>
#
# Based off of Joshua Drake's PITRTools concept, but with some important
# differences:
#
#  - Only supports PostgreSQL >= 8.3
#  - No support for rsync version < 3
#  - Only shipping, no client side sync (too much opportunity for failure,
#    and it's easy to get a base backup manually)
#  - WAL files are only stored once, regardless of how many
#    slaves are configured or not responding, and are removed from
#    the master when they are no longer needed.
#  - Each slave can have completely distinct settings, instead
#    of a single set of options applied to all slaves
#  - slave sync can be individually paused from the master
#  - can run synchronously, or if you have a lot of slaves, threaded async mode
#  - It's ruby, instead of python. :)
#
# wal_shipper is configurable via an external YAML file, and will create
# a template on its first run -- you'll need to modify it! It expects
# a directory structure like so:
#
#   postgres/
#       data/...
#       bin/wal_shipper.rb
#       etc/wal_shipper.conf   <-- YAML settings!
#       wal/
#
# It should be loaded from the PostgreSQL master's postgresql.conf
# as such, after putting it into your postgres user homedir under 'bin':
#
#   archive_command = '/path/to/postgres_home/bin/wal_shipper.rb %p'
#
# Passwordless ssh keys need to be set up for the postgres user on all
# participating masters and slaves.
#
# You can use any replay method of your choosing on the slaves.
# Here's a nice example using pg_standby, to be put in data/recovery.conf:
#
#   restore_command = 'pg_standby -t /tmp/pgrecovery.done -s5 -w0 -c /path/to/postgres_home/wal_files/ %f %p %r'
#
# Or, here's another simple alternative data/recovery.conf, for using WAL shipping
# alongside streaming replication:
#
#   standby_mode = 'on'
#   primary_conninfo = 'host=master.example.com port=5432 user=repl password=XXXXXXX'
#   restore_command = 'cp /usr/local/pgsql/wal/%f %p'
#   trigger_file = '/usr/local/pgsql/pg.become_primary'
#   archive_cleanup_command = '/usr/local/bin/pg_archivecleanup /usr/local/pgsql/wal %r'
#
#========================================================================================


require 'pathname'
require 'yaml'
require 'fileutils'
require 'ostruct'


### Encapsulate WAL shipping functionality.
###
module WalShipper

    ### Send messages to the PostgreSQL log files.
    ###
    def log( msg )
        return unless @debug
        puts "WAL Shipper: %s" % [ msg ]
    end


    ### An object that represents a single destination from the
    ### configuration file.
    ###
    class Destination < OpenStruct
        include WalShipper

        ### Create a new WalShipper::Destination object.
        def initialize( dest, debug=false )
            @debug = debug
            super( dest )
            self.validate
        end

        #########
        protected
        #########


        ### Check for required keys and normalize various keys.
        ###
        def validate
            # Check for required destination keys
            %w[ label kind ].each do |key|
                if self.send( key.to_sym ).nil?
                    self.log "Destination %p missing required '%s' key." % [ self, key ]
                    self.invalid = true
                end
            end

            # Ensure paths are Pathnames for the 'file' destination type.
            self.path = Pathname.new( self.path ) if self.kind == 'file'

            if self.kind == 'rsync-ssh'
                self.port ||= 22
                self.user = self.user ? "#{self.user}@" : ''
            end
        end
    end # Class Destination



    ### Class for creating new Destination objects and determining how to
    ### ship WAL files to them.
    ###
    class Dispatcher
        include WalShipper

        ### Create a new Shipper object, given a +conf+ hash and a +wal+ file
        ### Pathname object.
        ###
        def initialize( wal, conf )
            # Make the config keys instance variables.
            conf.each_pair {|key, val| self.instance_variable_set( "@#{key}", val ) }

            # Spool directory check.
            #
            @spool = Pathname.new( @spool )
            @spool.exist? or raise "The configured spool directory (%s) doesn't exist." % [ @spool ]

            # Stop right away if we have disabled shipping.
            #
            unless @enabled
                self.log "WAL shipping is disabled, queuing segment %s" % [ wal.basename ]
                exit 1
            end

            # Instantiate Destination objects, creating new spool directories
            # for each.
            #
            @destinations.
                collect!{|dest| WalShipper::Destination.new( dest, @debug ) }.
                reject {|dest| dest.invalid }.
                collect do |dest|
                    dest.spool = @spool + dest.label
                    dest.spool.mkdir( 0711 ) unless dest.spool.exist?
                    dest
                end

            # Put the WAL file into the spool for processing!
            #
            @waldir = @spool + 'wal_segments'
            @waldir.mkdir( 0711 ) unless @waldir.exist?

            self.log "Copying %s to %s" % [ wal.basename, @waldir ]
            FileUtils::cp wal, @waldir

            # 'wal' now references the copy. The original is managed and auto-expired
            # by PostgreSQL when a new checkpoint segment is reached.
            @wal = @waldir + wal.basename
        end


        ### Create hardlinks for the WAL file into each of the destination directories
        ### for separate queueing and recording of what was shipped successfully.
        ###
        def link
            @destinations.each do |dest|
                self.log "Linking %s into %s" % [ @wal.basename, dest.spool.basename ]
                FileUtils::ln @wal, dest.spool, :force => true
            end
        end


        ### Decide to be synchronous or threaded, and delegate each destination
        ### to the proper ship method.
        ###
        def dispatch
            # Synchronous mode.
            #
            unless @async
                self.log "Performing a synchronous dispatch."
                @destinations.each {|dest| self.dispatch_dest( dest ) }
                return
            end

            tg = ThreadGroup.new

            # Async, one thread per destination
            #
            if @async_max.nil? || @async_max.to_i.zero?
                self.log "Performing an asynchronous dispatch: one thread per destination."
                @destinations.each do |dest|
                    t = Thread.new do
                        Thread.current.abort_on_exception = true
                        self.dispatch_dest( dest )
                    end
                    tg.add( t )
                end
                tg.list.each {|t| t.join }
                return
            end

            # Async, one thread per destination, in groups of async_max size.
            #
            self.log "Performing an asynchronous dispatch: one thread per destination, %d at a time." % [ @async_max ]
            all_dests   = @destinations.dup
            dest_chunks = []
            until all_dests.empty? do
                dest_chunks << all_dests.slice!( 0, @async_max )
            end

            dest_chunks.each do |chunk|
                chunk.each do |dest|
                    t = Thread.new do
                        Thread.current.abort_on_exception = true
                        self.dispatch_dest( dest )
                    end
                    tg.add( t )
                end

                tg.list.each {|t| t.join }
            end

            return
        end


        ### Remove any WAL segments no longer needed by slaves.
        ###
        def clean_spool
            total = 0
            @waldir.children.each do |wal|
                if wal.stat.nlink == 1
                    total += wal.unlink
                end
            end

            self.log "Removed %d WAL segment%s." % [ total, total == 1 ? '' : 's' ]
        end



        #########
        protected
        #########

        ### Send WAL segments to remote +dest+ via rsync+ssh.
        ### Passwordless keys between the user running this script (postmaster owner)
        ### and remote user need to be set up in advance.
        ###
        def ship_rsync_ssh( dest )
            if dest.host.nil?
                self.log "Destination %p missing required 'host' key. WAL is queued." % [ dest.host ]
                return
            end

            rsync_flags = '-zc'
            ssh_string = "%s -o ConnectTimeout=%d -o StrictHostKeyChecking=no -p %d" %
                [ @ssh, @ssh_timeout || 10, dest.port ]
            src_string = ''
            dst_string = "%s%s:%s/" % [ dest.user, dest.host, dest.path ]

            # If there are numerous files in the spool dir, it means there was
            # an error transferring to this host in the past. Try and ship all
            # WAL segments, instead of just the new one. PostgreSQL on the slave
            # side will "do the right thing" as they come in, regardless of
            # ordering.
            #
            if dest.spool.children.length > 1
                src_string = dest.spool.to_s + '/'
                rsync_flags << 'r'
            else
                src_string = dest.spool + @wal.basename
            end


            ship_wal_cmd = [
                @rsync,
                @debug ? (rsync_flags << 'vh') : (rsync_flags << 'q'),
                '--remove-source-files',
                '-e', ssh_string,
                src_string, dst_string
            ]

            self.log "Running command '%s'" % [ ship_wal_cmd.join(' ') ]
            system *ship_wal_cmd

            # Run external notification program on error, if one is configured.
            #
            unless $?.success?
                self.log "Ack! Error while shipping to %p, WAL is queued." % [ dest.label ]
                system @error_cmd, dest.label if @error_cmd
            end
        end


        ### Copy WAL segments to remote path as set in +dest+.
        ### This is useful for longer term PITR, copying to NFS shares, etc.
        ###
        def ship_file( dest )
            if dest.path.nil?
                self.log "Destination %p missing required 'path' key. WAL is queued." % [ dest ]
                return
            end
            dest.path.mkdir( 0711 ) unless dest.path.exist?

            # If there are numerous files in the spool dir, it means there was
            # an error transferring to this host in the past. Try and ship all
            # WAL segments, instead of just the new one. PostgreSQL on the slave
            # side will "do the right thing" as they come in, regardless of
            # ordering.
            #
            if dest.spool.children.length > 1
                dest.spool.children.each do |wal|
                    wal.unlink if self.copy_file( wal, dest.path, dest.label, dest.compress )
                end
            else
                wal = dest.spool + @wal.basename
                wal.unlink if self.copy_file( wal, dest.path, dest.label, dest.compress )
            end
        end


        ### Given a +wal+ Pathname, a +path+ destination, and the destination
        ### label, copy and optionally compress a WAL file.
        ###
        def copy_file( wal, path, label, compress=false )
            dest_file = path + wal.basename
            FileUtils::cp wal, dest_file
            if compress
                system *[ 'gzip', '-f', dest_file ]
                raise "Error while compressing: %s" % [ wal.basename ] unless $?.success?
            end
            self.log "Copied %s%s to %s." %
                [ wal.basename, compress ? ' (and compressed)' : '', path ]
            return true
        rescue => err
            self.log "Ack! Error while copying '%s' (%s) to %p, WAL is queued." %
                [ wal.basename, err.message, path ]
            system @error_cmd, label if @error_cmd
            return false
        end


        ### Figure out how to send the WAL file to its intended destination +dest+.
        ###
        def dispatch_dest( dest )
            if ! dest.enabled.nil? && ! dest.enabled
                self.log "Skipping explicitly disabled destination %p, WAL is queued." % [ dest.label ]
                return
            end

            # Send to the appropriate method. ( rsync-ssh --> ship_rsync_ssh )
            #
            meth = ( 'ship_' + dest.kind.gsub(/-/, '_') ).to_sym
            if WalShipper::Dispatcher.method_defined?( meth )
                self.send( meth, dest )
            else
                self.log "Unknown destination kind %p for %p. WAL is queued." % [ dest.kind, dest.label ]
            end
        end
    end
end

# Ship the WAL file!
#
if __FILE__ == $0
    CONFIG_DIR = Pathname.new( __FILE__ ).dirname.parent + 'etc'
    CONFIG     = CONFIG_DIR + 'wal_shipper.conf'

    unless CONFIG.exist?
        CONFIG_DIR.mkdir( 0711 ) unless CONFIG_DIR.exist?
        CONFIG.open('w') {|conf| conf.print(DATA.read) }
        CONFIG.chmod( 0644 )
        puts "No WAL shipping configuration found, default file created."
    end

    wal = ARGV[0] or raise "No WAL file was specified on the command line."
    wal = Pathname.new( wal )
    conf = YAML.load( CONFIG.read )

    shipper = WalShipper::Dispatcher.new( wal, conf )
    shipper.link
    shipper.dispatch
    shipper.clean_spool
end


__END__
---
# Spool from pg_xlog to the working area?
# This must be set to 'true' for wal shipping to function!
enabled: false

# Log everything to the PostgreSQL log files?
debug: true

# The working area for WAL segments.
spool: /opt/local/var/db/postgresql84/wal

# With multiple slaves, ship WAL in parallel, or be synchronous?
async: false

# Put a ceiling on the parallel threads?
# '0' or removing this option uses a thread for each destination,
# regardless of how many you have. Keep in mind that's 16 * destination
# count megs of simultaneous bandwidth.
async_max: 5

# Paths and settings for various binaries.
rsync: /usr/bin/rsync
ssh: /usr/bin/ssh
ssh_timeout: 10

destinations:

  - label: rsync-example
    port: 2222
    kind: rsync-ssh
    host: localhost
    user: postgres
    path: wal    # relative to the user's homedir on the remote host
    enabled: false

  - label: file-example
    kind: file
    compress: true
    enabled: true
    path: /tmp/someplace
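For orientation, a minimal sketch of how one archive_command invocation would flow through the script above; the segment name is hypothetical and the inline YAML is a trimmed stand-in for the generated etc/wal_shipper.conf template, not the real file.

# Hypothetical walk-through of one archive_command invocation, mirroring the
# __FILE__ == $0 block above. PostgreSQL calls the script with the completed
# segment path as ARGV[0] (the %p placeholder).
require 'yaml'
require 'pathname'

wal  = Pathname.new( '000000010000000200000041' )   # hypothetical %p value
conf = YAML.load( <<~YML )
  enabled: true
  debug: true
  spool: /tmp/wal_spool
  async: false
  rsync: /usr/bin/rsync
  ssh: /usr/bin/ssh
  destinations:
    - label: file-example
      kind: file
      path: /tmp/someplace
      enabled: true
YML

# With a real segment and spool directory in place, shipping is three calls:
#   shipper = WalShipper::Dispatcher.new( wal, conf )
#   shipper.link
#   shipper.dispatch
#   shipper.clean_spool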