pg 1.2.3 → 1.3.0.rc1

Files changed (103)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.appveyor.yml +36 -0
  4. data/.gems +6 -0
  5. data/.github/workflows/binary-gems.yml +80 -0
  6. data/.github/workflows/source-gem.yml +129 -0
  7. data/.gitignore +13 -0
  8. data/.hgsigs +34 -0
  9. data/.hgtags +41 -0
  10. data/.irbrc +23 -0
  11. data/.pryrc +23 -0
  12. data/.tm_properties +21 -0
  13. data/.travis.yml +49 -0
  14. data/Gemfile +14 -0
  15. data/History.rdoc +75 -7
  16. data/Manifest.txt +0 -1
  17. data/README.rdoc +7 -6
  18. data/Rakefile +27 -138
  19. data/Rakefile.cross +5 -5
  20. data/certs/ged.pem +24 -0
  21. data/ext/errorcodes.def +8 -0
  22. data/ext/errorcodes.txt +3 -1
  23. data/ext/extconf.rb +90 -19
  24. data/ext/gvl_wrappers.c +4 -0
  25. data/ext/gvl_wrappers.h +23 -0
  26. data/ext/pg.c +35 -1
  27. data/ext/pg.h +18 -1
  28. data/ext/pg_coder.c +82 -28
  29. data/ext/pg_connection.c +538 -279
  30. data/ext/pg_copy_coder.c +45 -16
  31. data/ext/pg_record_coder.c +38 -10
  32. data/ext/pg_result.c +61 -31
  33. data/ext/pg_text_decoder.c +1 -1
  34. data/ext/pg_text_encoder.c +6 -6
  35. data/ext/pg_tuple.c +47 -21
  36. data/ext/pg_type_map.c +41 -8
  37. data/ext/pg_type_map_all_strings.c +14 -1
  38. data/ext/pg_type_map_by_class.c +49 -24
  39. data/ext/pg_type_map_by_column.c +64 -28
  40. data/ext/pg_type_map_by_mri_type.c +47 -18
  41. data/ext/pg_type_map_by_oid.c +52 -23
  42. data/ext/pg_type_map_in_ruby.c +50 -19
  43. data/ext/pg_util.c +2 -2
  44. data/lib/pg/basic_type_map_based_on_result.rb +47 -0
  45. data/lib/pg/basic_type_map_for_queries.rb +193 -0
  46. data/lib/pg/basic_type_map_for_results.rb +81 -0
  47. data/lib/pg/basic_type_registry.rb +296 -0
  48. data/lib/pg/coder.rb +1 -1
  49. data/lib/pg/connection.rb +369 -56
  50. data/lib/pg/version.rb +4 -0
  51. data/lib/pg.rb +38 -25
  52. data/misc/openssl-pg-segfault.rb +31 -0
  53. data/misc/postgres/History.txt +9 -0
  54. data/misc/postgres/Manifest.txt +5 -0
  55. data/misc/postgres/README.txt +21 -0
  56. data/misc/postgres/Rakefile +21 -0
  57. data/misc/postgres/lib/postgres.rb +16 -0
  58. data/misc/ruby-pg/History.txt +9 -0
  59. data/misc/ruby-pg/Manifest.txt +5 -0
  60. data/misc/ruby-pg/README.txt +21 -0
  61. data/misc/ruby-pg/Rakefile +21 -0
  62. data/misc/ruby-pg/lib/ruby/pg.rb +16 -0
  63. data/pg.gemspec +32 -0
  64. data/sample/array_insert.rb +20 -0
  65. data/sample/async_api.rb +106 -0
  66. data/sample/async_copyto.rb +39 -0
  67. data/sample/async_mixed.rb +56 -0
  68. data/sample/check_conn.rb +21 -0
  69. data/sample/copydata.rb +71 -0
  70. data/sample/copyfrom.rb +81 -0
  71. data/sample/copyto.rb +19 -0
  72. data/sample/cursor.rb +21 -0
  73. data/sample/disk_usage_report.rb +177 -0
  74. data/sample/issue-119.rb +94 -0
  75. data/sample/losample.rb +69 -0
  76. data/sample/minimal-testcase.rb +17 -0
  77. data/sample/notify_wait.rb +72 -0
  78. data/sample/pg_statistics.rb +285 -0
  79. data/sample/replication_monitor.rb +222 -0
  80. data/sample/test_binary_values.rb +33 -0
  81. data/sample/wal_shipper.rb +434 -0
  82. data/sample/warehouse_partitions.rb +311 -0
  83. data.tar.gz.sig +0 -0
  84. metadata +79 -226
  85. metadata.gz.sig +0 -0
  86. data/ChangeLog +0 -0
  87. data/lib/pg/basic_type_mapping.rb +0 -522
  88. data/spec/data/expected_trace.out +0 -26
  89. data/spec/data/random_binary_data +0 -0
  90. data/spec/helpers.rb +0 -380
  91. data/spec/pg/basic_type_mapping_spec.rb +0 -630
  92. data/spec/pg/connection_spec.rb +0 -1949
  93. data/spec/pg/connection_sync_spec.rb +0 -41
  94. data/spec/pg/result_spec.rb +0 -681
  95. data/spec/pg/tuple_spec.rb +0 -333
  96. data/spec/pg/type_map_by_class_spec.rb +0 -138
  97. data/spec/pg/type_map_by_column_spec.rb +0 -226
  98. data/spec/pg/type_map_by_mri_type_spec.rb +0 -136
  99. data/spec/pg/type_map_by_oid_spec.rb +0 -149
  100. data/spec/pg/type_map_in_ruby_spec.rb +0 -164
  101. data/spec/pg/type_map_spec.rb +0 -22
  102. data/spec/pg/type_spec.rb +0 -1123
  103. data/spec/pg_spec.rb +0 -50
data/sample/replication_monitor.rb
@@ -0,0 +1,222 @@
+ # -*- ruby -*-
+ # vim: set noet nosta sw=4 ts=4 :
+ #
+ # Get the current WAL segment and offset from a master postgresql
+ # server, and compare slave servers to see how far behind they
+ # are in MB. This script should be easily modified for use with
+ # Nagios/Mon/Monit/Zabbix/whatever, or wrapping it in a display loop,
+ # and is suitable for both WAL shipping or streaming forms of replication.
+ #
+ # Mahlon E. Smith <mahlon@martini.nu>
+ #
+ # First argument is the master server, all other arguments are treated
+ # as slave machines.
+ #
+ #     db_replication.monitor db-master.example.com ...
+ #
+
+ require 'ostruct'
+ require 'optparse'
+ require 'pathname'
+ require 'etc'
+ require 'pg'
+ require 'pp'
+
+
+ ### A class to encapsulate the PG handles.
+ ###
+ class PGMonitor
+
+     VERSION = %q$Id$
+
+     # When to consider a slave as 'behind', measured in WAL segments.
+     # The default WAL segment size is 16, so we'll alert after
+     # missing two WAL files worth of data.
+     #
+     LAG_ALERT = 32
+
+     ### Create a new PGMonitor object.
+     ###
+     def initialize( opts, hosts )
+         @opts = opts
+         @master = hosts.shift
+         @slaves = hosts
+         @current_wal = {}
+         @failures = []
+     end
+
+     attr_reader :opts, :current_wal, :master, :slaves, :failures
+
+
+     ### Perform the connections and check the lag.
+     ###
+     def check
+         # clear prior failures, get current xlog info
+         @failures = []
+         return unless self.get_current_wal
+
+         # check all slaves
+         self.slaves.each do |slave|
+             begin
+                 slave_db = PG.connect(
+                     :dbname => self.opts.database,
+                     :host => slave,
+                     :port => self.opts.port,
+                     :user => self.opts.user,
+                     :password => self.opts.pass,
+                     :sslmode => 'prefer'
+                 )
+
+                 xlog = slave_db.exec( 'SELECT pg_last_xlog_receive_location()' ).getvalue( 0, 0 )
+                 slave_db.close
+
+                 lag_in_megs = ( self.find_lag( xlog ).to_f / 1024 / 1024 ).abs
+                 if lag_in_megs >= LAG_ALERT
+                     failures << { :host => slave,
+                         :error => "%0.2fMB behind the master." % [ lag_in_megs ] }
+                 end
+             rescue => err
+                 failures << { :host => slave, :error => err.message }
+             end
+         end
+     end
+
+
+     #########
+     protected
+     #########
+
+     ### Ask the master for the current xlog information, to compare
+     ### to slaves. Returns true on success. On failure, populates
+     ### the failures array and returns false.
+     ###
+     def get_current_wal
+         master_db = PG.connect(
+             :dbname => self.opts.database,
+             :host => self.master,
+             :port => self.opts.port,
+             :user => self.opts.user,
+             :password => self.opts.pass,
+             :sslmode => 'prefer'
+         )
+
+         self.current_wal[ :segbytes ] = master_db.exec( 'SHOW wal_segment_size' ).
+             getvalue( 0, 0 ).sub( /\D+/, '' ).to_i << 20
+
+         current = master_db.exec( 'SELECT pg_current_xlog_location()' ).getvalue( 0, 0 )
+         self.current_wal[ :segment ], self.current_wal[ :offset ] = current.split( /\// )
+
+         master_db.close
+         return true
+
+     # If we can't get any of the info from the master, then there is no
+     # point in a comparison with slaves.
+     #
+     rescue => err
+         self.failures << { :host => self.master,
+             :error => 'Unable to retrieve required info from the master (%s)' % [ err.message ] }
+
+         return false
+     end
+
+
+     ### Given an +xlog+ position from a slave server, return
+     ### the number of bytes the slave needs to replay before it
+     ### is caught up to the master.
+     ###
+     def find_lag( xlog )
+         s_segment, s_offset = xlog.split( /\// )
+         m_segment = self.current_wal[ :segment ]
+         m_offset = self.current_wal[ :offset ]
+         m_segbytes = self.current_wal[ :segbytes ]
+
+         return (( m_segment.hex - s_segment.hex ) * m_segbytes) + ( m_offset.hex - s_offset.hex )
+     end
+
+ end
+
+
+ ### Parse command line arguments. Return a struct of global options.
+ ###
+ def parse_args( args )
+     options = OpenStruct.new
+     options.database = 'postgres'
+     options.port = 5432
+     options.user = Etc.getpwuid( Process.uid ).name
+     options.sslmode = 'prefer'
+
+     opts = OptionParser.new do |opts|
+         opts.banner = "Usage: #{$0} [options] <master> <slave> [slave2, slave3...]"
+
+         opts.separator ''
+         opts.separator 'Connection options:'
+
+         opts.on( '-d', '--database DBNAME',
+             "specify the database to connect to (default: \"#{options.database}\")" ) do |db|
+             options.database = db
+         end
+
+         opts.on( '-h', '--host HOSTNAME', 'database server host' ) do |host|
+             options.host = host
+         end
+
+         opts.on( '-p', '--port PORT', Integer,
+             "database server port (default: \"#{options.port}\")" ) do |port|
+             options.port = port
+         end
+
+         opts.on( '-U', '--user NAME',
+             "database user name (default: \"#{options.user}\")" ) do |user|
+             options.user = user
+         end
+
+         opts.on( '-W', 'force password prompt' ) do |pw|
+             print 'Password: '
+             begin
+                 system 'stty -echo'
+                 options.pass = $stdin.gets.chomp
+             ensure
+                 system 'stty echo'
+                 puts
+             end
+         end
+
+         opts.separator ''
+         opts.separator 'Other options:'
+
+         opts.on_tail( '--help', 'show this help, then exit' ) do
+             $stderr.puts opts
+             exit
+         end
+
+         opts.on_tail( '--version', 'output version information, then exit' ) do
+             puts PGMonitor::VERSION
+             exit
+         end
+     end
+
+     opts.parse!( args )
+     return options
+ end
+
+
+
+ if __FILE__ == $0
+     opts = parse_args( ARGV )
+     raise ArgumentError, "At least two PostgreSQL servers are required." if ARGV.length < 2
+     mon = PGMonitor.new( opts, ARGV )
+
+     mon.check
+     if mon.failures.empty?
+         puts "All is well!"
+         exit 0
+     else
+         puts "Database replication delayed or broken."
+         mon.failures.each do |bad|
+             puts "%s: %s" % [ bad[ :host ], bad[ :error ] ]
+         end
+         exit 1
+     end
+ end
+
+
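For reference, here is a minimal standalone sketch of the arithmetic find_lag() above performs, using hypothetical xlog positions and assuming the default 16MB wal_segment_size; with these numbers the computed lag is 64MB, which would trip the script's LAG_ALERT check.

    # Hypothetical positions: the master value would come from pg_current_xlog_location(),
    # the slave value from pg_last_xlog_receive_location().
    master_pos = '2/A0000000'
    slave_pos  = '2/9C000000'
    seg_bytes  = 16 << 20        # SHOW wal_segment_size => '16MB'

    m_seg, m_off = master_pos.split( '/' )
    s_seg, s_off = slave_pos.split( '/' )

    lag_bytes = (( m_seg.hex - s_seg.hex ) * seg_bytes) + ( m_off.hex - s_off.hex )
    puts "%0.2fMB behind the master." % [ lag_bytes.to_f / 1024 / 1024 ]
    # => 64.00MB behind the master.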
data/sample/test_binary_values.rb
@@ -0,0 +1,33 @@
+ # -*- ruby -*-1.9.1
+
+ require 'pg'
+
+ db = PG.connect( :dbname => 'test' )
+ db.exec "DROP TABLE IF EXISTS test"
+ db.exec "CREATE TABLE test (a INTEGER, b BYTEA)"
+
+ a = 42
+ b = [1, 2, 3]
+ db.exec "INSERT INTO test(a, b) VALUES($1::int, $2::bytea)",
+     [a, {:value => b.pack('N*'), :format => 1}]
+
+ db.exec( "SELECT a::int, b::bytea FROM test LIMIT 1", [], 1 ) do |res|
+
+     res.nfields.times do |i|
+         puts "Field %d is: %s, a %s (%s) column from table %p" % [
+             i,
+             res.fname( i ),
+             db.exec( "SELECT format_type($1,$2)", [res.ftype(i), res.fmod(1)] ).getvalue(0,0),
+             res.fformat( i ).zero? ? "string" : "binary",
+             res.ftable( i ),
+         ]
+     end
+
+     res.each do |row|
+         puts "a = #{row['a'].inspect}"
+         puts "a (unpacked) = #{row['a'].unpack('N*').inspect}"
+         puts "b = #{row['b'].unpack('N*').inspect}"
+     end
+ end
+
+
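The sample above round-trips a bytea value in binary format. A condensed sketch of the same pattern, assuming a reachable 'test' database: passing a Hash with :format => 1 sends the parameter as binary, and the trailing 1 given to exec_params requests binary results, so the returned bytes are unpacked rather than parsed as text.

    require 'pg'

    db  = PG.connect( :dbname => 'test' )
    res = db.exec_params( "SELECT $1::bytea AS b",
        [ { :value => [1, 2, 3].pack('N*'), :format => 1 } ], 1 )
    p res.getvalue( 0, 0 ).unpack('N*')    # => [1, 2, 3]
    db.close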
data/sample/wal_shipper.rb
@@ -0,0 +1,434 @@
+ # -*- ruby -*-
+ #
+ # A script to wrap ssh and rsync for PostgreSQL WAL files shipping.
+ # Mahlon E. Smith <mahlon@martini.nu>
+ #
+ # Based off of Joshua Drake's PITRTools concept, but with some important
+ # differences:
+ #
+ # - Only supports PostgreSQL >= 8.3
+ # - No support for rsync version < 3
+ # - Only shipping, no client side sync (too much opportunity for failure,
+ #   and it's easy to get a base backup manually)
+ # - WAL files are only stored once, regardless of how many
+ #   slaves are configured or not responding, and are removed from
+ #   the master when they are no longer needed.
+ # - Each slave can have completely distinct settings, instead
+ #   of a single set of options applied to all slaves
+ # - slave sync can be individually paused from the master
+ # - can run synchronously, or if you have a lot of slaves, threaded async mode
+ # - It's ruby, instead of python. :)
+ #
+ # wal_shipper is configurable via an external YAML file, and will create
+ # a template on its first run -- you'll need to modify it! It expects
+ # a directory structure like so:
+ #
+ #     postgres/
+ #         data/...
+ #         bin/wal_shipper.rb
+ #         etc/wal_shipper.conf <-- YAML settings!
+ #         wal/
+ #
+ # It should be loaded from the PostgreSQL master's postgresql.conf
+ # as such, after putting it into your postgres user homedir under 'bin':
+ #
+ #     archive_command = '/path/to/postgres_home/bin/wal_shipper.rb %p'
+ #
+ # Passwordless ssh keys need to be set up for the postgres user on all
+ # participating masters and slaves.
+ #
+ # You can use any replay method of your choosing on the slaves.
+ # Here's a nice example using pg_standby, to be put in data/recovery.conf:
+ #
+ #     restore_command = 'pg_standby -t /tmp/pgrecovery.done -s5 -w0 -c /path/to/postgres_home/wal_files/ %f %p %r'
+ #
+ # Or, here's another simple alternative data/recovery.conf, for using WAL shipping
+ # alongside streaming replication:
+ #
+ #     standby_mode = 'on'
+ #     primary_conninfo = 'host=master.example.com port=5432 user=repl password=XXXXXXX'
+ #     restore_command = 'cp /usr/local/pgsql/wal/%f %p'
+ #     trigger_file = '/usr/local/pgsql/pg.become_primary'
+ #     archive_cleanup_command = '/usr/local/bin/pg_archivecleanup /usr/local/pgsql/wal %r'
+ #
+ #========================================================================================
+
+
+ require 'pathname'
+ require 'yaml'
+ require 'fileutils'
+ require 'ostruct'
+
+
+ ### Encapsulate WAL shipping functionality.
+ ###
+ module WalShipper
+
+     ### Send messages to the PostgreSQL log files.
+     ###
+     def log( msg )
+         return unless @debug
+         puts "WAL Shipper: %s" % [ msg ]
+     end
+
+
+     ### An object that represents a single destination from the
+     ### configuration file.
+     ###
+     class Destination < OpenStruct
+         include WalShipper
+
+         ### Create a new WalShipper::Destination object.
+         def initialize( dest, debug=false )
+             @debug = debug
+             super( dest )
+             self.validate
+         end
+
+         #########
+         protected
+         #########
+
+
+         ### Check for required keys and normalize various keys.
+         ###
+         def validate
+             # Check for required destination keys
+             %w[ label kind ].each do |key|
+                 if self.send( key.to_sym ).nil?
+                     self.log "Destination %p missing required '%s' key." % [ self, key ]
+                     self.invalid = true
+                 end
+             end
+
+             # Ensure paths are Pathnames for the 'file' destination type.
+             self.path = Pathname.new( self.path ) if self.kind == 'file'
+
+             if self.kind == 'rsync-ssh'
+                 self.port ||= 22
+                 self.user = self.user ? "#{self.user}@" : ''
+             end
+         end
+     end # Class Destination
+
+
+
+     ### Class for creating new Destination objects and determining how to
+     ### ship WAL files to them.
+     ###
+     class Dispatcher
+         include WalShipper
+
+         ### Create a new Shipper object, given a +conf+ hash and a +wal+ file
+         ### Pathname object.
+         ###
+         def initialize( wal, conf )
+             # Make the config keys instance variables.
+             conf.each_pair {|key, val| self.instance_variable_set( "@#{key}", val ) }
+
+             # Spool directory check.
+             #
+             @spool = Pathname.new( @spool )
+             @spool.exist? or raise "The configured spool directory (%s) doesn't exist." % [ @spool ]
+
+             # Stop right away if we have disabled shipping.
+             #
+             unless @enabled
+                 self.log "WAL shipping is disabled, queuing segment %s" % [ wal.basename ]
+                 exit 1
+             end
+
+             # Instantiate Destination objects, creating new spool directories
+             # for each.
+             #
+             @destinations.
+                 collect!{|dest| WalShipper::Destination.new( dest, @debug ) }.
+                 reject {|dest| dest.invalid }.
+                 collect do |dest|
+                     dest.spool = @spool + dest.label
+                     dest.spool.mkdir( 0711 ) unless dest.spool.exist?
+                     dest
+                 end
+
+             # Put the WAL file into the spool for processing!
+             #
+             @waldir = @spool + 'wal_segments'
+             @waldir.mkdir( 0711 ) unless @waldir.exist?
+
+             self.log "Copying %s to %s" % [ wal.basename, @waldir ]
+             FileUtils::cp wal, @waldir
+
+             # 'wal' now references the copy. The original is managed and auto-expired
+             # by PostgreSQL when a new checkpoint segment it reached.
+             @wal = @waldir + wal.basename
+         end
+
+
+         ### Create hardlinks for the WAL file into each of the destination directories
+         ### for separate queueing and recording of what was shipped successfully.
+         ###
+         def link
+             @destinations.each do |dest|
+                 self.log "Linking %s into %s" % [ @wal.basename, dest.spool.basename ]
+                 FileUtils::ln @wal, dest.spool, :force => true
+             end
+         end
+
+
+         ### Decide to be synchronous or threaded, and delegate each destination
+         ### to the proper ship method.
+         ###
+         def dispatch
+             # Synchronous mode.
+             #
+             unless @async
+                 self.log "Performing a synchronous dispatch."
+                 @destinations.each {|dest| self.dispatch_dest( dest ) }
+                 return
+             end
+
+             tg = ThreadGroup.new
+
+             # Async, one thread per destination
+             #
+             if @async_max.nil? || @async_max.to_i.zero?
+                 self.log "Performing an asynchronous dispatch: one thread per destination."
+                 @destinations.each do |dest|
+                     t = Thread.new do
+                         Thread.current.abort_on_exception = true
+                         self.dispatch_dest( dest )
+                     end
+                     tg.add( t )
+                 end
+                 tg.list.each {|t| t.join }
+                 return
+             end
+
+             # Async, one thread per destination, in groups of asynx_max size.
+             #
+             self.log "Performing an asynchronous dispatch: one thread per destination, %d at a time." % [ @async_max ]
+             all_dests = @destinations.dup
+             dest_chunks = []
+             until all_dests.empty? do
+                 dest_chunks << all_dests.slice!( 0, @async_max )
+             end
+
+             dest_chunks.each do |chunk|
+                 chunk.each do |dest|
+                     t = Thread.new do
+                         Thread.current.abort_on_exception = true
+                         self.dispatch_dest( dest )
+                     end
+                     tg.add( t )
+                 end
+
+                 tg.list.each {|t| t.join }
+             end
+
+             return
+         end
+
+
+         ### Remove any WAL segments no longer needed by slaves.
+         ###
+         def clean_spool
+             total = 0
+             @waldir.children.each do |wal|
+                 if wal.stat.nlink == 1
+                     total += wal.unlink
+                 end
+             end
+
+             self.log "Removed %d WAL segment%s." % [ total, total == 1 ? '' : 's' ]
+         end
+
+
+
+         #########
+         protected
+         #########
+
+         ### Send WAL segments to remote +dest+ via rsync+ssh.
+         ### Passwordless keys between the user running this script (postmaster owner)
+         ### and remote user need to be set up in advance.
+         ###
+         def ship_rsync_ssh( dest )
+             if dest.host.nil?
+                 self.log "Destination %p missing required 'host' key. WAL is queued." % [ dest.host ]
+                 return
+             end
+
+             rsync_flags = '-zc'
+             ssh_string = "%s -o ConnectTimeout=%d -o StrictHostKeyChecking=no -p %d" %
+                 [ @ssh, @ssh_timeout || 10, dest.port ]
+             src_string = ''
+             dst_string = "%s%s:%s/" % [ dest.user, dest.host, dest.path ]
+
+             # If there are numerous files in the spool dir, it means there was
+             # an error transferring to this host in the past. Try and ship all
+             # WAL segments, instead of just the new one. PostgreSQL on the slave
+             # side will "do the right thing" as they come in, regardless of
+             # ordering.
+             #
+             if dest.spool.children.length > 1
+                 src_string = dest.spool.to_s + '/'
+                 rsync_flags << 'r'
+             else
+                 src_string = dest.spool + @wal.basename
+             end
+
+
+             ship_wal_cmd = [
+                 @rsync,
+                 @debug ? (rsync_flags << 'vh') : (rsync_flags << 'q'),
+                 '--remove-source-files',
+                 '-e', ssh_string,
+                 src_string, dst_string
+             ]
+
+             self.log "Running command '%s'" % [ ship_wal_cmd.join(' ') ]
+             system *ship_wal_cmd
+
+             # Run external notification program on error, if one is configured.
+             #
+             unless $?.success?
+                 self.log "Ack! Error while shipping to %p, WAL is queued." % [ dest.label ]
+                 system @error_cmd, dest.label if @error_cmd
+             end
+         end
+
+
+         ### Copy WAL segments to remote path as set in +dest+.
+         ### This is useful for longer term PITR, copying to NFS shares, etc.
+         ###
+         def ship_file( dest )
+             if dest.path.nil?
+                 self.log "Destination %p missing required 'path' key. WAL is queued." % [ dest ]
+                 return
+             end
+             dest.path.mkdir( 0711 ) unless dest.path.exist?
+
+             # If there are numerous files in the spool dir, it means there was
+             # an error transferring to this host in the past. Try and ship all
+             # WAL segments, instead of just the new one. PostgreSQL on the slave
+             # side will "do the right thing" as they come in, regardless of
+             # ordering.
+             #
+             if dest.spool.children.length > 1
+                 dest.spool.children.each do |wal|
+                     wal.unlink if self.copy_file( wal, dest.path, dest.label, dest.compress )
+                 end
+             else
+                 wal = dest.spool + @wal.basename
+                 wal.unlink if self.copy_file( wal, dest.path, dest.label, dest.compress )
+             end
+         end
+
+
+         ### Given a +wal+ Pathname, a +path+ destination, and the destination
+         ### label, copy and optionally compress a WAL file.
+         ###
+         def copy_file( wal, path, label, compress=false )
+             dest_file = path + wal.basename
+             FileUtils::cp wal, dest_file
+             if compress
+                 system *[ 'gzip', '-f', dest_file ]
+                 raise "Error while compressing: %s" % [ wal.basename ] unless $?.success?
+             end
+             self.log "Copied %s%s to %s." %
+                 [ wal.basename, compress ? ' (and compressed)' : '', path ]
+             return true
+         rescue => err
+             self.log "Ack! Error while copying '%s' (%s) to %p, WAL is queued." %
+                 [ wal.basename, err.message, path ]
+             system @error_cmd, label if @error_cmd
+             return false
+         end
+
+
+         ### Figure out how to send the WAL file to its intended destination +dest+.
+         ###
+         def dispatch_dest( dest )
+             if ! dest.enabled.nil? && ! dest.enabled
+                 self.log "Skipping explicitly disabled destination %p, WAL is queued." % [ dest.label ]
+                 return
+             end
+
+             # Send to the appropriate method. ( rsync-ssh --> ship_rsync_ssh )
+             #
+             meth = ( 'ship_' + dest.kind.gsub(/-/, '_') ).to_sym
+             if WalShipper::Dispatcher.method_defined?( meth )
+                 self.send( meth, dest )
+             else
+                 self.log "Unknown destination kind %p for %p. WAL is queued." % [ dest.kind, dest.label ]
+             end
+         end
+     end
+ end
+
+ # Ship the WAL file!
+ #
+ if __FILE__ == $0
+     CONFIG_DIR = Pathname.new( __FILE__ ).dirname.parent + 'etc'
+     CONFIG = CONFIG_DIR + 'wal_shipper.conf'
+
+     unless CONFIG.exist?
+         CONFIG_DIR.mkdir( 0711 ) unless CONFIG_DIR.exist?
+         CONFIG.open('w') {|conf| conf.print(DATA.read) }
+         CONFIG.chmod( 0644 )
+         puts "No WAL shipping configuration found, default file created."
+     end
+
+     wal = ARGV[0] or raise "No WAL file was specified on the command line."
+     wal = Pathname.new( wal )
+     conf = YAML.load( CONFIG.read )
+
+     shipper = WalShipper::Dispatcher.new( wal, conf )
+     shipper.link
+     shipper.dispatch
+     shipper.clean_spool
+ end
+
+
+ __END__
+ ---
+ # Spool from pg_xlog to the working area?
+ # This must be set to 'true' for wal shipping to function!
+ enabled: false
+
+ # Log everything to the PostgreSQL log files?
+ debug: true
+
+ # The working area for WAL segments.
+ spool: /opt/local/var/db/postgresql84/wal
+
+ # With multiple slaves, ship WAL in parallel, or be synchronous?
+ async: false
+
+ # Put a ceiling on the parallel threads?
+ # '0' or removing this option uses a thread for each destination,
+ # regardless of how many you have. Keep in mind that's 16 * destination
+ # count megs of simultaneous bandwidth.
+ async_max: 5
+
+ # Paths and settings for various binaries.
+ rsync: /usr/bin/rsync
+ ssh: /usr/bin/ssh
+ ssh_timeout: 10
+
+ destinations:
+
+   - label: rsync-example
+     port: 2222
+     kind: rsync-ssh
+     host: localhost
+     user: postgres
+     path: wal # relative to the user's homedir on the remote host
+     enabled: false
+
+   - label: file-example
+     kind: file
+     compress: true
+     enabled: true
+     path: /tmp/someplace
+
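A note on the spool design above: link() hardlinks each queued WAL segment into a per-destination spool directory, and clean_spool() only deletes a segment from wal_segments/ once its link count has dropped back to 1, i.e. every destination's copy has been shipped and unlinked. A minimal sketch of that hardlink bookkeeping, using a hypothetical temporary directory:

    require 'fileutils'
    require 'pathname'
    require 'tmpdir'

    Dir.mktmpdir do |dir|
        spool   = Pathname.new( dir )
        waldir  = spool + 'wal_segments'
        destdir = spool + 'slave-example'
        [ waldir, destdir ].each( &:mkpath )

        wal = waldir + '000000010000000000000001'
        wal.write( 'fake segment' )
        FileUtils.ln wal, destdir              # queue the segment for one destination

        puts wal.stat.nlink                    # => 2, still referenced by a destination spool
        ( destdir + wal.basename ).unlink      # "shipped": the destination's link is removed
        puts wal.stat.nlink                    # => 1, now safe for clean_spool to remove
    end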