cipherstash-pg 1.0.0.beta.4-arm64-darwin
- checksums.yaml +7 -0
- data/.appveyor.yml +42 -0
- data/.gems +6 -0
- data/.gemtest +0 -0
- data/.github/workflows/binary-gems.yml +117 -0
- data/.github/workflows/source-gem.yml +137 -0
- data/.gitignore +19 -0
- data/.hgsigs +34 -0
- data/.hgtags +41 -0
- data/.irbrc +23 -0
- data/.pryrc +23 -0
- data/.tm_properties +21 -0
- data/.travis.yml +49 -0
- data/BSDL +22 -0
- data/Contributors.rdoc +46 -0
- data/Gemfile +14 -0
- data/Gemfile.lock +45 -0
- data/History.md +804 -0
- data/LICENSE +56 -0
- data/Manifest.txt +72 -0
- data/POSTGRES +23 -0
- data/README-OS_X.rdoc +68 -0
- data/README-Windows.rdoc +56 -0
- data/README.ja.md +266 -0
- data/README.md +272 -0
- data/Rakefile +76 -0
- data/Rakefile.cross +298 -0
- data/certs/ged.pem +24 -0
- data/certs/larskanis-2022.pem +26 -0
- data/certs/larskanis-2023.pem +24 -0
- data/cipherstash-pg.gemspec +0 -0
- data/lib/2.7/pg_ext.bundle +0 -0
- data/lib/3.0/pg_ext.bundle +0 -0
- data/lib/3.1/pg_ext.bundle +0 -0
- data/lib/3.2/pg_ext.bundle +0 -0
- data/lib/cipherstash-pg/basic_type_map_based_on_result.rb +11 -0
- data/lib/cipherstash-pg/basic_type_map_for_queries.rb +113 -0
- data/lib/cipherstash-pg/basic_type_map_for_results.rb +30 -0
- data/lib/cipherstash-pg/basic_type_registry.rb +206 -0
- data/lib/cipherstash-pg/binary_decoder.rb +21 -0
- data/lib/cipherstash-pg/coder.rb +82 -0
- data/lib/cipherstash-pg/connection.rb +467 -0
- data/lib/cipherstash-pg/constants.rb +3 -0
- data/lib/cipherstash-pg/exceptions.rb +19 -0
- data/lib/cipherstash-pg/result.rb +22 -0
- data/lib/cipherstash-pg/text_decoder.rb +43 -0
- data/lib/cipherstash-pg/text_encoder.rb +67 -0
- data/lib/cipherstash-pg/tuple.rb +24 -0
- data/lib/cipherstash-pg/type_map_by_column.rb +11 -0
- data/lib/cipherstash-pg/version.rb +3 -0
- data/lib/cipherstash-pg.rb +60 -0
- data/lib/libpq.5.dylib +0 -0
- data/misc/openssl-pg-segfault.rb +21 -0
- data/misc/postgres/History.txt +9 -0
- data/misc/postgres/Manifest.txt +5 -0
- data/misc/postgres/README.txt +21 -0
- data/misc/postgres/Rakefile +14 -0
- data/misc/postgres/lib/postgres.rb +12 -0
- data/misc/ruby-pg/History.txt +9 -0
- data/misc/ruby-pg/Manifest.txt +5 -0
- data/misc/ruby-pg/README.txt +21 -0
- data/misc/ruby-pg/Rakefile +14 -0
- data/misc/ruby-pg/lib/ruby/pg.rb +12 -0
- data/rakelib/task_extension.rb +32 -0
- data/sample/array_insert.rb +7 -0
- data/sample/async_api.rb +60 -0
- data/sample/async_copyto.rb +24 -0
- data/sample/async_mixed.rb +28 -0
- data/sample/check_conn.rb +9 -0
- data/sample/copydata.rb +21 -0
- data/sample/copyfrom.rb +29 -0
- data/sample/copyto.rb +13 -0
- data/sample/cursor.rb +11 -0
- data/sample/disk_usage_report.rb +92 -0
- data/sample/issue-119.rb +46 -0
- data/sample/losample.rb +51 -0
- data/sample/minimal-testcase.rb +6 -0
- data/sample/notify_wait.rb +26 -0
- data/sample/pg_statistics.rb +104 -0
- data/sample/replication_monitor.rb +123 -0
- data/sample/test_binary_values.rb +17 -0
- data/sample/wal_shipper.rb +202 -0
- data/sample/warehouse_partitions.rb +161 -0
- data/translation/.po4a-version +7 -0
- data/translation/po/all.pot +875 -0
- data/translation/po/ja.po +868 -0
- data/translation/po4a.cfg +9 -0
- data/vendor/database-extensions/install.sql +317 -0
- data/vendor/database-extensions/uninstall.sql +20 -0
- metadata +140 -0
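The native extension exposes a CipherStashPG module whose surface appears to mirror the ruby-pg PG API (the lib/ files above track pg's connection, result, coder, and type-map sources, and lib/libpq.5.dylib bundles the client library). The sample scripts reproduced below are rendered from a compiled form, so the literal "(string)" shows up wherever the upstream samples most likely referenced the script's own path (__FILE__ / $0). They all share the same connect-and-exec shape; a minimal sketch with placeholder connection parameters:

require "cipherstash-pg"

# Connect, run a query, and clean up. The :dbname/:host/:port values are
# placeholders for illustration, not defaults shipped with the gem.
conn = CipherStashPG.connect(:dbname => "test", :host => "localhost", :port => 5432)
begin
  conn.exec("SELECT version()") do |result|
    result.each { |row| puts row.inspect }
  end
ensure
  conn.close
end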
data/sample/losample.rb
ADDED
@@ -0,0 +1,51 @@
require "cipherstash-pg"
SAMPLE_WRITE_DATA = "some sample data"
SAMPLE_EXPORT_NAME = "lowrite.txt"
conn = CipherStashPG.connect(:dbname => "test", :host => "localhost", :port => 5432)
puts "dbname: " + conn.db + "\thost: " + conn.host + "\tuser: " + conn.user
puts "Beginning transaction"
conn.exec("BEGIN")
puts "Import test:"
puts " importing %s" % ["(string)"]
oid = conn.lo_import("(string)")
puts " imported as large object %d" % [oid]
puts "Read test:"
fd = conn.lo_open(oid, CipherStashPG::INV_READ | CipherStashPG::INV_WRITE)
conn.lo_lseek(fd, 0, CipherStashPG::SEEK_SET)
buf = conn.lo_read(fd, 50)
puts " read: %p" % [buf]
if buf =~ /require 'pg'/
  puts " read was ok!"
end
puts "Write test:"
conn.lo_lseek(fd, 0, CipherStashPG::SEEK_END)
buf = SAMPLE_WRITE_DATA.dup
totalbytes = 0
until buf.empty?
  bytes = conn.lo_write(fd, buf)
  buf.slice!(0, bytes)
  totalbytes += bytes
end
puts " appended %d bytes" % [totalbytes]
puts "Export test:"
File.unlink(SAMPLE_EXPORT_NAME) if File.exist?(SAMPLE_EXPORT_NAME)
conn.lo_export(oid, SAMPLE_EXPORT_NAME)
puts " success!" if File.exist?(SAMPLE_EXPORT_NAME)
puts " exported as %s (%d bytes)" % [SAMPLE_EXPORT_NAME, File.size(SAMPLE_EXPORT_NAME)]
conn.exec("COMMIT")
puts "End of transaction."
puts "Testing read and delete from a new transaction:"
puts " starting a new transaction"
conn.exec("BEGIN")
fd = conn.lo_open(oid, CipherStashPG::INV_READ)
puts " reopened okay."
conn.lo_lseek(fd, 50, CipherStashPG::SEEK_END)
buf = conn.lo_read(fd, 50)
puts " read okay." if buf == SAMPLE_WRITE_DATA
puts "Closing and unlinking:"
conn.lo_close(fd)
puts " closed."
conn.lo_unlink(oid)
puts " unlinked."
conn.exec("COMMIT")
puts "Done."
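losample.rb exercises the whole large-object API. One detail worth calling out: PostgreSQL only honours a large-object descriptor for the lifetime of the transaction that opened it, which is why every lo_open / lo_read / lo_write sequence above sits inside an explicit BEGIN/COMMIT. A minimal sketch of the same rule; lo_creat comes from the underlying pg/libpq large-object API and is assumed to be exposed by CipherStashPG like the other lo_* calls:

require "cipherstash-pg"

conn = CipherStashPG.connect(:dbname => "test")

# Descriptors returned by lo_open are only valid inside the opening transaction.
conn.exec("BEGIN")
oid = conn.lo_creat                               # create an empty large object
fd  = conn.lo_open(oid, CipherStashPG::INV_WRITE)
conn.lo_write(fd, "hello, large objects")
conn.lo_close(fd)
conn.exec("COMMIT")

conn.exec("BEGIN")
conn.lo_unlink(oid)                               # remove it again
conn.exec("COMMIT")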
data/sample/minimal-testcase.rb
ADDED
@@ -0,0 +1,6 @@
require "cipherstash-pg"
conn = CipherStashPG.connect(:dbname => "test")
$stderr.puts("---", RUBY_DESCRIPTION, CipherStashPG.version_string(true),
  "Server version: #{conn.server_version}",
  "Client version: #{CipherStashPG.library_version}", "---")
result = conn.exec("SELECT * from pg_stat_activity")
$stderr.puts "Expected this to return: [\"select * from pg_stat_activity\"]"
p result.field_values("current_query")
data/sample/notify_wait.rb
ADDED
@@ -0,0 +1,26 @@
BEGIN {
  require "pathname"
  basedir = Pathname.new("(string)").expand_path.dirname.parent
  libdir = basedir + "lib"
  $LOAD_PATH.unshift(libdir.to_s) unless $LOAD_PATH.include?(libdir.to_s)
}
require "cipherstash-pg"
TRIGGER_TABLE = "\n\tCREATE TABLE IF NOT EXISTS test ( message text );\n"
TRIGGER_FUNCTION = "\nCREATE OR REPLACE FUNCTION notify_test()\nRETURNS TRIGGER\nLANGUAGE plpgsql\nAS $$\n BEGIN\n NOTIFY woo;\n RETURN NULL;\n END\n$$\n"
DROP_TRIGGER = "\nDROP TRIGGER IF EXISTS notify_trigger ON test\n"
TRIGGER = "\nCREATE TRIGGER notify_trigger\nAFTER UPDATE OR INSERT OR DELETE\nON test\nFOR EACH STATEMENT\nEXECUTE PROCEDURE notify_test();\n"
conn = CipherStashPG.connect(:dbname => "test")
conn.exec(TRIGGER_TABLE)
conn.exec(TRIGGER_FUNCTION)
conn.exec(DROP_TRIGGER)
conn.exec(TRIGGER)
conn.exec("LISTEN woo")
notifications = []
puts("Now switch to a different term and run:", "", " psql test -c \"insert into test values ('A message.')\"", "")
puts "Waiting up to 30 seconds for for an event!"
conn.wait_for_notify(30) { |notify, pid| notifications << [pid, notify] }
if notifications.empty?
  puts "Awww, I didn't see any events."
else
  puts "I got one from pid %d: %s" % notifications.first
end
data/sample/pg_statistics.rb
ADDED
@@ -0,0 +1,104 @@
require "ostruct"
require "optparse"
require "etc"
require "cipherstash-pg"

class Stats
  VERSION = "Id"

  def initialize(opts)
    @opts = opts
    @db = CipherStashPG.connect(:dbname => opts.database, :host => opts.host, :port => opts.port, :user => opts.user, :password => opts.pass, :sslmode => "prefer")
    @last = nil
  end

  public

  def run
    run_count = 0
    loop do
      current_stat = self.get_stats
      if @last.nil?
        @last = current_stat
        sleep(@opts.interval)
        next
      end
      if (run_count == 0) or ((run_count % 50) == 0)
        puts "%-20s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s" % ["time", "commits", "rollbks", "blksrd", "blkshit", "bkends", "seqscan", "seqtprd", "idxscn", "idxtrd", "ins", "upd", "del", "locks", "activeq"]
      end
      delta = current_stat.inject({}) do |h, pair|
        stat, val = *pair
        if ["activeq", "locks", "bkends"].include?(stat)
          h[stat] = current_stat[stat].to_i
        else
          h[stat] = current_stat[stat].to_i - @last[stat].to_i
        end
        h
      end
      delta["time"] = Time.now.strftime("%F %T")
      puts "%-20s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s" % [
        delta["time"], delta["commits"], delta["rollbks"], delta["blksrd"], delta["blkshit"],
        delta["bkends"], delta["seqscan"], delta["seqtprd"], delta["idxscn"], delta["idxtrd"],
        delta["ins"], delta["upd"], delta["del"], delta["locks"], delta["activeq"]
      ]
      @last = current_stat
      run_count += 1
      sleep(@opts.interval)
    end
  end

  def get_stats
    res = @db.exec(<<-SQL % [@opts.database])
      SELECT
        MAX(stat_db.xact_commit) AS commits,
        MAX(stat_db.xact_rollback) AS rollbks,
        MAX(stat_db.blks_read) AS blksrd,
        MAX(stat_db.blks_hit) AS blkshit,
        MAX(stat_db.numbackends) AS bkends,
        SUM(stat_tables.seq_scan) AS seqscan,
        SUM(stat_tables.seq_tup_read) AS seqtprd,
        SUM(stat_tables.idx_scan) AS idxscn,
        SUM(stat_tables.idx_tup_fetch) AS idxtrd,
        SUM(stat_tables.n_tup_ins) AS ins,
        SUM(stat_tables.n_tup_upd) AS upd,
        SUM(stat_tables.n_tup_del) AS del,
        MAX(stat_locks.locks) AS locks,
        MAX(activity.sess) AS activeq
      FROM
        pg_stat_database AS stat_db,
        pg_stat_user_tables AS stat_tables,
        (SELECT COUNT(*) AS locks FROM pg_locks) AS stat_locks,
        (SELECT COUNT(*) AS sess FROM pg_stat_activity WHERE current_query <> '<IDLE>') AS activity
      WHERE
        stat_db.datname = '%s';
    SQL
    return res[0]
  end
end

def parse_args(args)
  options = OpenStruct.new
  options.database = Etc.getpwuid(Process.uid).name
  options.host = "127.0.0.1"
  options.port = 5432
  options.user = Etc.getpwuid(Process.uid).name
  options.sslmode = "disable"
  options.interval = 5
  opts = OptionParser.new do |opts|
    opts.banner = "Usage: #{$0} [options]"
    opts.separator("")
    opts.separator("Connection options:")
    opts.on("-d", "--database DBNAME", "specify the database to connect to (default: \"#{options.database}\")") do |db|
      options.database = db
    end
    opts.on("-h", "--host HOSTNAME", "database server host") do |host|
      options.host = host
    end
    opts.on("-p", "--port PORT", Integer, "database server port (default: \"#{options.port}\")") do |port|
      options.port = port
    end
    opts.on("-U", "--user NAME", "database user name (default: \"#{options.user}\")") do |user|
      options.user = user
    end
    opts.on("-W", "force password prompt") do |pw|
      print("Password: ")
      begin
        system("stty -echo")
        options.pass = gets.chomp
      ensure
        system("stty echo")
        puts
      end
    end
    opts.separator("")
    opts.separator("Other options:")
    opts.on("-i", "--interval SECONDS", Integer, "refresh interval in seconds (default: \"#{options.interval}\")") do |seconds|
      options.interval = seconds
    end
    opts.on_tail("--help", "show this help, then exit") do
      $stderr.puts(opts)
      exit
    end
    opts.on_tail("--version", "output version information, then exit") do
      puts(Stats::VERSION)
      exit
    end
  end
  opts.parse!(args)
  return options
end

if "(string)" == $0
  $stdout.sync = true
  Stats.new(parse_args(ARGV)).run
end
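pg_statistics.rb counts active sessions with WHERE current_query <> '<IDLE>', which only matches servers older than PostgreSQL 9.2; from 9.2 on, pg_stat_activity exposes query and state columns instead. A hedged sketch of the equivalent subselect for 9.2+ (a drop-in for the activity subquery above):

# Assumed replacement for the "activity" subselect on PostgreSQL 9.2+,
# where current_query was split into query + state:
ACTIVE_SESSIONS_SQL = <<~SQL
  (SELECT COUNT(*) AS sess
     FROM pg_stat_activity
    WHERE state <> 'idle') AS activity
SQL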
data/sample/replication_monitor.rb
ADDED
@@ -0,0 +1,123 @@
require "ostruct"
require "optparse"
require "pathname"
require "etc"
require "cipherstash-pg"
require "pp"

class PGMonitor
  VERSION = "Id"

  LAG_ALERT = 32

  def initialize(opts, hosts)
    @opts = opts
    @master = hosts.shift
    @slaves = hosts
    @current_wal = {}
    @failures = []
  end

  attr_reader :opts, :current_wal, :master, :slaves, :failures

  def check
    @failures = []
    return unless self.get_current_wal
    self.slaves.each do |slave|
      begin
        slave_db = CipherStashPG.connect(:dbname => self.opts.database, :host => slave, :port => self.opts.port, :user => self.opts.user, :password => self.opts.pass, :sslmode => "prefer")
        xlog = slave_db.exec("SELECT pg_last_xlog_receive_location()").getvalue(0, 0)
        slave_db.close
        lag_in_megs = (self.find_lag(xlog).to_f / 1024 / 1024).abs
        if lag_in_megs >= LAG_ALERT
          failures << { :host => slave, :error => "%0.2fMB behind the master." % [lag_in_megs] }
        end
      rescue => err
        failures << { :host => slave, :error => err.message }
      end
    end
  end

  protected

  def get_current_wal
    master_db = CipherStashPG.connect(:dbname => self.opts.database, :host => self.master, :port => self.opts.port, :user => self.opts.user, :password => self.opts.pass, :sslmode => "prefer")
    self.current_wal[:segbytes] = master_db.exec("SHOW wal_segment_size").getvalue(0, 0).sub(/\D+/, "").to_i << 20
    current = master_db.exec("SELECT pg_current_xlog_location()").getvalue(0, 0)
    self.current_wal[:segment], self.current_wal[:offset] = current.split(/\//)
    master_db.close
    return true
  rescue => err
    self.failures << { :host => self.master, :error => "Unable to retrieve required info from the master (%s)" % [err.message] }
    return false
  end

  def find_lag(xlog)
    s_segment, s_offset = xlog.split(/\//)
    m_segment = self.current_wal[:segment]
    m_offset = self.current_wal[:offset]
    m_segbytes = self.current_wal[:segbytes]
    return ((m_segment.hex - s_segment.hex) * m_segbytes) + (m_offset.hex - s_offset.hex)
  end
end

def parse_args(args)
  options = OpenStruct.new
  options.database = "postgres"
  options.port = 5432
  options.user = Etc.getpwuid(Process.uid).name
  options.sslmode = "prefer"
  opts = OptionParser.new do |opts|
    opts.banner = "Usage: #{$0} [options] <master> <slave> [slave2, slave3...]"
    opts.separator("")
    opts.separator("Connection options:")
    opts.on("-d", "--database DBNAME", "specify the database to connect to (default: \"#{options.database}\")") do |db|
      options.database = db
    end
    opts.on("-h", "--host HOSTNAME", "database server host") do |host|
      options.host = host
    end
    opts.on("-p", "--port PORT", Integer, "database server port (default: \"#{options.port}\")") do |port|
      options.port = port
    end
    opts.on("-U", "--user NAME", "database user name (default: \"#{options.user}\")") do |user|
      options.user = user
    end
    opts.on("-W", "force password prompt") do |pw|
      print("Password: ")
      begin
        system("stty -echo")
        options.pass = $stdin.gets.chomp
      ensure
        system("stty echo")
        puts
      end
    end
    opts.separator("")
    opts.separator("Other options:")
    opts.on_tail("--help", "show this help, then exit") do
      $stderr.puts(opts)
      exit
    end
    opts.on_tail("--version", "output version information, then exit") do
      puts(PGMonitor::VERSION)
      exit
    end
  end
  opts.parse!(args)
  return options
end

if "(string)" == $0
  opts = parse_args(ARGV)
  if ARGV.length < 2
    raise(ArgumentError, "At least two PostgreSQL servers are required.")
  end
  mon = PGMonitor.new(opts, ARGV)
  mon.check
  if mon.failures.empty?
    puts "All is well!"
    exit(0)
  else
    puts "Database replication delayed or broken."
    mon.failures.each { |bad| puts "%s: %s" % [bad[:host], bad[:error]] }
    exit(1)
  end
end
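A portability note: replication_monitor.rb calls pg_current_xlog_location() on the master and pg_last_xlog_receive_location() on each slave, which exist only up to PostgreSQL 9.6; PostgreSQL 10 renamed them to pg_current_wal_lsn() and pg_last_wal_receive_lsn(). On 10+ the two queries would look roughly like this (the lag arithmetic above is unchanged):

# PostgreSQL 10+ renamed the xlog location functions:
current = master_db.exec("SELECT pg_current_wal_lsn()").getvalue(0, 0)
xlog    = slave_db.exec("SELECT pg_last_wal_receive_lsn()").getvalue(0, 0)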
data/sample/test_binary_values.rb
ADDED
@@ -0,0 +1,17 @@
require "cipherstash-pg"
db = CipherStashPG.connect(:dbname => "test")
db.exec("DROP TABLE IF EXISTS test")
db.exec("CREATE TABLE test (a INTEGER, b BYTEA)")
a = 42
b = [1, 2, 3]
db.exec("INSERT INTO test(a, b) VALUES($1::int, $2::bytea)", [a, { :value => b.pack("N*"), :format => 1 }])
db.exec("SELECT a::int, b::bytea FROM test LIMIT 1", [], 1) do |res|
  res.nfields.times do |i|
    puts "Field %d is: %s, a %s (%s) column from table %p" % [
      i,
      res.fname(i),
      db.exec("SELECT format_type($1,$2)", [res.ftype(i), res.fmod(1)]).getvalue(0, 0),
      res.fformat(i).zero? ? "string" : "binary",
      res.ftable(i)
    ]
  end
  res.each do |row|
    puts "a = #{row["a"].inspect}"
    puts "a (unpacked) = #{row["a"].unpack("N*").inspect}"
    puts "b = #{row["b"].unpack("N*").inspect}"
  end
end
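test_binary_values.rb asks for binary results (the trailing 1 passed to exec), so column values arrive as raw network-order bytes, which is why both a and b are decoded with String#unpack("N*"). With the default text format the integer column would simply come back as the string "42"; a small hedged illustration:

# With text-format results (the default), no unpacking is needed.
db.exec("SELECT a::int FROM test LIMIT 1") do |res|
  puts res.getvalue(0, 0)   # => "42" as a plain String
end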
data/sample/wal_shipper.rb
ADDED
@@ -0,0 +1,202 @@
require "pathname"
require "yaml"
require "fileutils"
require "ostruct"

module WalShipper
  def log(msg)
    return unless @debug
    puts "WAL Shipper: %s" % [msg]
  end

  class Destination < OpenStruct
    include WalShipper

    def initialize(dest, debug = false)
      @debug = debug
      super(dest)
      self.validate
    end

    protected

    def validate
      ["label", "kind"].each do |key|
        if self.send(key.to_sym).nil?
          self.log("Destination %p missing required '%s' key." % [self, key])
          self.invalid = true
        end
      end
      self.path = Pathname.new(self.path) if self.kind == "file"
      if self.kind == "rsync-ssh"
        self.port ||= 22
        self.user = self.user ? "#{self.user}@" : ""
      end
    end
  end

  class Dispatcher
    include WalShipper

    def initialize(wal, conf)
      conf.each_pair { |key, val| self.instance_variable_set("@#{key}", val) }
      @spool = Pathname.new(@spool)
      @spool.exist? or raise("The configured spool directory (%s) doesn't exist." % [@spool])
      unless @enabled
        self.log("WAL shipping is disabled, queuing segment %s" % [wal.basename])
        exit(1)
      end
      @destinations.collect! { |dest| WalShipper::Destination.new(dest, @debug) }.reject do |dest|
        dest.invalid
      end.collect do |dest|
        dest.spool = @spool + dest.label
        dest.spool.mkdir(457) unless dest.spool.exist?
        dest
      end
      @waldir = @spool + "wal_segments"
      @waldir.mkdir(457) unless @waldir.exist?
      self.log("Copying %s to %s" % [wal.basename, @waldir])
      FileUtils.cp(wal, @waldir)
      @wal = @waldir + wal.basename
    end

    def link
      @destinations.each do |dest|
        self.log("Linking %s into %s" % [@wal.basename, dest.spool.basename])
        FileUtils.ln(@wal, dest.spool, :force => true)
      end
    end

    def dispatch
      unless @async
        self.log("Performing a synchronous dispatch.")
        @destinations.each { |dest| self.dispatch_dest(dest) }
        return
      end
      tg = ThreadGroup.new
      if @async_max.nil? or @async_max.to_i.zero?
        self.log("Performing an asynchronous dispatch: one thread per destination.")
        @destinations.each do |dest|
          t = Thread.new do
            Thread.current.abort_on_exception = true
            self.dispatch_dest(dest)
          end
          tg.add(t)
        end
        tg.list.each { |t| t.join }
        return
      end
      self.log("Performing an asynchronous dispatch: one thread per destination, %d at a time." % [@async_max])
      all_dests = @destinations.dup
      dest_chunks = []
      until all_dests.empty?
        dest_chunks << all_dests.slice!(0, @async_max)
      end
      dest_chunks.each do |chunk|
        chunk.each do |dest|
          t = Thread.new do
            Thread.current.abort_on_exception = true
            self.dispatch_dest(dest)
          end
          tg.add(t)
        end
        tg.list.each { |t| t.join }
      end
      return
    end

    def clean_spool
      total = 0
      @waldir.children.each do |wal|
        total = total + wal.unlink if wal.stat.nlink == 1
      end
      self.log("Removed %d WAL segment%s." % [total, (total == 1) ? "" : "s"])
    end

    protected

    def ship_rsync_ssh(dest)
      if dest.host.nil?
        self.log("Destination %p missing required 'host' key. WAL is queued." % [dest.host])
        return
      end
      rsync_flags = "-zc"
      ssh_string = "%s -o ConnectTimeout=%d -o StrictHostKeyChecking=no -p %d" % [@ssh, (@ssh_timeout or 10), dest.port]
      src_string = ""
      dst_string = "%s%s:%s/" % [dest.user, dest.host, dest.path]
      if dest.spool.children.length > 1
        src_string = dest.spool.to_s + "/"
        rsync_flags << "r"
      else
        src_string = dest.spool + @wal.basename
      end
      ship_wal_cmd = [@rsync, @debug ? (rsync_flags << "vh") : (rsync_flags << "q"), "--remove-source-files", "-e", ssh_string, src_string, dst_string]
      self.log("Running command '%s'" % [ship_wal_cmd.join(" ")])
      system(*ship_wal_cmd)
      unless $?.success?
        self.log("Ack! Error while shipping to %p, WAL is queued." % [dest.label])
        system(@error_cmd, dest.label) if @error_cmd
      end
    end

    def ship_file(dest)
      if dest.path.nil?
        self.log("Destination %p missing required 'path' key. WAL is queued." % [dest])
        return
      end
      dest.path.mkdir(457) unless dest.path.exist?
      if dest.spool.children.length > 1
        dest.spool.children.each do |wal|
          wal.unlink if self.copy_file(wal, dest.path, dest.label, dest.compress)
        end
      else
        wal = dest.spool + @wal.basename
        wal.unlink if self.copy_file(wal, dest.path, dest.label, dest.compress)
      end
    end

    def copy_file(wal, path, label, compress = false)
      dest_file = path + wal.basename
      FileUtils.cp(wal, dest_file)
      if compress
        system(*["gzip", "-f", dest_file])
        raise("Error while compressing: %s" % [wal.basename]) unless $?.success?
      end
      self.log("Copied %s%s to %s." % [wal.basename, compress ? " (and compressed)" : "", path])
      return true
    rescue => err
      self.log("Ack! Error while copying '%s' (%s) to %p, WAL is queued." % [wal.basename, err.message, path])
      system(@error_cmd, label) if @error_cmd
      return false
    end

    def dispatch_dest(dest)
      if (not dest.enabled.nil?) and (not dest.enabled)
        self.log("Skipping explicitly disabled destination %p, WAL is queued." % [dest.label])
        return
      end
      meth = ("ship_" + dest.kind.gsub(/-/, "_")).to_sym
      if WalShipper::Dispatcher.method_defined?(meth)
        self.send(meth, dest)
      else
        self.log("Unknown destination kind %p for %p. WAL is queued." % [dest.kind, dest.label])
      end
    end
  end
end

if "(string)" == $0
  CONFIG_DIR = Pathname.new("(string)").dirname.parent + "etc"
  CONFIG = CONFIG_DIR + "wal_shipper.conf"
  unless CONFIG.exist?
    CONFIG_DIR.mkdir(457) unless CONFIG_DIR.exist?
    CONFIG.open("w") { |conf| conf.print(DATA.read) }
    CONFIG.chmod(420)
    puts "No WAL shipping configuration found, default file created."
  end
  wal = ARGV[0] or raise("No WAL file was specified on the command line.")
  wal = Pathname.new(wal)
  conf = YAML.load(CONFIG.read)
  shipper = WalShipper::Dispatcher.new(wal, conf)
  shipper.link
  shipper.dispatch
  shipper.clean_spool
end
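wal_shipper.rb reads its settings from etc/wal_shipper.conf, a YAML file seeded from the script's DATA section (not included in this listing), and copies every top-level key onto the Dispatcher as an instance variable. A hedged sketch of driving the Dispatcher directly with an equivalent hash, assuming the WalShipper module above has been loaded; only keys the code actually reads are used (enabled, debug, spool, async, rsync, ssh, ssh_timeout, destinations, plus per-destination label/kind/host/path/user), and every value below is hypothetical:

require "pathname"

conf = {
  "enabled"      => true,
  "debug"        => true,
  "spool"        => "/var/spool/wal_shipper",    # must already exist
  "async"        => false,
  "rsync"        => "/usr/bin/rsync",
  "ssh"          => "/usr/bin/ssh",
  "ssh_timeout"  => 10,
  "destinations" => [
    { "label" => "archive", "kind" => "file",      "path" => "/backups/wal" },
    { "label" => "standby", "kind" => "rsync-ssh", "host" => "standby.example.com",
      "user" => "postgres", "path" => "/var/lib/postgresql/wal" },
  ],
}

wal = Pathname.new("/var/lib/postgresql/data/pg_wal/000000010000000000000042")  # hypothetical segment
shipper = WalShipper::Dispatcher.new(wal, conf)
shipper.link
shipper.dispatch
shipper.clean_spool

A "file" destination needs only label, kind, and path; an "rsync-ssh" destination additionally needs host, with user and port optional (Destination#validate defaults the port to 22).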