pg 1.2.3 → 1.3.0.rc1
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.appveyor.yml +36 -0
- data/.gems +6 -0
- data/.github/workflows/binary-gems.yml +80 -0
- data/.github/workflows/source-gem.yml +129 -0
- data/.gitignore +13 -0
- data/.hgsigs +34 -0
- data/.hgtags +41 -0
- data/.irbrc +23 -0
- data/.pryrc +23 -0
- data/.tm_properties +21 -0
- data/.travis.yml +49 -0
- data/Gemfile +14 -0
- data/History.rdoc +75 -7
- data/Manifest.txt +0 -1
- data/README.rdoc +7 -6
- data/Rakefile +27 -138
- data/Rakefile.cross +5 -5
- data/certs/ged.pem +24 -0
- data/ext/errorcodes.def +8 -0
- data/ext/errorcodes.txt +3 -1
- data/ext/extconf.rb +90 -19
- data/ext/gvl_wrappers.c +4 -0
- data/ext/gvl_wrappers.h +23 -0
- data/ext/pg.c +35 -1
- data/ext/pg.h +18 -1
- data/ext/pg_coder.c +82 -28
- data/ext/pg_connection.c +538 -279
- data/ext/pg_copy_coder.c +45 -16
- data/ext/pg_record_coder.c +38 -10
- data/ext/pg_result.c +61 -31
- data/ext/pg_text_decoder.c +1 -1
- data/ext/pg_text_encoder.c +6 -6
- data/ext/pg_tuple.c +47 -21
- data/ext/pg_type_map.c +41 -8
- data/ext/pg_type_map_all_strings.c +14 -1
- data/ext/pg_type_map_by_class.c +49 -24
- data/ext/pg_type_map_by_column.c +64 -28
- data/ext/pg_type_map_by_mri_type.c +47 -18
- data/ext/pg_type_map_by_oid.c +52 -23
- data/ext/pg_type_map_in_ruby.c +50 -19
- data/ext/pg_util.c +2 -2
- data/lib/pg/basic_type_map_based_on_result.rb +47 -0
- data/lib/pg/basic_type_map_for_queries.rb +193 -0
- data/lib/pg/basic_type_map_for_results.rb +81 -0
- data/lib/pg/basic_type_registry.rb +296 -0
- data/lib/pg/coder.rb +1 -1
- data/lib/pg/connection.rb +369 -56
- data/lib/pg/version.rb +4 -0
- data/lib/pg.rb +38 -25
- data/misc/openssl-pg-segfault.rb +31 -0
- data/misc/postgres/History.txt +9 -0
- data/misc/postgres/Manifest.txt +5 -0
- data/misc/postgres/README.txt +21 -0
- data/misc/postgres/Rakefile +21 -0
- data/misc/postgres/lib/postgres.rb +16 -0
- data/misc/ruby-pg/History.txt +9 -0
- data/misc/ruby-pg/Manifest.txt +5 -0
- data/misc/ruby-pg/README.txt +21 -0
- data/misc/ruby-pg/Rakefile +21 -0
- data/misc/ruby-pg/lib/ruby/pg.rb +16 -0
- data/pg.gemspec +32 -0
- data/sample/array_insert.rb +20 -0
- data/sample/async_api.rb +106 -0
- data/sample/async_copyto.rb +39 -0
- data/sample/async_mixed.rb +56 -0
- data/sample/check_conn.rb +21 -0
- data/sample/copydata.rb +71 -0
- data/sample/copyfrom.rb +81 -0
- data/sample/copyto.rb +19 -0
- data/sample/cursor.rb +21 -0
- data/sample/disk_usage_report.rb +177 -0
- data/sample/issue-119.rb +94 -0
- data/sample/losample.rb +69 -0
- data/sample/minimal-testcase.rb +17 -0
- data/sample/notify_wait.rb +72 -0
- data/sample/pg_statistics.rb +285 -0
- data/sample/replication_monitor.rb +222 -0
- data/sample/test_binary_values.rb +33 -0
- data/sample/wal_shipper.rb +434 -0
- data/sample/warehouse_partitions.rb +311 -0
- data.tar.gz.sig +0 -0
- metadata +79 -226
- metadata.gz.sig +0 -0
- data/ChangeLog +0 -0
- data/lib/pg/basic_type_mapping.rb +0 -522
- data/spec/data/expected_trace.out +0 -26
- data/spec/data/random_binary_data +0 -0
- data/spec/helpers.rb +0 -380
- data/spec/pg/basic_type_mapping_spec.rb +0 -630
- data/spec/pg/connection_spec.rb +0 -1949
- data/spec/pg/connection_sync_spec.rb +0 -41
- data/spec/pg/result_spec.rb +0 -681
- data/spec/pg/tuple_spec.rb +0 -333
- data/spec/pg/type_map_by_class_spec.rb +0 -138
- data/spec/pg/type_map_by_column_spec.rb +0 -226
- data/spec/pg/type_map_by_mri_type_spec.rb +0 -136
- data/spec/pg/type_map_by_oid_spec.rb +0 -149
- data/spec/pg/type_map_in_ruby_spec.rb +0 -164
- data/spec/pg/type_map_spec.rb +0 -22
- data/spec/pg/type_spec.rb +0 -1123
- data/spec/pg_spec.rb +0 -50
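The most visible library change in this listing is that the monolithic lib/pg/basic_type_mapping.rb is gone, replaced by lib/pg/basic_type_registry.rb plus the separate basic_type_map_for_results.rb, basic_type_map_for_queries.rb and basic_type_map_based_on_result.rb files. As a rough orientation only (not taken from this diff), the sketch below shows how these basic type maps are typically attached to a connection; the 'test' database name is just a placeholder. The hunks that follow reproduce the new sample scripts, starting with data/sample/disk_usage_report.rb.

require 'pg'

conn = PG.connect( dbname: 'test' )   # placeholder database name

# Decode result columns into Ruby objects (Integer, Float, Time, ...) instead of strings.
conn.type_map_for_results = PG::BasicTypeMapForResults.new( conn )

# Encode Ruby bind parameters on the way in.
conn.type_map_for_queries = PG::BasicTypeMapForQueries.new( conn )

res = conn.exec_params( "SELECT $1::int + 2 AS answer, now() AS ts", [40] )
p res[0]   # e.g. {"answer"=>42, "ts"=>...} with a Time object for "ts"

conn.finish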
@@ -0,0 +1,177 @@
# -*- ruby -*-
# vim: set noet nosta sw=4 ts=4 :
#
# Quickly dump size information for a given database.
# Top twenty objects, and size per schema.
#
# Mahlon E. Smith <mahlon@martini.nu>
#
# Based on work by Jeff Davis <ruby@j-davis.com>.
#


require 'ostruct'
require 'optparse'
require 'etc'
require 'pg'

SCRIPT_VERSION = %q$Id$


### Gather data and output it to $stdout.
###
def report( opts )
    db = PG.connect(
        :dbname   => opts.database,
        :host     => opts.host,
        :port     => opts.port,
        :user     => opts.user,
        :password => opts.pass,
        :sslmode  => 'prefer'
    )

    # -----------------------------------------

    db_info = db.exec %Q{
        SELECT
            count(oid) AS num_relations,
            pg_size_pretty(pg_database_size('#{opts.database}')) AS dbsize
        FROM
            pg_class
    }

    puts '=' * 70
    puts "Disk usage information for %s: (%d relations, %s total)" % [
        opts.database,
        db_info[0]['num_relations'],
        db_info[0]['dbsize']
    ]
    puts '=' * 70

    # -----------------------------------------

    top_twenty = db.exec %q{
        SELECT
            relname AS name,
            relkind AS kind,
            pg_size_pretty(pg_relation_size(pg_class.oid)) AS size
        FROM
            pg_class
        ORDER BY
            pg_relation_size(pg_class.oid) DESC
        LIMIT 20
    }

    puts 'Top twenty objects by size:'
    puts '-' * 70
    top_twenty.each do |row|
        type = case row['kind']
               when 'i'; 'index'
               when 't'; 'toast'
               when 'r'; 'table'
               when 'S'; 'sequence'
               else; '???'
               end

        puts "%40s %10s (%s)" % [ row['name'], row['size'], type ]
    end
    puts '-' * 70

    # -----------------------------------------

    schema_sizes = db.exec %q{
        SELECT
            table_schema,
            pg_size_pretty( CAST( SUM(pg_total_relation_size(table_schema || '.' || table_name)) AS bigint)) AS size
        FROM
            information_schema.tables
        GROUP BY
            table_schema
        ORDER BY
            CAST( SUM(pg_total_relation_size(table_schema || '.' || table_name)) AS bigint ) DESC
    }


    puts 'Size per schema:'
    puts '-' * 70
    schema_sizes.each do |row|
        puts "%20s %10s" % [ row['table_schema'], row['size'] ]
    end
    puts '-' * 70
    puts

    db.finish
end


### Parse command line arguments. Return a struct of global options.
###
def parse_args( args )
    options = OpenStruct.new
    options.database = Etc.getpwuid( Process.uid ).name
    options.host = '127.0.0.1'
    options.port = 5432
    options.user = Etc.getpwuid( Process.uid ).name
    options.sslmode = 'prefer'
    options.interval = 5

    opts = OptionParser.new do |opts|
        opts.banner = "Usage: #{$0} [options]"

        opts.separator ''
        opts.separator 'Connection options:'

        opts.on( '-d', '--database DBNAME',
                 "specify the database to connect to (default: \"#{options.database}\")" ) do |db|
            options.database = db
        end

        opts.on( '-h', '--host HOSTNAME', 'database server host' ) do |host|
            options.host = host
        end

        opts.on( '-p', '--port PORT', Integer,
                 "database server port (default: \"#{options.port}\")" ) do |port|
            options.port = port
        end

        opts.on( '-U', '--user NAME',
                 "database user name (default: \"#{options.user}\")" ) do |user|
            options.user = user
        end

        opts.on( '-W', 'force password prompt' ) do |pw|
            print 'Password: '
            begin
                system 'stty -echo'
                options.pass = gets.chomp
            ensure
                system 'stty echo'
                puts
            end
        end

        opts.separator ''
        opts.separator 'Other options:'

        opts.on_tail( '--help', 'show this help, then exit' ) do
            $stderr.puts opts
            exit
        end

        opts.on_tail( '--version', 'output version information, then exit' ) do
            puts SCRIPT_VERSION
            exit
        end
    end

    opts.parse!( args )
    return options
end


if __FILE__ == $0
    opts = parse_args( ARGV )
    report( opts )
end
data/sample/issue-119.rb
ADDED
@@ -0,0 +1,94 @@
# -*- ruby -*-

require 'pg'

# This is another example of how to use COPY FROM, this time as a
# minimal test case used to try to figure out what was going on in
# an issue submitted from a user:
#
# https://bitbucket.org/ged/ruby-pg/issue/119
#

conn = PG.connect( dbname: 'test' )
table_name = 'issue_119'
field_list = %w[name body_weight brain_weight]
method = 0
options = { truncate: true }
sql_parameters = ''

conn.set_error_verbosity( PG::PQERRORS_VERBOSE )
conn.exec( "DROP TABLE IF EXISTS #{table_name}" )
conn.exec( "CREATE TABLE #{table_name} ( id SERIAL, name TEXT, body_weight REAL, brain_weight REAL )" )

text = <<-END_DATA
Mountain beaver	1.35	465
Cow	465	423
Grey wolf	36.33	119.5
Goat	27.66	115
Guinea pig	1.04	5.5
Dipliodocus	11700	50
Asian elephant	2547	4603
Donkey	187.1	419
Horse	521	655
Potar monkey	10	115
Cat	3.3	25.6
Giraffe	529	680
Gorilla	207	406
Human	62	1320
African elephant	6654	5712
Triceratops	9400	70
Rhesus monkey	6.8	179
Kangaroo	35	56
Golden hamster	0.12	1
Mouse	0.023	0.4
Rabbit	2.5	12.1
Sheep	55.5	175
Jaguar	100	157
Chimpanzee	52.16	440
Brachiosaurus	87000	154.5
Mole	0.122	3
Pig	192	18
END_DATA

#ActiveRecord::Base.connection_pool.with_connection do |conn|
conn.transaction do
    rc = conn #.raw_connection
    rc.exec "TRUNCATE TABLE #{table_name};" if options[:truncate]
    sql = "COPY #{table_name} (#{field_list.join(',')}) FROM STDIN #{sql_parameters} "
    p sql
    rc.exec(sql)
    errmsg = nil # scope this outside of the rescue below so it's visible later
    begin
        if method == 1
            rc.put_copy_data text + "\\.\n"
        else
            text.each_line { |line| rc.put_copy_data(line) }
        end
    rescue Errno => err
        errmsg = "%s while reading copy data: %s" % [err.class.name, err.message]
        puts "an error occurred"
    end

    if errmsg
        rc.put_copy_end(errmsg)
        puts "ERROR #{errmsg}"
    else
        rc.put_copy_end
    end

    while res = rc.get_result
        st = res.res_status( res.result_status )
        puts "Result of COPY is: %s" % [ st ]
        if res.result_status != PG::PGRES_COPY_IN
            puts res.error_message
        end
    end
    puts "end"
end #transaction
#end #connection

conn.exec( "SELECT name, brain_weight FROM #{table_name}" ) do |res|
    p res.values
end
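An aside, not part of the hunk above: pg also exposes PG::Connection#copy_data, which wraps the exec / put_copy_data / put_copy_end / get_result dance from issue-119.rb in a single block and raises on COPY errors. A minimal sketch, assuming the same kind of local 'test' database the samples use (the table name is a placeholder):

require 'pg'

conn = PG.connect( dbname: 'test' )   # placeholder database name
conn.exec( "CREATE TEMP TABLE animals ( name TEXT, body_weight REAL, brain_weight REAL )" )

# copy_data issues the COPY, yields so rows can be streamed with put_copy_data,
# then calls put_copy_end and checks the command result.
conn.copy_data( "COPY animals (name, body_weight, brain_weight) FROM STDIN" ) do
    conn.put_copy_data( "Cat\t3.3\t25.6\n" )
    conn.put_copy_data( "Mouse\t0.023\t0.4\n" )
end

p conn.exec( "SELECT count(*) FROM animals" ).getvalue( 0, 0 )   # => "2"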
data/sample/losample.rb
ADDED
@@ -0,0 +1,69 @@
# -*- ruby -*-

require 'pg'

SAMPLE_WRITE_DATA = 'some sample data'
SAMPLE_EXPORT_NAME = 'lowrite.txt'

conn = PG.connect( :dbname => 'test', :host => 'localhost', :port => 5432 )
puts "dbname: " + conn.db + "\thost: " + conn.host + "\tuser: " + conn.user

# Start a transaction, as all large object functions require one.
puts "Beginning transaction"
conn.exec( 'BEGIN' )

# Test importing from a file
puts "Import test:"
puts " importing %s" % [ __FILE__ ]
oid = conn.lo_import( __FILE__ )
puts " imported as large object %d" % [ oid ]

# Read back 50 bytes of the imported data
puts "Read test:"
fd = conn.lo_open( oid, PG::INV_READ|PG::INV_WRITE )
conn.lo_lseek( fd, 0, PG::SEEK_SET )
buf = conn.lo_read( fd, 50 )
puts " read: %p" % [ buf ]
puts " read was ok!" if buf =~ /require 'pg'/

# Append some test data onto the end of the object
puts "Write test:"
conn.lo_lseek( fd, 0, PG::SEEK_END )
buf = SAMPLE_WRITE_DATA.dup
totalbytes = 0
until buf.empty?
    bytes = conn.lo_write( fd, buf )
    buf.slice!( 0, bytes )
    totalbytes += bytes
end
puts " appended %d bytes" % [ totalbytes ]

# Now export it
puts "Export test:"
File.unlink( SAMPLE_EXPORT_NAME ) if File.exist?( SAMPLE_EXPORT_NAME )
conn.lo_export( oid, SAMPLE_EXPORT_NAME )
puts " success!" if File.exist?( SAMPLE_EXPORT_NAME )
puts " exported as %s (%d bytes)" % [ SAMPLE_EXPORT_NAME, File.size(SAMPLE_EXPORT_NAME) ]

conn.exec( 'COMMIT' )
puts "End of transaction."


puts 'Testing read and delete from a new transaction:'
puts ' starting a new transaction'
conn.exec( 'BEGIN' )

fd = conn.lo_open( oid, PG::INV_READ )
puts ' reopened okay.'
conn.lo_lseek( fd, 50, PG::SEEK_END )
buf = conn.lo_read( fd, 50 )
puts ' read okay.' if buf == SAMPLE_WRITE_DATA

puts 'Closing and unlinking:'
conn.lo_close( fd )
puts ' closed.'
conn.lo_unlink( oid )
puts ' unlinked.'
conn.exec( 'COMMIT' )
puts 'Done.'
data/sample/minimal-testcase.rb
ADDED
@@ -0,0 +1,17 @@
# -*- ruby -*-

require 'pg'

conn = PG.connect( :dbname => 'test' )

$stderr.puts '---',
    RUBY_DESCRIPTION,
    PG.version_string( true ),
    "Server version: #{conn.server_version}",
    "Client version: #{PG.library_version}",
    '---'

result = conn.exec( "SELECT * from pg_stat_activity" )

$stderr.puts %Q{Expected this to return: ["select * from pg_stat_activity"]}
p result.field_values( 'current_query' )
data/sample/notify_wait.rb
ADDED
@@ -0,0 +1,72 @@
# -*- ruby -*-
#
# Test script, demonstrating a non-poll notification for a table event.
#

BEGIN {
    require 'pathname'
    basedir = Pathname.new( __FILE__ ).expand_path.dirname.parent
    libdir = basedir + 'lib'
    $LOAD_PATH.unshift( libdir.to_s ) unless $LOAD_PATH.include?( libdir.to_s )
}

require 'pg'

TRIGGER_TABLE = %{
CREATE TABLE IF NOT EXISTS test ( message text );
}

TRIGGER_FUNCTION = %{
CREATE OR REPLACE FUNCTION notify_test()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
 BEGIN
  NOTIFY woo;
  RETURN NULL;
 END
$$
}

DROP_TRIGGER = %{
DROP TRIGGER IF EXISTS notify_trigger ON test
}


TRIGGER = %{
CREATE TRIGGER notify_trigger
AFTER UPDATE OR INSERT OR DELETE
ON test
FOR EACH STATEMENT
EXECUTE PROCEDURE notify_test();
}

conn = PG.connect( :dbname => 'test' )

conn.exec( TRIGGER_TABLE )
conn.exec( TRIGGER_FUNCTION )
conn.exec( DROP_TRIGGER )
conn.exec( TRIGGER )

conn.exec( 'LISTEN woo' ) # register interest in the 'woo' event

notifications = []

puts "Now switch to a different term and run:",
     '',
     %{ psql test -c "insert into test values ('A message.')"},
     ''

puts "Waiting up to 30 seconds for for an event!"
conn.wait_for_notify( 30 ) do |notify, pid|
    notifications << [ pid, notify ]
end

if notifications.empty?
    puts "Awww, I didn't see any events."
else
    puts "I got one from pid %d: %s" % notifications.first
end
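One aside on the sample above (not part of the diff): wait_for_notify can also be called without a timeout to block until a notification arrives, and its block receives the optional NOTIFY payload as a third argument. A minimal sketch, reusing a connection set up like the one in notify_wait.rb:

conn.exec( 'LISTEN woo' )

# Blocks until a notification arrives; the block gets channel, sender pid and payload.
conn.wait_for_notify do |channel, pid, payload|
    puts "got %p from pid %d with payload %p" % [ channel, pid, payload ]
end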
data/sample/pg_statistics.rb
ADDED
@@ -0,0 +1,285 @@
# -*- ruby -*-
# vim: set noet nosta sw=4 ts=4 :
#
# PostgreSQL statistic gatherer.
# Mahlon E. Smith <mahlon@martini.nu>
#
# Based on queries by Kenny Gorman.
# http://www.kennygorman.com/wordpress/?page_id=491
#
# An example gnuplot input script is included in the __END__ block
# of this script. Using it, you can feed the output this script
# generates to gnuplot (after removing header lines) to generate
# some nice performance charts.
#

require 'ostruct'
require 'optparse'
require 'etc'
require 'pg'


### PostgreSQL Stats. Fetch information from pg_stat_* tables.
### Optionally run in a continuous loop, displaying deltas.
###
class Stats
    VERSION = %q$Id$

    def initialize( opts )
        @opts = opts
        @db = PG.connect(
            :dbname   => opts.database,
            :host     => opts.host,
            :port     => opts.port,
            :user     => opts.user,
            :password => opts.pass,
            :sslmode  => 'prefer'
        )
        @last = nil
    end

    ######
    public
    ######

    ### Primary loop. Gather statistics and generate deltas.
    ###
    def run
        run_count = 0

        loop do
            current_stat = self.get_stats

            # First run, store and continue
            #
            if @last.nil?
                @last = current_stat
                sleep @opts.interval
                next
            end

            # headers
            #
            if run_count == 0 || run_count % 50 == 0
                puts "%-20s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s" % %w[
                    time commits rollbks blksrd blkshit bkends seqscan
                    seqtprd idxscn idxtrd ins upd del locks activeq
                ]
            end

            # calculate deltas
            #
            delta = current_stat.inject({}) do |h, pair|
                stat, val = *pair

                if %w[ activeq locks bkends ].include?( stat )
                    h[stat] = current_stat[stat].to_i
                else
                    h[stat] = current_stat[stat].to_i - @last[stat].to_i
                end

                h
            end
            delta[ 'time' ] = Time.now.strftime('%F %T')

            # new values
            #
            puts "%-20s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s" % [
                delta['time'], delta['commits'], delta['rollbks'], delta['blksrd'],
                delta['blkshit'], delta['bkends'], delta['seqscan'],
                delta['seqtprd'], delta['idxscn'], delta['idxtrd'],
                delta['ins'], delta['upd'], delta['del'], delta['locks'], delta['activeq']
            ]

            @last = current_stat
            run_count += 1
            sleep @opts.interval
        end
    end


    ### Query the database for performance measurements. Returns a hash.
    ###
    def get_stats
        res = @db.exec %Q{
            SELECT
                MAX(stat_db.xact_commit) AS commits,
                MAX(stat_db.xact_rollback) AS rollbks,
                MAX(stat_db.blks_read) AS blksrd,
                MAX(stat_db.blks_hit) AS blkshit,
                MAX(stat_db.numbackends) AS bkends,
                SUM(stat_tables.seq_scan) AS seqscan,
                SUM(stat_tables.seq_tup_read) AS seqtprd,
                SUM(stat_tables.idx_scan) AS idxscn,
                SUM(stat_tables.idx_tup_fetch) AS idxtrd,
                SUM(stat_tables.n_tup_ins) AS ins,
                SUM(stat_tables.n_tup_upd) AS upd,
                SUM(stat_tables.n_tup_del) AS del,
                MAX(stat_locks.locks) AS locks,
                MAX(activity.sess) AS activeq
            FROM
                pg_stat_database AS stat_db,
                pg_stat_user_tables AS stat_tables,
                (SELECT COUNT(*) AS locks FROM pg_locks ) AS stat_locks,
                (SELECT COUNT(*) AS sess FROM pg_stat_activity WHERE current_query <> '<IDLE>') AS activity
            WHERE
                stat_db.datname = '%s';
        } % [ @opts.database ]

        return res[0]
    end
end


### Parse command line arguments. Return a struct of global options.
###
def parse_args( args )
    options = OpenStruct.new
    options.database = Etc.getpwuid( Process.uid ).name
    options.host = '127.0.0.1'
    options.port = 5432
    options.user = Etc.getpwuid( Process.uid ).name
    options.sslmode = 'disable'
    options.interval = 5

    opts = OptionParser.new do |opts|
        opts.banner = "Usage: #{$0} [options]"

        opts.separator ''
        opts.separator 'Connection options:'

        opts.on( '-d', '--database DBNAME',
                 "specify the database to connect to (default: \"#{options.database}\")" ) do |db|
            options.database = db
        end

        opts.on( '-h', '--host HOSTNAME', 'database server host' ) do |host|
            options.host = host
        end

        opts.on( '-p', '--port PORT', Integer,
                 "database server port (default: \"#{options.port}\")" ) do |port|
            options.port = port
        end

        opts.on( '-U', '--user NAME',
                 "database user name (default: \"#{options.user}\")" ) do |user|
            options.user = user
        end

        opts.on( '-W', 'force password prompt' ) do |pw|
            print 'Password: '
            begin
                system 'stty -echo'
                options.pass = gets.chomp
            ensure
                system 'stty echo'
                puts
            end
        end

        opts.separator ''
        opts.separator 'Other options:'

        opts.on( '-i', '--interval SECONDS', Integer,
                 "refresh interval in seconds (default: \"#{options.interval}\")") do |seconds|
            options.interval = seconds
        end

        opts.on_tail( '--help', 'show this help, then exit' ) do
            $stderr.puts opts
            exit
        end

        opts.on_tail( '--version', 'output version information, then exit' ) do
            puts Stats::VERSION
            exit
        end
    end

    opts.parse!( args )
    return options
end


### Go!
###
if __FILE__ == $0
    $stdout.sync = true
    Stats.new( parse_args( ARGV ) ).run
end


__END__
######################################################################
### T E R M I N A L   O P T I O N S
######################################################################

#set terminal png nocrop enhanced font arial 8 size '800x600' x000000 xffffff x444444
#set output 'graph.png'

set terminal pdf linewidth 4 size 11,8
set output 'graph.pdf'

#set terminal aqua


######################################################################
### O P T I O N S   F O R   A L L   G R A P H S
######################################################################

set multiplot layout 2,1 title "PostgreSQL Statistics\n5 second sample rate (smoothed)"

set grid x y
set key right vertical outside
set key nobox
set xdata time
set timefmt "%Y-%m-%d.%H:%M:%S"
set format x "%l%p"
set xtic rotate by -45
input_file = "database_stats.txt"

# edit to taste!
set xrange ["2012-04-16.00:00:00":"2012-04-17.00:00:00"]


######################################################################
### G R A P H   1
######################################################################

set title "Database Operations and Connection Totals"
set yrange [0:200]

plot \
    input_file using 1:2 title "Commits" with lines smooth bezier, \
    input_file using 1:3 title "Rollbacks" with lines smooth bezier, \
    input_file using 1:11 title "Inserts" with lines smooth bezier, \
    input_file using 1:12 title "Updates" with lines smooth bezier, \
    input_file using 1:13 title "Deletes" with lines smooth bezier, \
    input_file using 1:6 title "Backends (total)" with lines, \
    input_file using 1:15 title "Active queries (total)" with lines smooth bezier


######################################################################
### G R A P H   2
######################################################################

set title "Backend Performance"
set yrange [0:10000]

plot \
    input_file using 1:4 title "Block (cache) reads" with lines smooth bezier, \
    input_file using 1:5 title "Block (cache) hits" with lines smooth bezier, \
    input_file using 1:7 title "Sequence scans" with lines smooth bezier, \
    input_file using 1:8 title "Sequence tuple reads" with lines smooth bezier, \
    input_file using 1:9 title "Index scans" with lines smooth bezier, \
    input_file using 1:10 title "Index tuple reads" with lines smooth bezier


######################################################################
### C L E A N U P
######################################################################

unset multiplot
reset