postgrestats 1.0.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,5 @@
1
+ lib/**/*.rb
2
+ bin/*
3
+ -
4
+ features/**/*.feature
5
+ LICENSE.txt
data/.rspec ADDED
@@ -0,0 +1 @@
1
+ --color
data/Gemfile ADDED
@@ -0,0 +1,19 @@
1
# Use TLS when talking to the gem index so gem downloads cannot be
# tampered with in transit (plain http:// is a known supply-chain risk).
source "https://rubygems.org"

# Runtime dependencies.
gem "pg"       # PostgreSQL client used by lib/postgrestats.rb
gem "gmetric"  # Ganglia gmetric publisher used by bin/postgrestats

# Development-only dependencies; everything needed to run rake, tests,
# features, etc.
group :development do
  gem "rspec", "~> 2.8.0"
  gem "yard", "~> 0.7"
  gem "rdoc", "~> 3.12"
  gem "cucumber", ">= 0"
  gem "bundler", "> 1.0.0"
  gem "jeweler", "~> 1.8.3"
  # rcov only supports MRI 1.8; 1.9 uses simplecov instead.
  gem (RUBY_VERSION =~ /^1\.9/ ? "simplecov" : "rcov"), ">= 0"
end
@@ -0,0 +1,20 @@
1
+ Copyright (c) 2012 Artem Veremey
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining
4
+ a copy of this software and associated documentation files (the
5
+ "Software"), to deal in the Software without restriction, including
6
+ without limitation the rights to use, copy, modify, merge, publish,
7
+ distribute, sublicense, and/or sell copies of the Software, and to
8
+ permit persons to whom the Software is furnished to do so, subject to
9
+ the following conditions:
10
+
11
+ The above copyright notice and this permission notice shall be
12
+ included in all copies or substantial portions of the Software.
13
+
14
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,19 @@
1
+ = postgrestats
2
+
3
+ Postgrestats is a library that captures important PostgreSQL statistics.
4
+
5
+ == Contributing to postgrestats
6
+
7
+ * Check out the latest master to make sure the feature hasn't been implemented or the bug hasn't been fixed yet.
8
+ * Check out the issue tracker to make sure someone already hasn't requested it and/or contributed it.
9
+ * Fork the project.
10
+ * Start a feature/bugfix branch.
11
+ * Commit and push until you are happy with your contribution.
12
+ * Make sure to add tests for it. This is important so I don't break it in a future version unintentionally.
13
+ * Please try not to mess with the Rakefile, version, or history. If you want to have your own version, or is otherwise necessary, that is fine, but please isolate to its own commit so I can cherry-pick around it.
14
+
15
+ == Copyright
16
+
17
+ Copyright (c) 2012 Artem Veremey. See LICENSE.txt for
18
+ further details.
19
+
@@ -0,0 +1,53 @@
1
+ # encoding: utf-8
2
+
3
+ require 'rubygems'
4
+ require 'bundler'
5
+ begin
6
+ Bundler.setup(:default, :development)
7
+ rescue Bundler::BundlerError => e
8
+ $stderr.puts e.message
9
+ $stderr.puts "Run `bundle install` to install missing gems"
10
+ exit e.status_code
11
+ end
12
+ require 'rake'
13
+
14
+ require 'jeweler'
15
+ Jeweler::Tasks.new do |gem|
16
+ # gem is a Gem::Specification... see http://docs.rubygems.org/read/chapter/20 for more options
17
+ gem.name = "postgrestats"
18
+ gem.homepage = "http://github.com/aia/postgrestats"
19
+ gem.license = "MIT"
20
+ gem.summary = %Q{Postgrestats is a library that captures important PostgreSQL statistics}
21
+ gem.description = %Q{Postgrestats is a library that captures important PostgreSQL statistics}
22
+ gem.email = "artem@veremey.net"
23
+ gem.authors = ["Artem Veremey"]
24
+ # dependencies defined in Gemfile
25
+ end
26
+ Jeweler::RubygemsDotOrgTasks.new
27
+
28
+ require 'rspec/core'
29
+ require 'rspec/core/rake_task'
30
+ RSpec::Core::RakeTask.new(:spec) do |spec|
31
+ spec.pattern = FileList['spec/**/*_spec.rb']
32
+ end
33
+
34
+ if RUBY_VERSION =~ /^1\.9/
35
+ desc "Code coverage detail"
36
+ task :simplecov do
37
+ ENV['COVERAGE'] = "true"
38
+ Rake::Task['spec'].execute
39
+ end
40
+ else
41
+ RSpec::Core::RakeTask.new(:rcov) do |spec|
42
+ spec.pattern = 'spec/**/*_spec.rb'
43
+ spec.rcov = true
44
+ end
45
+ end
46
+
47
+ require 'cucumber/rake/task'
48
+ Cucumber::Rake::Task.new(:features)
49
+
50
+ task :default => :spec
51
+
52
+ require 'yard'
53
+ YARD::Rake::YardocTask.new
data/VERSION ADDED
@@ -0,0 +1 @@
1
+ 1.0.0
@@ -0,0 +1,181 @@
1
+ #!/usr/bin/env ruby
2
+
3
+ $LOAD_PATH.unshift File.dirname(__FILE__) + '/../lib'
4
+
5
+ require 'postgrestats'
6
+ require 'yaml'
7
+ require 'gmetric'
8
+
9
+ @counter_list = {}
10
+
11
+ def counter_to_gauge(prefix, counter, value)
12
+ field_name = [prefix, counter].join("_")
13
+ if @counter_list[field_name].nil?
14
+ @counter_list[field_name] = value.to_f
15
+ return 0.0
16
+ else
17
+ ret = value.to_f - @counter_list[field_name]
18
+ @counter_list[field_name] = value.to_f
19
+ return ret
20
+ end
21
+ end
22
+
23
# Publish a single per-table metric to the Ganglia gmond endpoint
# configured under config['gmond']['tables'].
#
# stats_ds     - PostgreStats instance (used only for its config).
# publish_host - spoofed host name the metric is reported under
#                (a "<db>_<table>" key — see publish_hierarchy).
# name         - metric name.
# value        - raw value; sent as a Float ('double') when the metric is
#                listed in @watch_table_double, otherwise as an Integer
#                ('uint32').
# slope        - Ganglia slope string (e.g. 'both').
# units        - human-readable units string.
def publish_table_metric(stats_ds, publish_host, name, value, slope, units)
  is_double = @watch_table_double.include?(name)
  gmond = stats_ds.config['gmond']['tables']

  Ganglia::GMetric.send(
    gmond['host'],
    gmond['port'],
    {
      :name => name,
      :units => units,
      :type => is_double ? 'double' : 'uint32',
      :value => is_double ? value.to_f : value.to_i,
      :tmax => 60,
      :dmax => 300,
      :group => 'staging_tables',
      :slope => slope,
      :spoof => 1,
      # NOTE(review): the spoof string repeats publish_host on both sides
      # of the colon — confirm "ip:hostname" is not what was intended.
      :hostname => "#{publish_host}:#{publish_host}"
    }
  )
end
42
+
43
# Publish a single database-wide (aggregate) metric to the Ganglia gmond
# endpoint configured under config['gmond']['aggregate'].
#
# stats_ds     - PostgreStats instance (used only for its config).
# publish_host - spoofed host name the metric is reported under
#                (a "<db>_aggregate" key — see publish_hierarchy).
# name         - metric name.
# value        - raw value; sent as a Float ('double') when the metric is
#                listed in @watch_aggregate_double, otherwise as an
#                Integer ('uint32').
# slope        - Ganglia slope string (e.g. 'both').
# units        - human-readable units string.
def publish_aggregate_metric(stats_ds, publish_host, name, value, slope, units)
  is_double = @watch_aggregate_double.include?(name)
  gmond = stats_ds.config['gmond']['aggregate']

  Ganglia::GMetric.send(
    gmond['host'],
    gmond['port'],
    {
      :name => name,
      :units => units,
      :type => is_double ? 'double' : 'uint32',
      :value => is_double ? value.to_f : value.to_i,
      :tmax => 60,
      :dmax => 300,
      :group => 'staging_aggregate',
      :slope => slope,
      :spoof => 1,
      # NOTE(review): spoof string repeats publish_host on both sides of
      # the colon — confirm "ip:hostname" is not what was intended.
      :hostname => "#{publish_host}:#{publish_host}"
    }
  )
end
62
+
63
# Walk the collected metric tree and publish every metric to Ganglia.
# Tree keys matching /aggregate/ are routed to the aggregate gmond
# endpoint; all other keys are treated as per-table metrics. The tree key
# itself ("<db>_<table>" or "<db>_aggregate") is used as the spoofed host
# name.
def publish_hierarchy(stats_ds, tree)
  tree.each_key do |host_key|
    publisher =
      host_key =~ /aggregate/ ? :publish_aggregate_metric : :publish_table_metric

    tree[host_key].each do |metric, data|
      send(
        publisher,
        stats_ds,
        host_key,
        metric,
        data['value'],
        data['slope'],
        data['units']
      )
    end
  end
end
90
+
91
+
92
STDOUT.sync = true

usage = <<-USAGE
postgrestats <configuration file>
USAGE

# The single command-line argument is a YAML configuration file.
# File.exist? replaces the deprecated File.exists? alias.
if !ARGV[0].nil? && File.exist?(ARGV[0])
  config = YAML.load_file(ARGV[0])
else
  puts usage
  exit
end

stats = PostgreStats.new(config)

dbs = stats.get_databases

@table_metrics = stats.get_table_metrics
@aggregate_metrics = stats.get_aggregate_metrics
@watch_aggregate_double = stats.get_aggregate_ganglia_double
@watch_table_double = stats.get_table_ganglia_double

hierarchy = {}

# Main polling loop: collect per-table and aggregate metrics for every
# database, publish them to Ganglia, then sleep config['sleep'] seconds
# (default 30).
while true
  begin
    dbs.each do |database|
      time_start = Time.now

      # Per-table gauges: each handler returns {table => value}.
      stats.table_gauges.each_key do |gauge|
        stats.send(stats.table_gauges[gauge]['handler'].to_sym, database).each do |table, value|
          hierarchy["#{database}_#{table}"] ||= {}
          hierarchy["#{database}_#{table}"][gauge] = {
            'value' => value,
            'units' => stats.table_gauges[gauge]['units'],
            'slope' => 'both'
          }
        end
      end

      # Per-table counters: converted to per-interval deltas.
      stats.table_counters.each_key do |counter|
        stats.send(stats.table_counters[counter]['handler'].to_sym, database).each do |table, value|
          hierarchy["#{database}_#{table}"] ||= {}
          hierarchy["#{database}_#{table}"][counter] = {
            'value' => counter_to_gauge("#{database}_#{table}", counter, value),
            'units' => stats.table_counters[counter]['units'],
            'slope' => 'both'
          }
        end
      end

      # Database-wide metrics, published under the "<db>_aggregate" key.
      # (The original initialized this hash twice; once, here, suffices.)
      hierarchy["#{database}_aggregate"] = {}

      stats.aggregate_gauges.each_key do |gauge|
        hierarchy["#{database}_aggregate"][gauge] = {
          'value' => stats.send(stats.aggregate_gauges[gauge]['handler'].to_sym, database),
          'units' => stats.aggregate_gauges[gauge]['units'],
          'slope' => 'both'
        }
      end

      stats.aggregate_counters.each_key do |counter|
        hierarchy["#{database}_aggregate"][counter] = {
          'value' => counter_to_gauge(
            "#{database}_aggregate",
            counter,
            stats.send(stats.aggregate_counters[counter]['handler'].to_sym, database)
          ),
          'units' => stats.aggregate_counters[counter]['units'],
          'slope' => 'both'
        }
      end

      time_mid = Time.now
      stats.log.info("Processed #{database} database in #{time_mid - time_start}")

      publish_hierarchy(stats, hierarchy)
      hierarchy = {}
    end
  rescue StandardError => e
    # Rescue StandardError rather than Exception so SIGINT and
    # SystemExit still terminate the daemon instead of being swallowed
    # by this keep-alive loop.
    puts e.message
    puts e.backtrace.inspect
  end

  sleep(config['sleep'] || 30)
end
@@ -0,0 +1,18 @@
1
+ host: your database host
2
+ user: the user to access your database
3
+ password: user password to access the database
4
+ dbname: initial database to connect to
5
+ gmond:
6
+ tables:
7
+ host: your gmond host to send table data to
8
+ port: gmond port to send table data to
9
+ group: group name to publish table data as
10
+ aggregate:
11
+ host: your gmond host to send aggregate data to
12
+ port: gmond port to send aggregate data to
13
+ group: group name to publish aggregate data as
14
+ exclude_dbs:
15
+ - template0
16
+ - template1
17
+ - postgres
18
+ - repmgr
@@ -0,0 +1,9 @@
1
+ Feature: something something
2
+ In order to something something
3
+ A user something something
4
+ something something something
5
+
6
+ Scenario: something something
7
+ Given inspiration
8
+ When I create a sweet new gem
9
+ Then everyone should see how awesome I am
@@ -0,0 +1,13 @@
1
+ require 'bundler'
2
+ begin
3
+ Bundler.setup(:default, :development)
4
+ rescue Bundler::BundlerError => e
5
+ $stderr.puts e.message
6
+ $stderr.puts "Run `bundle install` to install missing gems"
7
+ exit e.status_code
8
+ end
9
+
10
+ $LOAD_PATH.unshift(File.dirname(__FILE__) + '/../../lib')
11
+ require 'postgrestats'
12
+
13
+ require 'rspec/expectations'
@@ -0,0 +1,747 @@
1
+ require 'pg'
2
+ require 'logger'
3
+
4
+ class PostgreStats
5
+
6
+ attr_accessor :config, :connection, :db, :log
7
+ attr_accessor :table_gauges, :table_counters, :aggregate_gauges, :aggregate_counters
8
+
9
+ def initialize(config, logger = nil)
10
+ @config = config
11
+
12
+ initialize_log unless logger
13
+ @log = logger if logger
14
+ @log.error("Logging started")
15
+
16
+ initialize_metrics
17
+ end
18
+
19
+ def initialize_log
20
+ @config['log'] = {
21
+ 'file' => STDOUT,
22
+ 'level' => 'INFO'
23
+ }.merge(@config['log'] || {})
24
+
25
+ log_initialize = [@config['log']['file']]
26
+ log_initialize << @config['log']['shift_age'] if @config['log']['shift_age']
27
+ log_initialize << @config['log']['shift_size'] if @config['log']['shift_size']
28
+
29
+ begin
30
+ @log = Logger.new(*log_initialize)
31
+ @log.level = Logger.const_get(@config['log']['level'])
32
+ rescue Exception => e
33
+ @config['log'] = {
34
+ 'file' => STDOUT,
35
+ 'level' => 'INFO'
36
+ }
37
+ @log = Logger.new(@config['log']['file'])
38
+ @log.level = Logger.const_get(@config['log']['level'])
39
+ @log.error("Caught a problem with log settings")
40
+ @log.error("#{e.message}")
41
+ @log.error("Setting log settings to defaults")
42
+ end
43
+ end
44
+
45
+ def initialize_metrics
46
+ @table_gauges = {
47
+ 'table_size' => {
48
+ 'units' => 'bytes',
49
+ 'handler' => :get_table_sizes
50
+ },
51
+ 'index_size' => {
52
+ 'units' => 'bytes',
53
+ 'handler' => :get_index_sizes
54
+ },
55
+ 'estimated_rows' => {
56
+ 'units' => 'rows',
57
+ 'handler' => :get_tables_estimated_rows
58
+ },
59
+ 'dead' => {
60
+ 'units' => 'rows',
61
+ 'handler' => :get_dead_per_table
62
+ },
63
+ 'last_vacuum' => {
64
+ 'units' => 'seconds since',
65
+ 'handler' => :get_last_autovacuum_per_table
66
+ }
67
+ }
68
+
69
+ @table_counters = {
70
+ 'inserts' => {
71
+ 'units' => 'inserts/s',
72
+ 'ganglia_double' => true,
73
+ 'handler' => :get_inserts_per_table
74
+ },
75
+ 'updates' => {
76
+ 'units' => 'updates/s',
77
+ 'ganglia_double' => true,
78
+ 'handler' => :get_updates_per_table
79
+ },
80
+ 'deletes' => {
81
+ 'units' => 'deletes/s',
82
+ 'ganglia_double' => true,
83
+ 'handler' => :get_deletes_per_table
84
+ },
85
+ 'hot_updates' => {
86
+ 'units' => 'updates/s',
87
+ 'ganglia_double' => true,
88
+ 'handler' => :get_hot_updates_per_table
89
+ },
90
+ 'seq_scan' => {
91
+ 'units' => 'scans/s',
92
+ 'ganglia_double' => true,
93
+ 'handler' => :get_sequential_scans_per_table
94
+ },
95
+ 'idx_scan' => {
96
+ 'units' => 'scans/s',
97
+ 'ganglia_double' => true,
98
+ 'handler' => :get_index_scans_per_table
99
+ },
100
+ 'seq_tup_read' => {
101
+ 'units' => 'rows/s',
102
+ 'ganglia_double' => true,
103
+ 'handler' => :get_sequential_rows_read_per_table
104
+ },
105
+ 'idx_tup_fetch' => {
106
+ 'units' => 'rows/s',
107
+ 'ganglia_double' => true,
108
+ 'handler' => :get_index_rows_fetched_per_table
109
+ },
110
+ 'heap_blks_read' => {
111
+ 'units' => 'blocks/s',
112
+ 'ganglia_double' => true,
113
+ 'handler' => :get_heap_blocks_read_per_table
114
+ },
115
+ 'heap_blks_hit' => {
116
+ 'units' => 'blocks/s',
117
+ 'ganglia_double' => true,
118
+ 'handler' => :get_heap_blocks_hit_per_table
119
+ },
120
+ 'idx_blks_read' => {
121
+ 'units' => 'blocks/s',
122
+ 'ganglia_double' => true,
123
+ 'handler' => :get_index_blocks_read_per_table
124
+ },
125
+ 'idx_blks_hit' => {
126
+ 'units' => 'blocks/s',
127
+ 'ganglia_double' => true,
128
+ 'handler' => :get_index_blocks_hit_per_table
129
+ }
130
+ }
131
+
132
+ @aggregate_gauges = {
133
+ 'locks' => {
134
+ 'units' => 'locks',
135
+ 'handler' => :get_number_of_locks_per_database
136
+ },
137
+ 'connections' => {
138
+ 'units' => 'connections',
139
+ 'handler' => :get_connections_per_database
140
+ }
141
+ }
142
+
143
+ @aggregate_counters = {
144
+ 'blocks_fetched' => {
145
+ 'units' => 'blocks/s',
146
+ 'ganglia_double' => true,
147
+ 'handler' => :get_blocks_fetched_per_database
148
+ },
149
+ 'blocks_hit' => {
150
+ 'units' => 'blocks/s',
151
+ 'ganglia_double' => true,
152
+ 'handler' => :get_blocks_hit_per_database
153
+ },
154
+ 'commits' => {
155
+ 'units' => 'commits/s',
156
+ 'ganglia_double' => true,
157
+ 'handler' => :get_commits_per_database
158
+ },
159
+ 'rollbacks' => {
160
+ 'units' => 'rollbacks/s',
161
+ 'ganglia_double' => true,
162
+ 'handler' => :get_rollbacks_per_database
163
+ },
164
+ 'inserts' => {
165
+ 'units' => 'inserts/s',
166
+ 'ganglia_double' => true,
167
+ 'handler' => :get_inserts_per_database
168
+ },
169
+ 'updates' => {
170
+ 'units' => 'updates/s',
171
+ 'ganglia_double' => true,
172
+ 'handler' => :get_updates_per_database
173
+ },
174
+ 'deletes' => {
175
+ 'units' => 'deletes/s',
176
+ 'ganglia_double' => true,
177
+ 'handler' => :get_deletes_per_database
178
+ }
179
+ }
180
+ end
181
+
182
# Ensure @connection points at database `db`, reconnecting only when the
# target database changes; an existing connection to `db` is reused.
#
# db - String database name.
def connect(db)
  return if @connection && (@db == db)

  # Close any previous connection before switching databases. The
  # original guard (`connection.close if @db == db`) was unreachable
  # here — when @db == db and a connection existed we already returned —
  # so every database switch leaked a PG connection.
  @connection.close if @connection

  @connection = PG.connect(
    'host' => @config['host'],
    'user' => @config['user'],
    'password' => @config['password'],
    'dbname' => db
  )
  @db = db
end
197
+
198
# Execute `query` against database `db` (connecting/switching first) and
# return the result set as an Array of Hashes ({column_name => value}).
#
# db    - String database name.
# query - SQL String.
def run_query(db, query)
  connect(db)

  result = @connection.exec(query)
  columns = result.fields

  result.map do |row|
    columns.each_with_object({}) { |column, record| record[column] = row[column] }
  end
end
213
+
214
# Names of all per-table metrics (gauges followed by counters).
def get_table_metrics
  table_gauges.keys + table_counters.keys
end

# Names of all database-wide (aggregate) metrics (gauges followed by
# counters).
def get_aggregate_metrics
  aggregate_gauges.keys + aggregate_counters.keys
end

# Per-table metric names flagged 'ganglia_double', i.e. those that must
# be published to Ganglia as doubles rather than uint32.
def get_table_ganglia_double
  [table_gauges, table_counters].flat_map do |metrics|
    metrics.select { |_name, spec| spec.has_key?('ganglia_double') }.keys
  end
end

# Aggregate metric names flagged 'ganglia_double'.
def get_aggregate_ganglia_double
  [aggregate_gauges, aggregate_counters].flat_map do |metrics|
    metrics.select { |_name, spec| spec.has_key?('ganglia_double') }.keys
  end
end
235
+
236
# List database names on the server, excluding any named in
# config['exclude_dbs'].
#
# Returns an Array of database name Strings.
def get_databases
  query = <<-END_GET_DATABASES_QUERY
    SELECT pg_database.datname AS "Database", pg_user.usename AS "Owner"
    FROM pg_database, pg_user
    WHERE pg_database.datdba = pg_user.usesysid
    UNION
    SELECT pg_database.datname AS "Database", NULL AS "Owner"
    FROM pg_database
    WHERE pg_database.datdba NOT IN (SELECT usesysid FROM pg_user)
    ORDER BY "Database"
  END_GET_DATABASES_QUERY

  rows = run_query('postgres', query)

  # Non-destructive reject/map: the previous `select!.map!` chain raised
  # NoMethodError whenever select! removed nothing, because select!
  # returns nil in that case.
  rows.reject { |row| @config['exclude_dbs'].include?(row['Database']) }
      .map { |row| row['Database'] }
end
254
+
255
# List user table names visible in database `db`, skipping the
# pg_catalog and pg_toast schemas.
#
# Returns an Array of table name Strings.
def get_tables(db)
  query = <<-END_GET_TABLES_QUERY
    SELECT c.relname FROM pg_catalog.pg_class c
    LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
    WHERE c.relkind IN ('r','') AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
    AND pg_catalog.pg_table_is_visible(c.oid)
  END_GET_TABLES_QUERY

  run_query(db, query).map { |row| row['relname'] }
end
269
+
270
+ # * size per database/table
271
+ def get_table_sizes(db)
272
+ query = <<-END_GET_TABLE_SIZES_QUERY
273
+ SELECT table_name,pg_relation_size(table_name) AS size
274
+ FROM information_schema.tables
275
+ WHERE table_schema NOT IN ('information_schema', 'pg_catalog') AND table_type = 'BASE TABLE'
276
+ END_GET_TABLE_SIZES_QUERY
277
+
278
+ ret = run_query(db, query)
279
+
280
+ ret.map!{ |v| [v['table_name'], v['size'].to_i] }
281
+
282
+ return Hash[*ret.flatten]
283
+ end
284
+
285
+ # * size of indexes
286
+ def get_index_sizes(db)
287
+ query = <<-END_GET_INDEX_SIZES_QUERY
288
+ SELECT table_name,(pg_total_relation_size(table_name) - pg_relation_size(table_name)) AS size
289
+ FROM information_schema.tables
290
+ WHERE table_schema NOT IN ('information_schema', 'pg_catalog') AND table_type = 'BASE TABLE'
291
+ END_GET_INDEX_SIZES_QUERY
292
+
293
+ ret = run_query(db, query)
294
+
295
+ ret.map!{ |v| [v['table_name'], v['size'].to_i] }
296
+
297
+ return Hash[*ret.flatten]
298
+ end
299
+
300
+ # * size of individual indexes
301
+ # SELECT c3.relname AS "Table",
302
+ # c2.relname AS "Index",
303
+ # pg_size_pretty(pg_relation_size(c3.relname::text)) AS "Data Size",
304
+ # pg_size_pretty(pg_relation_size(c2.relname::text)) AS "Index Size",
305
+ # pg_size_pretty(pg_total_relation_size(c3.relname::text)) AS "Total"
306
+ # FROM pg_class c2
307
+ # LEFT JOIN pg_index i ON c2.oid = i.indexrelid
308
+ # LEFT JOIN pg_class c1 ON c1.oid = i.indrelid
309
+ # RIGHT OUTER JOIN pg_class c3 ON c3.oid = c1.oid
310
+ # LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c3.relnamespace
311
+ # WHERE c3.relkind IN ('r','')
312
+ # AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
313
+ # AND pg_catalog.pg_table_is_visible(c3.oid)
314
+ # ORDER BY c3.relpages DESC;
315
+
316
+ # * estimated rows per database/table
317
+ def get_table_estimated_rows(db, table)
318
+ query = <<-END_GET_TABLE_ESTIMATED_ROWS_QUERY
319
+ SELECT (
320
+ CASE
321
+ WHEN reltuples > 0 THEN pg_relation_size('#{table}')/(8192*relpages::bigint/reltuples)
322
+ ELSE 0
323
+ END
324
+ )::bigint AS estimated_row_count
325
+ FROM pg_class
326
+ WHERE oid = '#{table}'::regclass;
327
+ END_GET_TABLE_ESTIMATED_ROWS_QUERY
328
+
329
+ ret = run_query(db, query)
330
+
331
+ return ret.first['estimated_row_count'].to_i
332
+ end
333
+
334
+ # * estimated rows for every table in the database
335
+ def get_tables_estimated_rows(db)
336
+ ret = {}
337
+
338
+ get_tables(db).each do |table|
339
+ ret[table] = get_table_estimated_rows(db, table)
340
+ end
341
+
342
+ return ret
343
+ end
344
+
345
+ # * number of connections
346
# Count client connections per database (from pg_stat_activity),
# excluding databases named in config['exclude_dbs'].
#
# db - optional database name. When given, returns that database's
#      connection count as an Integer (0 when absent from the result);
#      otherwise returns a Hash of {database_name => count}.
def get_connections_per_database(db = nil)
  query = <<-END_GET_CONNECTIONS_PER_DATABASE_QUERY
    SELECT datname, count(*) FROM pg_stat_activity GROUP BY datname;
  END_GET_CONNECTIONS_PER_DATABASE_QUERY

  rows = run_query('postgres', query)

  # Non-destructive reject + hash build: the previous `select!.map!`
  # chain raised NoMethodError whenever select! removed nothing, because
  # select! returns nil in that case.
  counts = rows.reject { |row| @config['exclude_dbs'].include?(row['datname']) }
               .each_with_object({}) { |row, acc| acc[row['datname']] = row['count'].to_i }

  db ? counts[db].to_i : counts
end
363
+
364
+ # * dead unused space taken up in a table or index
365
+ # * number of seconds since the last checkpoint per database
366
+ # * number of seconds since the last autovacuum per database/table
367
+
368
+ def get_last_autovacuum_per_table(db)
369
+ query = <<-END_GET_LAST_AUTOVACUUM_PER_DATABASE_QUERY
370
+ SELECT current_database() AS datname, nspname AS sname, relname AS tname,
371
+ CASE WHEN v IS NULL THEN -1 ELSE round(extract(epoch FROM now()-v)) END AS ltime
372
+ FROM (
373
+ SELECT nspname, relname, pg_stat_get_last_autovacuum_time(c.oid) AS v
374
+ FROM pg_class c, pg_namespace n
375
+ WHERE relkind = 'r'
376
+ AND n.oid = c.relnamespace
377
+ AND n.nspname <> 'information_schema'
378
+ AND n.nspname = 'public'
379
+ ORDER BY 3
380
+ ) AS foo
381
+ END_GET_LAST_AUTOVACUUM_PER_DATABASE_QUERY
382
+
383
+ ret = run_query(db, query)
384
+
385
+ ret.map!{ |v| [v['tname'], v['ltime']] }
386
+
387
+ return Hash[*ret.flatten]
388
+ end
389
+
390
+ # * number of blocks fetched per database
391
+
392
+ def get_blocks_fetched_per_database(db)
393
+ query = <<-END_GET_BLOCKS_FETCHED_PER_DATABASE_QUERY
394
+ SELECT pg_stat_get_db_blocks_fetched(oid)
395
+ FROM pg_database
396
+ WHERE datname = '#{db}'
397
+ END_GET_BLOCKS_FETCHED_PER_DATABASE_QUERY
398
+
399
+ ret = run_query(db, query)
400
+
401
+ return ret.first['pg_stat_get_db_blocks_fetched'].to_i
402
+ end
403
+
404
+ # * number of blocks hit per database
405
+
406
+ def get_blocks_hit_per_database(db)
407
+ query = <<-END_GET_BLOCK_HITS_PER_DATABASE_QUERY
408
+ SELECT pg_stat_get_db_blocks_hit(oid)
409
+ FROM pg_database
410
+ WHERE datname = '#{db}'
411
+ END_GET_BLOCK_HITS_PER_DATABASE_QUERY
412
+
413
+ ret = run_query(db, query)
414
+
415
+ return ret.first['pg_stat_get_db_blocks_hit'].to_i
416
+ end
417
+
418
+ # * number of commits per database
419
+
420
+ def get_commits_per_database(db)
421
+ query = <<-END_GET_COMMITS_PER_DATABASE_QUERY
422
+ SELECT pg_stat_get_db_xact_commit(oid)
423
+ FROM pg_database
424
+ WHERE datname = '#{db}'
425
+ END_GET_COMMITS_PER_DATABASE_QUERY
426
+
427
+ ret = run_query(db, query)
428
+
429
+ return ret.first['pg_stat_get_db_xact_commit'].to_i
430
+ end
431
+
432
+ # * number of rollbacks per database
433
+
434
+ def get_rollbacks_per_database(db)
435
+ query = <<-END_GET_ROLLBACKS_PER_DATABASE_QUERY
436
+ SELECT pg_stat_get_db_xact_rollback(oid)
437
+ FROM pg_database
438
+ WHERE datname = '#{db}'
439
+ END_GET_ROLLBACKS_PER_DATABASE_QUERY
440
+
441
+ ret = run_query(db, query)
442
+
443
+ return ret.first['pg_stat_get_db_xact_rollback'].to_i
444
+ end
445
+
446
+ # * number of disk block reads per database
447
+ # * number of buffer hits per database
448
+ # * number of rows returned per database
449
+ # * number of rows inserted per database
450
+
451
+ def get_inserts_per_database(db)
452
+ query = <<-END_GET_INSERTS_PER_DATABASE_QUERY
453
+ SELECT pg_stat_get_db_tuples_inserted(oid)
454
+ FROM pg_database
455
+ WHERE datname = '#{db}'
456
+ END_GET_INSERTS_PER_DATABASE_QUERY
457
+
458
+ ret = run_query(db, query)
459
+
460
+ return ret.first['pg_stat_get_db_tuples_inserted'].to_i
461
+ end
462
+
463
+ # * number of rows updates per database
464
+
465
+ def get_updates_per_database(db)
466
+ query = <<-END_GET_UPDATES_PER_DATABASE_QUERY
467
+ SELECT pg_stat_get_db_tuples_updated(oid)
468
+ FROM pg_database
469
+ WHERE datname = '#{db}'
470
+ END_GET_UPDATES_PER_DATABASE_QUERY
471
+
472
+ ret = run_query(db, query)
473
+
474
+ return ret.first['pg_stat_get_db_tuples_updated'].to_i
475
+ end
476
+
477
+ # * number of rows deleted per database
478
+
479
+ def get_deletes_per_database(db)
480
+ query = <<-END_GET_DELETES_PER_DATABASE_QUERY
481
+ SELECT pg_stat_get_db_tuples_deleted(oid)
482
+ FROM pg_database
483
+ WHERE datname = '#{db}'
484
+ END_GET_DELETES_PER_DATABASE_QUERY
485
+
486
+ ret = run_query(db, query)
487
+
488
+ return ret.first['pg_stat_get_db_tuples_deleted'].to_i
489
+ end
490
+
491
+ # * number of sequential scans per table
492
+ # * number of index scans per table
493
+
494
+ # * list of locks
495
+
496
+ def get_locks_per_database(db)
497
+ query = <<-END_GET_LOCKS_PER_DATABASE_QUERY
498
+ SELECT pid, virtualxid, datname, relname, locktype, mode
499
+ FROM pg_locks l
500
+ LEFT JOIN pg_database d ON (d.oid=l.database)
501
+ LEFT JOIN pg_class c on (c.oid=l.relation)
502
+ WHERE datname = '#{db}' AND NOT relname ~ 'pg_'
503
+ END_GET_LOCKS_PER_DATABASE_QUERY
504
+
505
+ ret = run_query(db, query)
506
+
507
+ return ret
508
+ end
509
+
510
+ # * number of locks
511
+ def get_number_of_locks_per_database(db)
512
+ return get_locks_per_database(db).size
513
+ end
514
+
515
+ # * number of "idle in transaction" queries per database
516
+ # * number of open transactions per database
517
+
518
+ # * number of returned rows per table
519
+
520
+ def get_returned_per_table(db)
521
+ query = <<-END_GET_RETURNED_PER_TABLE_QUERY
522
+ SELECT c.relname, pg_stat_get_tuples_returned(c.oid) FROM pg_catalog.pg_class c
523
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
524
+ WHERE c.relkind IN ('r','') AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
525
+ AND pg_catalog.pg_table_is_visible(c.oid)
526
+ END_GET_RETURNED_PER_TABLE_QUERY
527
+
528
+ ret = run_query(db, query)
529
+
530
+ ret.map!{ |v| [v['relname'], v['pg_stat_get_tuples_returned'].to_i] }
531
+
532
+ return Hash[*ret.flatten]
533
+ end
534
+
535
+ # * number of fetched rows per table
536
+
537
+ def get_fetched_per_table(db)
538
+ query = <<-END_GET_FETCHED_PER_TABLE_QUERY
539
+ SELECT c.relname, pg_stat_get_tuples_fetched(c.oid) FROM pg_catalog.pg_class c
540
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
541
+ WHERE c.relkind IN ('r','') AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
542
+ AND pg_catalog.pg_table_is_visible(c.oid)
543
+ END_GET_FETCHED_PER_TABLE_QUERY
544
+
545
+ ret = run_query(db, query)
546
+
547
+ ret.map!{ |v| [v['relname'], v['pg_stat_get_tuples_fetched'].to_i] }
548
+
549
+ return Hash[*ret.flatten]
550
+ end
551
+
552
+ # * number of inserted rows per table
553
+
554
+ def get_inserts_per_table(db)
555
+ query = <<-END_GET_INSERTS_PER_TABLE_QUERY
556
+ SELECT c.relname, pg_stat_get_tuples_inserted(c.oid) FROM pg_catalog.pg_class c
557
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
558
+ WHERE c.relkind IN ('r','') AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
559
+ AND pg_catalog.pg_table_is_visible(c.oid)
560
+ END_GET_INSERTS_PER_TABLE_QUERY
561
+
562
+ ret = run_query(db, query)
563
+
564
+ ret.map!{ |v| [v['relname'], v['pg_stat_get_tuples_inserted'].to_i] }
565
+
566
+ return Hash[*ret.flatten]
567
+ end
568
+
569
+ # * number of updates rows per table
570
+
571
+ def get_updates_per_table(db)
572
+ query = <<-END_GET_UPDATES_PER_TABLE_QUERY
573
+ SELECT c.relname, pg_stat_get_tuples_updated(c.oid) FROM pg_catalog.pg_class c
574
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
575
+ WHERE c.relkind IN ('r','') AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
576
+ AND pg_catalog.pg_table_is_visible(c.oid)
577
+ END_GET_UPDATES_PER_TABLE_QUERY
578
+
579
+ ret = run_query(db, query)
580
+
581
+ ret.map!{ |v| [v['relname'], v['pg_stat_get_tuples_updated'].to_i] }
582
+
583
+ return Hash[*ret.flatten]
584
+ end
585
+
586
+ def get_hot_updates_per_table(db)
587
+ query = <<-END_GET_HOT_UPDATES_PER_TABLE_QUERY
588
+ SELECT c.relname, pg_stat_get_tuples_hot_updated(c.oid) FROM pg_catalog.pg_class c
589
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
590
+ WHERE c.relkind IN ('r','') AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
591
+ AND pg_catalog.pg_table_is_visible(c.oid)
592
+ END_GET_HOT_UPDATES_PER_TABLE_QUERY
593
+
594
+ ret = run_query(db, query)
595
+
596
+ ret.map!{ |v| [v['relname'], v['pg_stat_get_tuples_hot_updated'].to_i] }
597
+
598
+ return Hash[*ret.flatten]
599
+ end
600
+
601
+ # * number of deleted rows per table
602
+
603
+ def get_deletes_per_table(db)
604
+ query = <<-END_GET_DELETES_PER_TABLE_QUERY
605
+ SELECT c.relname, pg_stat_get_tuples_deleted(c.oid) FROM pg_catalog.pg_class c
606
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
607
+ WHERE c.relkind IN ('r','') AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
608
+ AND pg_catalog.pg_table_is_visible(c.oid)
609
+ END_GET_DELETES_PER_TABLE_QUERY
610
+
611
+ ret = run_query(db, query)
612
+
613
+ ret.map!{ |v| [v['relname'], v['pg_stat_get_tuples_deleted'].to_i] }
614
+
615
+ return Hash[*ret.flatten]
616
+ end
617
+
618
+ # * number of dead rows per table
619
+
620
+ def get_dead_per_table(db)
621
+ query = <<-END_GET_DEAD_PER_TABLE_QUERY
622
+ SELECT c.relname, pg_stat_get_dead_tuples(c.oid) FROM pg_catalog.pg_class c
623
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
624
+ WHERE c.relkind IN ('r','') AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
625
+ AND pg_catalog.pg_table_is_visible(c.oid)
626
+ END_GET_DEAD_PER_TABLE_QUERY
627
+
628
+ ret = run_query(db, query)
629
+
630
+ ret.map!{ |v| [v['relname'], v['pg_stat_get_dead_tuples'].to_i] }
631
+
632
+ return Hash[*ret.flatten]
633
+ end
634
+
635
+ def get_sequential_scans_per_table(db)
636
+ query = <<-END_GET_SEQ_SCANS_PER_TABLE_QUERY
637
+ SELECT stat.relname AS relname, seq_scan
638
+ FROM pg_stat_user_tables stat
639
+ RIGHT JOIN pg_statio_user_tables statio ON stat.relid=statio.relid;
640
+ END_GET_SEQ_SCANS_PER_TABLE_QUERY
641
+
642
+ ret = run_query(db, query)
643
+
644
+ ret.map!{ |v| [v['relname'], v['seq_scan'].to_i] }
645
+
646
+ return Hash[*ret.flatten]
647
+ end
648
+
649
+ def get_sequential_rows_read_per_table(db)
650
+ query = <<-END_GET_SEQ_ROWS_READ_PER_TABLE_QUERY
651
+ SELECT stat.relname AS relname, seq_tup_read
652
+ FROM pg_stat_user_tables stat
653
+ RIGHT JOIN pg_statio_user_tables statio ON stat.relid=statio.relid;
654
+ END_GET_SEQ_ROWS_READ_PER_TABLE_QUERY
655
+
656
+ ret = run_query(db, query)
657
+
658
+ ret.map!{ |v| [v['relname'], v['seq_tup_read'].to_i] }
659
+
660
+ return Hash[*ret.flatten]
661
+ end
662
+
663
+ def get_index_scans_per_table(db)
664
+ query = <<-END_GET_INDEX_SCANS_PER_TABLE_QUERY
665
+ SELECT stat.relname AS relname, idx_scan
666
+ FROM pg_stat_user_tables stat
667
+ RIGHT JOIN pg_statio_user_tables statio ON stat.relid=statio.relid;
668
+ END_GET_INDEX_SCANS_PER_TABLE_QUERY
669
+
670
+ ret = run_query(db, query)
671
+
672
+ ret.map!{ |v| [v['relname'], v['idx_scan'].to_i] }
673
+
674
+ return Hash[*ret.flatten]
675
+ end
676
+
677
+ def get_index_rows_fetched_per_table(db)
678
+ query = <<-END_GET_INDEX_ROWS_FETCHED_PER_TABLE_QUERY
679
+ SELECT stat.relname AS relname, idx_tup_fetch
680
+ FROM pg_stat_user_tables stat
681
+ RIGHT JOIN pg_statio_user_tables statio ON stat.relid=statio.relid;
682
+ END_GET_INDEX_ROWS_FETCHED_PER_TABLE_QUERY
683
+
684
+ ret = run_query(db, query)
685
+
686
+ ret.map!{ |v| [v['relname'], v['idx_tup_fetch'].to_i] }
687
+
688
+ return Hash[*ret.flatten]
689
+ end
690
+
691
+ def get_heap_blocks_read_per_table(db)
692
+ query = <<-END_GET_HEAP_BLOCKS_READ_PER_TABLE_QUERY
693
+ SELECT stat.relname AS relname, heap_blks_read
694
+ FROM pg_stat_user_tables stat
695
+ RIGHT JOIN pg_statio_user_tables statio ON stat.relid=statio.relid;
696
+ END_GET_HEAP_BLOCKS_READ_PER_TABLE_QUERY
697
+
698
+ ret = run_query(db, query)
699
+
700
+ ret.map!{ |v| [v['relname'], v['heap_blks_read'].to_i] }
701
+
702
+ return Hash[*ret.flatten]
703
+ end
704
+
705
+ def get_heap_blocks_hit_per_table(db)
706
+ query = <<-END_GET_HEAP_BLOCKS_HIT_PER_TABLE_QUERY
707
+ SELECT stat.relname AS relname, heap_blks_hit
708
+ FROM pg_stat_user_tables stat
709
+ RIGHT JOIN pg_statio_user_tables statio ON stat.relid=statio.relid;
710
+ END_GET_HEAP_BLOCKS_HIT_PER_TABLE_QUERY
711
+
712
+ ret = run_query(db, query)
713
+
714
+ ret.map!{ |v| [v['relname'], v['heap_blks_hit'].to_i] }
715
+
716
+ return Hash[*ret.flatten]
717
+ end
718
+
719
+ def get_index_blocks_read_per_table(db)
720
+ query = <<-END_GET_INDEX_BLOCKS_READ_PER_TABLE_QUERY
721
+ SELECT stat.relname AS relname, idx_blks_read
722
+ FROM pg_stat_user_tables stat
723
+ RIGHT JOIN pg_statio_user_tables statio ON stat.relid=statio.relid;
724
+ END_GET_INDEX_BLOCKS_READ_PER_TABLE_QUERY
725
+
726
+ ret = run_query(db, query)
727
+
728
+ ret.map!{ |v| [v['relname'], v['idx_blks_read'].to_i] }
729
+
730
+ return Hash[*ret.flatten]
731
+ end
732
+
733
+ def get_index_blocks_hit_per_table(db)
734
+ query = <<-END_GET_INDEX_BLOCKS_HIT_PER_TABLE_QUERY
735
+ SELECT stat.relname AS relname, idx_blks_hit
736
+ FROM pg_stat_user_tables stat
737
+ RIGHT JOIN pg_statio_user_tables statio ON stat.relid=statio.relid;
738
+ END_GET_INDEX_BLOCKS_HIT_PER_TABLE_QUERY
739
+
740
+ ret = run_query(db, query)
741
+
742
+ ret.map!{ |v| [v['relname'], v['idx_blks_hit'].to_i] }
743
+
744
+ return Hash[*ret.flatten]
745
+ end
746
+ end
747
+
@@ -0,0 +1,7 @@
1
require File.expand_path('../spec_helper', __FILE__)

# Placeholder spec generated with the gem skeleton; it fails on purpose as a
# reminder to write real examples.
describe "Postgrestats" do
  it "fails" do
    fail "hey buddy, you should probably rename this file and start specing for real"
  end
end
@@ -0,0 +1,32 @@
1
# Test bootstrap: make lib/ and spec/ loadable before anything else runs.
lib_dir = File.join(File.dirname(__FILE__), '..', 'lib')
$LOAD_PATH.unshift(lib_dir)
$LOAD_PATH.unshift(File.dirname(__FILE__))

# SimpleCov only works on Ruby 1.9; older interpreters fall through
# (the Gemfile pairs this with rcov for 1.8).
if RUBY_VERSION =~ /^1\.9/
  require 'simplecov'

  # Expose a helper to wipe SimpleCov's default filters so coverage
  # filtering is configured from scratch below.
  module SimpleCov::Configuration
    def clean_filters
      @filters = []
    end
  end

  SimpleCov.configure do
    clean_filters
    load_adapter 'test_frameworks'
  end

  # Coverage collection is opt-in via the COVERAGE env var.
  if ENV["COVERAGE"]
    SimpleCov.start do
      add_filter "/.rvm/"
    end
  end
end

require 'rspec'
require 'postgrestats'

# Requires supporting files with custom matchers and macros, etc,
# in ./support/ and its subdirectories.
Dir["#{File.dirname(__FILE__)}/support/**/*.rb"].each { |f| require f }

RSpec.configure do |config|
end
metadata ADDED
@@ -0,0 +1,165 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: postgrestats
3
+ version: !ruby/object:Gem::Version
4
+ version: 1.0.0
5
+ prerelease:
6
+ platform: ruby
7
+ authors:
8
+ - Artem Veremey
9
+ autorequire:
10
+ bindir: bin
11
+ cert_chain: []
12
+ date: 2013-04-19 00:00:00.000000000 Z
13
+ dependencies:
14
+ - !ruby/object:Gem::Dependency
15
+ name: pg
16
+ requirement: &70239789569380 !ruby/object:Gem::Requirement
17
+ none: false
18
+ requirements:
19
+ - - ! '>='
20
+ - !ruby/object:Gem::Version
21
+ version: '0'
22
+ type: :runtime
23
+ prerelease: false
24
+ version_requirements: *70239789569380
25
+ - !ruby/object:Gem::Dependency
26
+ name: gmetric
27
+ requirement: &70239789567620 !ruby/object:Gem::Requirement
28
+ none: false
29
+ requirements:
30
+ - - ! '>='
31
+ - !ruby/object:Gem::Version
32
+ version: '0'
33
+ type: :runtime
34
+ prerelease: false
35
+ version_requirements: *70239789567620
36
+ - !ruby/object:Gem::Dependency
37
+ name: rspec
38
+ requirement: &70239789583240 !ruby/object:Gem::Requirement
39
+ none: false
40
+ requirements:
41
+ - - ~>
42
+ - !ruby/object:Gem::Version
43
+ version: 2.8.0
44
+ type: :development
45
+ prerelease: false
46
+ version_requirements: *70239789583240
47
+ - !ruby/object:Gem::Dependency
48
+ name: yard
49
+ requirement: &70239789582260 !ruby/object:Gem::Requirement
50
+ none: false
51
+ requirements:
52
+ - - ~>
53
+ - !ruby/object:Gem::Version
54
+ version: '0.7'
55
+ type: :development
56
+ prerelease: false
57
+ version_requirements: *70239789582260
58
+ - !ruby/object:Gem::Dependency
59
+ name: rdoc
60
+ requirement: &70239789581240 !ruby/object:Gem::Requirement
61
+ none: false
62
+ requirements:
63
+ - - ~>
64
+ - !ruby/object:Gem::Version
65
+ version: '3.12'
66
+ type: :development
67
+ prerelease: false
68
+ version_requirements: *70239789581240
69
+ - !ruby/object:Gem::Dependency
70
+ name: cucumber
71
+ requirement: &70239789580580 !ruby/object:Gem::Requirement
72
+ none: false
73
+ requirements:
74
+ - - ! '>='
75
+ - !ruby/object:Gem::Version
76
+ version: '0'
77
+ type: :development
78
+ prerelease: false
79
+ version_requirements: *70239789580580
80
+ - !ruby/object:Gem::Dependency
81
+ name: bundler
82
+ requirement: &70239789579900 !ruby/object:Gem::Requirement
83
+ none: false
84
+ requirements:
85
+ - - ! '>'
86
+ - !ruby/object:Gem::Version
87
+ version: 1.0.0
88
+ type: :development
89
+ prerelease: false
90
+ version_requirements: *70239789579900
91
+ - !ruby/object:Gem::Dependency
92
+ name: jeweler
93
+ requirement: &70239789579040 !ruby/object:Gem::Requirement
94
+ none: false
95
+ requirements:
96
+ - - ~>
97
+ - !ruby/object:Gem::Version
98
+ version: 1.8.3
99
+ type: :development
100
+ prerelease: false
101
+ version_requirements: *70239789579040
102
+ - !ruby/object:Gem::Dependency
103
+ name: simplecov
104
+ requirement: &70239789578200 !ruby/object:Gem::Requirement
105
+ none: false
106
+ requirements:
107
+ - - ! '>='
108
+ - !ruby/object:Gem::Version
109
+ version: '0'
110
+ type: :development
111
+ prerelease: false
112
+ version_requirements: *70239789578200
113
+ description: Postgrestats is a library that captures important PostgreSQL statistics
114
+ email: artem@veremey.net
115
+ executables:
116
+ - postgrestats
117
+ extensions: []
118
+ extra_rdoc_files:
119
+ - LICENSE.txt
120
+ - README.md
121
+ files:
122
+ - .document
123
+ - .rspec
124
+ - Gemfile
125
+ - LICENSE.txt
126
+ - README.md
127
+ - Rakefile
128
+ - VERSION
129
+ - bin/postgrestats
130
+ - config/example.yml
131
+ - features/postgrestats.feature
132
+ - features/step_definitions/postgrestats_steps.rb
133
+ - features/support/env.rb
134
+ - lib/postgrestats.rb
135
+ - spec/postgrestats_spec.rb
136
+ - spec/spec_helper.rb
137
+ homepage: http://github.com/aia/postgrestats
138
+ licenses:
139
+ - MIT
140
+ post_install_message:
141
+ rdoc_options: []
142
+ require_paths:
143
+ - lib
144
+ required_ruby_version: !ruby/object:Gem::Requirement
145
+ none: false
146
+ requirements:
147
+ - - ! '>='
148
+ - !ruby/object:Gem::Version
149
+ version: '0'
150
+ segments:
151
+ - 0
152
+ hash: -785327322279302061
153
+ required_rubygems_version: !ruby/object:Gem::Requirement
154
+ none: false
155
+ requirements:
156
+ - - ! '>='
157
+ - !ruby/object:Gem::Version
158
+ version: '0'
159
+ requirements: []
160
+ rubyforge_project:
161
+ rubygems_version: 1.8.17
162
+ signing_key:
163
+ specification_version: 3
164
+ summary: Postgrestats is a library that captures important PostgreSQL statistics
165
+ test_files: []