pgdexter 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +13 -13
- data/lib/dexter/indexer.rb +249 -0
- data/lib/dexter/log_parser.rb +84 -9
- data/lib/dexter/version.rb +1 -1
- data/lib/dexter.rb +7 -219
- metadata +3 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a9dd13cbe8dc39e26b90bdc13abd16201ccec455
+  data.tar.gz: bc4693ab0595c9dc1e38b720c294b8e600d597c5
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5f96e60ac1660786dd0f059ec0e88a4f017c056f73eb0be07bb6d0d9ad55332a3c36c72cd9fb7f8b3a5ed370254b361f29979baa031850d93800ce8ad74775d2
+  data.tar.gz: f6b5a5a8487ea44eb38a4aca3ec59aaaea6c8cdd2c3e40267203fa29b4999d03a507fbe8bbe7e4207a5110c7bcc1e649dba624b4eb11e859be1f432e705095a8
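These are the standard RubyGems digests for the two archives packed inside the `.gem` file. As a hedged aside, one way to check them against a local copy (assuming the gem was downloaded and unpacked with `tar -xf pgdexter-0.1.1.gem`, which yields `metadata.gz` and `data.tar.gz`):

```ruby
require "digest"

# Compare against the SHA1 and SHA512 values listed above.
puts Digest::SHA1.file("metadata.gz").hexdigest
puts Digest::SHA512.file("data.tar.gz").hexdigest
```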
data/README.md
CHANGED
@@ -33,28 +33,28 @@ gem install pgdexter
 Dexter needs a connection to your database and a log file to process.
 
 ```sh
-
+tail -F -n +1 <log-file> | dexter <database-url>
 ```
 
 This finds slow queries and generates output like:
 
+```log
+2017-06-25T17:52:19+00:00 Started
+2017-06-25T17:52:22+00:00 Processing 189 new query fingerprints
+2017-06-25T17:52:22+00:00 Index found: genres_movies (genre_id)
+2017-06-25T17:52:22+00:00 Index found: genres_movies (movie_id)
+2017-06-25T17:52:22+00:00 Index found: movies (title)
+2017-06-25T17:52:22+00:00 Index found: ratings (movie_id)
+2017-06-25T17:52:22+00:00 Index found: ratings (rating)
+2017-06-25T17:52:22+00:00 Index found: ratings (user_id)
+2017-06-25T17:53:22+00:00 Processing 12 new query fingerprints
 ```
-SELECT * FROM ratings ORDER BY user_id LIMIT 10
-Starting cost: 3797.99
-Final cost: 0.5
-CREATE INDEX CONCURRENTLY ON ratings (user_id);
-```
-
-To be safe, Dexter does not create indexes unless you pass the `--create` flag.
-
-You can also pass a single statement with:
 
-
-dexter <database-url> -s "SELECT * FROM ..."
-```
+To be safe, Dexter will not create indexes unless you pass the `--create` flag.
 
 ## Options
 
+- `--interval` - time to wait between processing queries
 - `--min-time` - only consider queries that have consumed a certain amount of DB time (in minutes)
 
 ## Contributing
data/lib/dexter/indexer.rb
ADDED
@@ -0,0 +1,249 @@
+module Dexter
+  class Indexer
+    attr_reader :client
+
+    def initialize(client)
+      @client = client
+
+      select_all("SET client_min_messages = warning")
+      select_all("CREATE EXTENSION IF NOT EXISTS hypopg")
+    end
+
+    def process_queries(queries)
+      # narrow down queries and tables
+      tables, queries = narrow_queries(queries)
+      return [] if tables.empty?
+
+      # get ready for hypothetical indexes
+      select_all("SELECT hypopg_reset()")
+
+      # ensure tables have recently been analyzed
+      analyze_tables(tables)
+
+      # get initial plans
+      initial_plans = {}
+      queries.each do |query|
+        begin
+          initial_plans[query] = plan(query)
+        rescue PG::Error
+          # do nothing
+        end
+      end
+      queries.select! { |q| initial_plans[q] }
+
+      # get existing indexes
+      index_set = Set.new
+      indexes(tables).each do |index|
+        # TODO make sure btree
+        index_set << [index["table"], index["columns"]]
+      end
+
+      # create hypothetical indexes
+      candidates = {}
+      columns(tables).each do |col|
+        unless index_set.include?([col[:table], [col[:column]]])
+          candidates[col] = select_all("SELECT * FROM hypopg_create_index('CREATE INDEX ON #{col[:table]} (#{[col[:column]].join(", ")})');").first["indexname"]
+        end
+      end
+
+      queries_by_index = {}
+
+      new_indexes = []
+      queries.each do |query|
+        starting_cost = initial_plans[query]["Total Cost"]
+        plan2 = plan(query)
+        cost2 = plan2["Total Cost"]
+        best_indexes = []
+
+        candidates.each do |col, index_name|
+          if plan2.inspect.include?(index_name) && cost2 < starting_cost * 0.5
+            best_indexes << {
+              table: col[:table],
+              columns: [col[:column]]
+            }
+            (queries_by_index[best_indexes.last] ||= []) << {
+              starting_cost: starting_cost,
+              final_cost: cost2,
+              query: query
+            }
+          end
+        end
+
+        new_indexes.concat(best_indexes)
+      end
+
+      new_indexes = new_indexes.uniq.sort_by(&:to_a)
+
+      # create indexes
+      if new_indexes.any?
+        new_indexes.each do |index|
+          index[:queries] = queries_by_index[index]
+
+          log "Index found: #{index[:table]} (#{index[:columns].join(", ")})"
+          # log "CREATE INDEX CONCURRENTLY ON #{index[:table]} (#{index[:columns].join(", ")});"
+          # index[:queries].sort_by { |q| fingerprints[q[:query]] }.each do |query|
+          #   log "Query #{fingerprints[query[:query]]} (Cost: #{query[:starting_cost]} -> #{query[:final_cost]})"
+          #   puts
+          #   puts query[:query]
+          #   puts
+          # end
+        end
+
+        new_indexes.each do |index|
+          statement = "CREATE INDEX CONCURRENTLY ON #{index[:table]} (#{index[:columns].join(", ")})"
+          # puts "#{statement};"
+          if client.options[:create]
+            log "Creating index: #{statement}"
+            started_at = Time.now
+            select_all(statement)
+            log "Index created: #{((Time.now - started_at) * 1000).to_i} ms"
+          end
+        end
+      end
+
+      new_indexes
+    end
+
+    def conn
+      @conn ||= begin
+        uri = URI.parse(client.arguments[0])
+        config = {
+          host: uri.host,
+          port: uri.port,
+          dbname: uri.path.sub(/\A\//, ""),
+          user: uri.user,
+          password: uri.password,
+          connect_timeout: 3
+        }.reject { |_, value| value.to_s.empty? }
+        PG::Connection.new(config)
+      end
+    rescue PG::ConnectionBad
+      abort "Bad database url"
+    end
+
+    def select_all(query)
+      conn.exec(query).to_a
+    end
+
+    def plan(query)
+      JSON.parse(select_all("EXPLAIN (FORMAT JSON) #{query}").first["QUERY PLAN"]).first["Plan"]
+    end
+
+    def narrow_queries(queries)
+      result = select_all <<-SQL
+        SELECT
+          table_name
+        FROM
+          information_schema.tables
+        WHERE
+          table_catalog = current_database() AND
+          table_schema NOT IN ('pg_catalog', 'information_schema')
+      SQL
+      possible_tables = Set.new(result.map { |r| r["table_name"] })
+
+      tables = queries.flat_map { |q| PgQuery.parse(q).tables }.uniq.select { |t| possible_tables.include?(t) }
+
+      [tables, queries.select { |q| PgQuery.parse(q).tables.all? { |t| possible_tables.include?(t) } }]
+    end
+
+    def columns(tables)
+      columns = select_all <<-SQL
+        SELECT
+          table_name,
+          column_name
+        FROM
+          information_schema.columns
+        WHERE
+          table_schema = 'public' AND
+          table_name IN (#{tables.map { |t| quote(t) }.join(", ")})
+      SQL
+
+      columns.map { |v| {table: v["table_name"], column: v["column_name"]} }
+    end
+
+    def indexes(tables)
+      select_all(<<-SQL
+        SELECT
+          schemaname AS schema,
+          t.relname AS table,
+          ix.relname AS name,
+          regexp_replace(pg_get_indexdef(i.indexrelid), '^[^\\(]*\\((.*)\\)$', '\\1') AS columns,
+          regexp_replace(pg_get_indexdef(i.indexrelid), '.* USING ([^ ]*) \\(.*', '\\1') AS using,
+          indisunique AS unique,
+          indisprimary AS primary,
+          indisvalid AS valid,
+          indexprs::text,
+          indpred::text,
+          pg_get_indexdef(i.indexrelid) AS definition
+        FROM
+          pg_index i
+        INNER JOIN
+          pg_class t ON t.oid = i.indrelid
+        INNER JOIN
+          pg_class ix ON ix.oid = i.indexrelid
+        LEFT JOIN
+          pg_stat_user_indexes ui ON ui.indexrelid = i.indexrelid
+        WHERE
+          t.relname IN (#{tables.map { |t| quote(t) }.join(", ")}) AND
+          schemaname IS NOT NULL AND
+          indisvalid = 't' AND
+          indexprs IS NULL AND
+          indpred IS NULL
+        ORDER BY
+          1, 2
+      SQL
+      ).map { |v| v["columns"] = v["columns"].sub(") WHERE (", " WHERE ").split(", ").map { |c| unquote(c) }; v }
+    end
+
+    def unquote(part)
+      if part && part.start_with?('"')
+        part[1..-2]
+      else
+        part
+      end
+    end
+
+    def analyze_tables(tables)
+      analyze_stats = select_all <<-SQL
+        SELECT
+          schemaname AS schema,
+          relname AS table,
+          last_analyze,
+          last_autoanalyze
+        FROM
+          pg_stat_user_tables
+        WHERE
+          relname IN (#{tables.map { |t| quote(t) }.join(", ")})
+      SQL
+
+      last_analyzed = {}
+      analyze_stats.each do |stats|
+        last_analyzed[stats["table"]] = Time.parse(stats["last_analyze"]) if stats["last_analyze"]
+      end
+
+      tables.each do |table|
+        if !last_analyzed[table] || last_analyzed[table] < Time.now - 3600
+          log "Analyzing #{table}"
+          select_all("ANALYZE #{table}")
+        end
+      end
+    end
+
+    def quote(value)
+      if value.is_a?(String)
+        "'#{quote_string(value)}'"
+      else
+        value
+      end
+    end
+
+    # activerecord
+    def quote_string(s)
+      s.gsub(/\\/, '\&\&').gsub(/'/, "''")
+    end
+
+    def log(message)
+      puts "#{Time.now.iso8601} #{message}"
+    end
+  end
+end
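The heart of `Indexer#process_queries` is a plan-cost comparison around hypopg's hypothetical indexes: plan each query, simulate an index for each unindexed column, re-plan, and keep a candidate only if the planner both used it and cut the estimated cost by more than half. A minimal sketch of that round trip, assuming a reachable database named `dexter_demo` with the hypopg extension installed and a `ratings` table (both names are illustrative):

```ruby
require "json"
require "pg"

conn = PG.connect(dbname: "dexter_demo") # hypothetical database
conn.exec("CREATE EXTENSION IF NOT EXISTS hypopg")
conn.exec("SELECT hypopg_reset()")

# Parse EXPLAIN (FORMAT JSON) output down to the top-level plan node.
plan = lambda do |sql|
  JSON.parse(conn.exec("EXPLAIN (FORMAT JSON) #{sql}").first["QUERY PLAN"]).first["Plan"]
end

query = "SELECT * FROM ratings ORDER BY user_id LIMIT 10"
starting_cost = plan.call(query)["Total Cost"]

# The index exists only in this session's planner; nothing touches disk.
hypo = conn.exec("SELECT * FROM hypopg_create_index('CREATE INDEX ON ratings (user_id)')").first

plan2 = plan.call(query)
if plan2.inspect.include?(hypo["indexname"]) && plan2["Total Cost"] < starting_cost * 0.5
  puts "CREATE INDEX CONCURRENTLY ON ratings (user_id);"
end
```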
data/lib/dexter/log_parser.rb
CHANGED
@@ -2,18 +2,38 @@ module Dexter
   class LogParser
     REGEX = /duration: (\d+\.\d+) ms (statement|execute <unnamed>): (.+)/
 
-    def initialize(logfile,
+    def initialize(logfile, client)
       @logfile = logfile
-      @min_time = options[:min_time] * 60000 # convert minutes to ms
-    end
-
-    def queries
+      @min_time = client.options[:min_time] * 60000 # convert minutes to ms
       @top_queries = {}
+      @indexer = Indexer.new(client)
+      @new_queries = Set.new
+      @new_queries_mutex = Mutex.new
+      @process_queries_mutex = Mutex.new
+      @last_checked_at = {}
+
+      log "Started"
+
+      if @logfile == STDIN
+        Thread.abort_on_exception = true
 
+        @timer_thread = Thread.new do
+          sleep(3) # starting sleep
+          loop do
+            @process_queries_mutex.synchronize do
+              process_queries
+            end
+            sleep(client.options[:interval])
+          end
+        end
+      end
+    end
+
+    def perform
       active_line = nil
       duration = nil
 
-
+      each_line do |line|
         if active_line
           if line.include?(": ")
             process_entry(active_line, duration)
@@ -33,17 +53,72 @@ module Dexter
       end
       process_entry(active_line, duration) if active_line
 
-      @
+      @process_queries_mutex.synchronize do
+        process_queries
+      end
     end
 
    private
 
+    def each_line
+      if @logfile == STDIN
+        STDIN.each_line do |line|
+          yield line
+        end
+      else
+        File.foreach(@logfile) do |line|
+          yield line
+        end
+      end
+    end
+
    def process_entry(query, duration)
      return unless query =~ /SELECT/i
-      fingerprint =
-
+      fingerprint =
+        begin
+          PgQuery.fingerprint(query)
+        rescue PgQuery::ParseError
+          # do nothing
+        end
+      return unless fingerprint
+
+      @top_queries[fingerprint] ||= {calls: 0, total_time: 0}
      @top_queries[fingerprint][:calls] += 1
      @top_queries[fingerprint][:total_time] += duration
+      @top_queries[fingerprint][:query] = query
+      @new_queries_mutex.synchronize do
+        @new_queries << fingerprint
+      end
+    end
+
+    def process_queries
+      new_queries = nil
+
+      @new_queries_mutex.synchronize do
+        new_queries = @new_queries.dup
+        @new_queries.clear
+      end
+
+      now = Time.now
+      min_checked_at = now - 3600 # don't recheck for an hour
+      queries = []
+      fingerprints = {}
+      @top_queries.each do |k, v|
+        if new_queries.include?(k) && v[:total_time] > @min_time && (!@last_checked_at[k] || @last_checked_at[k] < min_checked_at)
+          fingerprints[v[:query]] = k
+          queries << v[:query]
+          @last_checked_at[k] = now
+        end
+      end
+
+      log "Processing #{queries.size} new query fingerprints"
+      if queries.any?
+        @indexer.process_queries(queries)
+      end
+    end
+
+    def log(message)
+      puts "#{Time.now.iso8601} #{message}"
    end
  end
end
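For context on what `process_entry` consumes: `perform` scans Postgres log lines emitted when `log_min_duration_statement` is enabled, and `PgQuery.fingerprint` collapses queries that differ only in literal values, so `@top_queries` aggregates time per query shape. A hedged illustration (the sample line and table are made up, and real lines carry whatever `log_line_prefix` is configured):

```ruby
require "pg_query"

REGEX = /duration: (\d+\.\d+) ms (statement|execute <unnamed>): (.+)/

line = "LOG:  duration: 1001.583 ms statement: SELECT * FROM ratings WHERE user_id = 1"
m = line.match(REGEX)
m[1].to_f # => 1001.583 (milliseconds, summed into :total_time)
m[3]      # => the SQL text that gets fingerprinted

# Literals are normalized away, so these two share a fingerprint and
# land in the same @top_queries bucket:
PgQuery.fingerprint("SELECT * FROM ratings WHERE user_id = 1") ==
  PgQuery.fingerprint("SELECT * FROM ratings WHERE user_id = 2") # => true
```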
data/lib/dexter/version.rb
CHANGED
@@ -1,3 +1,3 @@
 module Dexter
-  VERSION = "0.1.0"
+  VERSION = "0.1.1"
 end
data/lib/dexter.rb
CHANGED
@@ -4,6 +4,8 @@ require "pg"
 require "pg_query"
 require "time"
 require "set"
+require "thread"
+require "dexter/indexer"
 require "dexter/log_parser"
 
 module Dexter
@@ -22,240 +24,26 @@ module Dexter
       queries = []
       if options[:s]
         queries << options[:s]
+        Indexer.new(self).process_queries(queries)
       end
       if arguments[1]
         begin
-
-          queries.concat(parser.queries)
+          LogParser.new(arguments[1], self).perform
         rescue Errno::ENOENT
           abort "Log file not found"
         end
       end
-
-
-      tables, queries = narrow_queries(queries)
-      return if tables.empty?
-
-      # get ready for hypothetical indexes
-      select_all("SET client_min_messages = warning")
-      select_all("CREATE EXTENSION IF NOT EXISTS hypopg")
-      select_all("SELECT hypopg_reset()")
-
-      # ensure tables have recently been analyzed
-      analyze_tables(tables)
-
-      # get initial plans
-      initial_plans = {}
-      queries.each do |query|
-        begin
-          initial_plans[query] = plan(query)
-        rescue PG::Error
-          # do nothing
-        end
-      end
-      queries.select! { |q| initial_plans[q] }
-
-      # get existing indexes
-      index_set = Set.new
-      indexes(tables).each do |index|
-        # TODO make sure btree
-        index_set << [index["table"], index["columns"]]
-      end
-
-      # create hypothetical indexes
-      candidates = {}
-      columns(tables).each do |col|
-        unless index_set.include?([col[:table], [col[:column]]])
-          candidates[col] = select_all("SELECT * FROM hypopg_create_index('CREATE INDEX ON #{col[:table]} (#{[col[:column]].join(", ")})');").first["indexname"]
-        end
-      end
-
-      new_indexes = []
-      queries.each do |query|
-        starting_cost = initial_plans[query]["Total Cost"]
-        plan2 = plan(query)
-        cost2 = plan2["Total Cost"]
-        best_indexes = []
-
-        candidates.each do |col, index_name|
-          if plan2.inspect.include?(index_name)
-            best_indexes << {
-              table: col[:table],
-              columns: [col[:column]]
-            }
-          end
-        end
-
-        puts query
-        puts "Starting cost: #{starting_cost}"
-        puts "Final cost: #{cost2}"
-
-        # must make it 20% faster
-        if cost2 < starting_cost * 0.8
-          new_indexes.concat(best_indexes)
-          best_indexes.each do |index|
-            puts "CREATE INDEX CONCURRENTLY ON #{index[:table]} (#{index[:columns].join(", ")});"
-          end
-        else
-          puts "Nope!"
-        end
-        puts
-      end
-
-      # create indexes
-      if new_indexes.any?
-        puts "Indexes to be created:"
-        new_indexes.uniq.sort_by(&:to_a).each do |index|
-          statement = "CREATE INDEX CONCURRENTLY ON #{index[:table]} (#{index[:columns].join(", ")})"
-          puts "#{statement};"
-          select_all(statement) if options[:create]
-        end
-      end
-    end
-
-    def conn
-      @conn ||= begin
-        uri = URI.parse(arguments[0])
-        config = {
-          host: uri.host,
-          port: uri.port,
-          dbname: uri.path.sub(/\A\//, ""),
-          user: uri.user,
-          password: uri.password,
-          connect_timeout: 3
-        }.reject { |_, value| value.to_s.empty? }
-        PG::Connection.new(config)
-      end
-    rescue PG::ConnectionBad
-      abort "Bad database url"
-    end
-
-    def select_all(query)
-      conn.exec(query).to_a
-    end
-
-    def plan(query)
-      JSON.parse(select_all("EXPLAIN (FORMAT JSON) #{query}").first["QUERY PLAN"]).first["Plan"]
-    end
-
-    def narrow_queries(queries)
-      result = select_all <<-SQL
-        SELECT
-          table_name
-        FROM
-          information_schema.tables
-        WHERE
-          table_catalog = current_database() AND
-          table_schema NOT IN ('pg_catalog', 'information_schema')
-      SQL
-      possible_tables = Set.new(result.map { |r| r["table_name"] })
-
-      tables = queries.flat_map { |q| PgQuery.parse(q).tables }.uniq.select { |t| possible_tables.include?(t) }
-
-      [tables, queries.select { |q| PgQuery.parse(q).tables.all? { |t| possible_tables.include?(t) } }]
-    end
-
-    def columns(tables)
-      columns = select_all <<-SQL
-        SELECT
-          table_name,
-          column_name
-        FROM
-          information_schema.columns
-        WHERE
-          table_schema = 'public' AND
-          table_name IN (#{tables.map { |t| quote(t) }.join(", ")})
-      SQL
-
-      columns.map { |v| {table: v["table_name"], column: v["column_name"]} }
-    end
-
-    def indexes(tables)
-      select_all(<<-SQL
-        SELECT
-          schemaname AS schema,
-          t.relname AS table,
-          ix.relname AS name,
-          regexp_replace(pg_get_indexdef(i.indexrelid), '^[^\\(]*\\((.*)\\)$', '\\1') AS columns,
-          regexp_replace(pg_get_indexdef(i.indexrelid), '.* USING ([^ ]*) \\(.*', '\\1') AS using,
-          indisunique AS unique,
-          indisprimary AS primary,
-          indisvalid AS valid,
-          indexprs::text,
-          indpred::text,
-          pg_get_indexdef(i.indexrelid) AS definition
-        FROM
-          pg_index i
-        INNER JOIN
-          pg_class t ON t.oid = i.indrelid
-        INNER JOIN
-          pg_class ix ON ix.oid = i.indexrelid
-        LEFT JOIN
-          pg_stat_user_indexes ui ON ui.indexrelid = i.indexrelid
-        WHERE
-          t.relname IN (#{tables.map { |t| quote(t) }.join(", ")}) AND
-          schemaname IS NOT NULL AND
-          indisvalid = 't' AND
-          indexprs IS NULL AND
-          indpred IS NULL
-        ORDER BY
-          1, 2
-      SQL
-      ).map { |v| v["columns"] = v["columns"].sub(") WHERE (", " WHERE ").split(", ").map { |c| unquote(c) }; v }
-    end
-
-    def unquote(part)
-      if part && part.start_with?('"')
-        part[1..-2]
-      else
-        part
+      if !options[:s] && !arguments[1]
+        LogParser.new(STDIN, self).perform
       end
     end
 
-    def analyze_tables(tables)
-      analyze_stats = select_all <<-SQL
-        SELECT
-          schemaname AS schema,
-          relname AS table,
-          last_analyze,
-          last_autoanalyze
-        FROM
-          pg_stat_user_tables
-        WHERE
-          relname IN (#{tables.map { |t| quote(t) }.join(", ")})
-      SQL
-
-      last_analyzed = {}
-      analyze_stats.each do |stats|
-        last_analyzed[stats["table"]] = Time.parse(stats["last_analyze"]) if stats["last_analyze"]
-      end
-
-      tables.each do |table|
-        if !last_analyzed[table] || last_analyzed[table] < Time.now - 3600
-          puts "Analyzing #{table}"
-          select_all("ANALYZE #{table}")
-        end
-      end
-    end
-
-    def quote(value)
-      if value.is_a?(String)
-        "'#{quote_string(value)}'"
-      else
-        value
-      end
-    end
-
-    # activerecord
-    def quote_string(s)
-      s.gsub(/\\/, '\&\&').gsub(/'/, "''")
-    end
-
     def parse_args(args)
       opts = Slop.parse(args) do |o|
         o.boolean "--create", default: false
         o.string "-s"
         o.float "--min-time", default: 0
+        o.integer "--interval", default: 60
       end
       [opts.arguments, opts.to_hash]
     rescue Slop::Error => e
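The only change to option parsing is the new `--interval` flag (seconds between processing batches, default 60). A small sketch of how this Slop block resolves arguments and options, using Slop 4's API as declared in the gemspec:

```ruby
require "slop"

opts = Slop.parse(["postgres://localhost/mydb", "--min-time", "5"]) do |o|
  o.boolean "--create", default: false
  o.string "-s"
  o.float "--min-time", default: 0
  o.integer "--interval", default: 60
end

opts.arguments # => ["postgres://localhost/mydb"]
opts.to_hash   # => {:create=>false, :s=>nil, :min_time=>5.0, :interval=>60}
```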
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: pgdexter
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.1.1
 platform: ruby
 authors:
 - Andrew Kane
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2017-06-
+date: 2017-06-25 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: slop
@@ -95,6 +95,7 @@ files:
 - Rakefile
 - exe/dexter
 - lib/dexter.rb
+- lib/dexter/indexer.rb
 - lib/dexter/log_parser.rb
 - lib/dexter/version.rb
 - pgdexter.gemspec