pg_reports 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. checksums.yaml +7 -0
  2. data/CHANGELOG.md +22 -0
  3. data/LICENSE.txt +21 -0
  4. data/README.md +335 -0
  5. data/app/controllers/pg_reports/dashboard_controller.rb +133 -0
  6. data/app/views/layouts/pg_reports/application.html.erb +594 -0
  7. data/app/views/pg_reports/dashboard/index.html.erb +435 -0
  8. data/app/views/pg_reports/dashboard/show.html.erb +481 -0
  9. data/config/routes.rb +13 -0
  10. data/lib/pg_reports/annotation_parser.rb +114 -0
  11. data/lib/pg_reports/configuration.rb +83 -0
  12. data/lib/pg_reports/dashboard/reports_registry.rb +89 -0
  13. data/lib/pg_reports/engine.rb +22 -0
  14. data/lib/pg_reports/error.rb +15 -0
  15. data/lib/pg_reports/executor.rb +51 -0
  16. data/lib/pg_reports/modules/connections.rb +106 -0
  17. data/lib/pg_reports/modules/indexes.rb +111 -0
  18. data/lib/pg_reports/modules/queries.rb +140 -0
  19. data/lib/pg_reports/modules/system.rb +148 -0
  20. data/lib/pg_reports/modules/tables.rb +113 -0
  21. data/lib/pg_reports/report.rb +228 -0
  22. data/lib/pg_reports/sql/connections/active_connections.sql +20 -0
  23. data/lib/pg_reports/sql/connections/blocking_queries.sql +35 -0
  24. data/lib/pg_reports/sql/connections/connection_stats.sql +13 -0
  25. data/lib/pg_reports/sql/connections/idle_connections.sql +19 -0
  26. data/lib/pg_reports/sql/connections/locks.sql +20 -0
  27. data/lib/pg_reports/sql/connections/long_running_queries.sql +21 -0
  28. data/lib/pg_reports/sql/indexes/bloated_indexes.sql +36 -0
  29. data/lib/pg_reports/sql/indexes/duplicate_indexes.sql +38 -0
  30. data/lib/pg_reports/sql/indexes/index_sizes.sql +14 -0
  31. data/lib/pg_reports/sql/indexes/index_usage.sql +19 -0
  32. data/lib/pg_reports/sql/indexes/invalid_indexes.sql +15 -0
  33. data/lib/pg_reports/sql/indexes/missing_indexes.sql +27 -0
  34. data/lib/pg_reports/sql/indexes/unused_indexes.sql +18 -0
  35. data/lib/pg_reports/sql/queries/all_queries.sql +20 -0
  36. data/lib/pg_reports/sql/queries/expensive_queries.sql +22 -0
  37. data/lib/pg_reports/sql/queries/heavy_queries.sql +17 -0
  38. data/lib/pg_reports/sql/queries/low_cache_hit_queries.sql +19 -0
  39. data/lib/pg_reports/sql/queries/missing_index_queries.sql +25 -0
  40. data/lib/pg_reports/sql/queries/slow_queries.sql +17 -0
  41. data/lib/pg_reports/sql/system/activity_overview.sql +29 -0
  42. data/lib/pg_reports/sql/system/cache_stats.sql +19 -0
  43. data/lib/pg_reports/sql/system/database_sizes.sql +10 -0
  44. data/lib/pg_reports/sql/system/extensions.sql +12 -0
  45. data/lib/pg_reports/sql/system/settings.sql +33 -0
  46. data/lib/pg_reports/sql/tables/bloated_tables.sql +23 -0
  47. data/lib/pg_reports/sql/tables/cache_hit_ratios.sql +24 -0
  48. data/lib/pg_reports/sql/tables/recently_modified.sql +20 -0
  49. data/lib/pg_reports/sql/tables/row_counts.sql +18 -0
  50. data/lib/pg_reports/sql/tables/seq_scans.sql +26 -0
  51. data/lib/pg_reports/sql/tables/table_sizes.sql +16 -0
  52. data/lib/pg_reports/sql/tables/vacuum_needed.sql +22 -0
  53. data/lib/pg_reports/sql_loader.rb +35 -0
  54. data/lib/pg_reports/telegram_sender.rb +83 -0
  55. data/lib/pg_reports/version.rb +5 -0
  56. data/lib/pg_reports.rb +114 -0
  57. metadata +184 -0
@@ -0,0 +1,113 @@
# frozen_string_literal: true

module PgReports
  module Modules
    # Table analysis module.
    # Each public method executes one of the bundled SQL files under
    # sql/tables/ and wraps the resulting rows in a Report.
    module Tables
      extend self

      # Table sizes including indexes
      # @param limit [Integer] maximum number of rows returned
      # @return [Report] Report with table sizes
      def table_sizes(limit: 50)
        rows = executor.execute_from_file(:tables, :table_sizes).first(limit)

        build_report(
          "Table Sizes (top #{limit})",
          rows,
          %w[schema table_name table_size_mb index_size_mb total_size_mb row_count]
        )
      end

      # Bloated tables - tables with high dead tuple ratio
      # @param limit [Integer] maximum number of rows returned
      # @return [Report] Report with bloated tables
      def bloated_tables(limit: 20)
        threshold = PgReports.config.bloat_threshold_percent
        rows = executor.execute_from_file(:tables, :bloated_tables)
        bloated = rows.select { |row| row["bloat_percent"].to_f >= threshold }.first(limit)

        build_report(
          "Bloated Tables (bloat >= #{threshold}%)",
          bloated,
          %w[schema table_name live_rows dead_rows bloat_percent table_size_mb]
        )
      end

      # Tables needing vacuum - high dead rows count
      # @param limit [Integer] maximum number of rows returned
      # @return [Report] Report with tables needing vacuum
      def vacuum_needed(limit: 20)
        threshold = PgReports.config.dead_rows_threshold
        rows = executor.execute_from_file(:tables, :vacuum_needed)
        needing_vacuum = rows.select { |row| row["n_dead_tup"].to_i >= threshold }.first(limit)

        build_report(
          "Tables Needing Vacuum (dead rows >= #{threshold})",
          needing_vacuum,
          %w[schema table_name n_live_tup n_dead_tup last_vacuum last_autovacuum]
        )
      end

      # Table row counts
      # @param limit [Integer] maximum number of rows returned
      # @return [Report] Report with table row counts
      def row_counts(limit: 50)
        rows = executor.execute_from_file(:tables, :row_counts).first(limit)

        build_report(
          "Table Row Counts (top #{limit})",
          rows,
          %w[schema table_name row_count table_size_mb]
        )
      end

      # Table cache hit ratios
      # @param limit [Integer] maximum number of rows returned
      # @return [Report] Report with table cache hit ratios
      def cache_hit_ratios(limit: 50)
        rows = executor.execute_from_file(:tables, :cache_hit_ratios).first(limit)

        build_report(
          "Table Cache Hit Ratios",
          rows,
          %w[schema table_name heap_blks_read heap_blks_hit cache_hit_ratio]
        )
      end

      # Sequential scan statistics
      # @param limit [Integer] maximum number of rows returned
      # @return [Report] Report with sequential scan statistics
      def seq_scans(limit: 20)
        rows = executor.execute_from_file(:tables, :seq_scans).first(limit)

        build_report(
          "Sequential Scans (top #{limit})",
          rows,
          %w[schema table_name seq_scan seq_tup_read idx_scan rows_per_seq_scan]
        )
      end

      # Recently modified tables
      # @param limit [Integer] maximum number of rows returned
      # @return [Report] Report with recently modified tables
      def recently_modified(limit: 20)
        rows = executor.execute_from_file(:tables, :recently_modified).first(limit)

        build_report(
          "Recently Modified Tables",
          rows,
          %w[schema table_name n_tup_ins n_tup_upd n_tup_del last_analyze]
        )
      end

      private

      # Shared Report construction so every public method builds its
      # result the same way.
      def build_report(title, rows, columns)
        Report.new(title: title, data: rows, columns: columns)
      end

      # Memoized SQL executor shared by all methods of this module.
      def executor
        @executor ||= Executor.new
      end
    end
  end
end
@@ -0,0 +1,228 @@
# frozen_string_literal: true

require "cgi"

module PgReports
  # Report wraps query results (an Array of Hashes keyed by column name) and
  # provides display, formatting (text/Markdown/HTML/CSV) and Telegram
  # delivery methods. Every module method returns a Report instance for
  # chaining.
  #
  # NOTE: Time.current (below) and String#parameterize (in
  # #send_to_telegram_as_file) are ActiveSupport methods; this gem ships as a
  # Rails engine, so ActiveSupport is assumed to be loaded.
  class Report
    include Enumerable

    attr_reader :title, :data, :columns, :generated_at

    # @param title [String] report heading used by every output format
    # @param data [Array<Hash>] result rows, keyed by column name
    # @param columns [Array<String>, nil] column display order; defaults to
    #   the keys of the first row
    def initialize(title:, data:, columns: nil)
      @title = title
      @data = data
      @columns = columns || detect_columns
      @generated_at = Time.current
    end

    # Print the plain-text rendering to STDOUT.
    def display
      puts to_text
    end

    # Send the Markdown rendering to the configured Telegram channel.
    # @return [Report] self, for chaining
    def send_to_telegram
      TelegramSender.send_message(to_markdown)
      self
    end

    # Send the plain-text rendering to Telegram as a file attachment.
    # @param filename [String, nil] defaults to "<title>-<timestamp>.txt"
    # @return [Report] self, for chaining
    def send_to_telegram_as_file(filename: nil)
      filename ||= "#{title.parameterize}-#{generated_at.strftime("%Y%m%d-%H%M%S")}.txt"
      TelegramSender.send_file(to_text, filename: filename, caption: title)
      self
    end

    # Plain-text representation: title, timestamp, aligned table, row count.
    # @return [String]
    def to_text
      return empty_report_text if data.empty?

      lines = []
      lines << title
      lines << "=" * title.length
      lines << "Generated: #{generated_at.strftime("%Y-%m-%d %H:%M:%S")}"
      lines << ""
      lines << format_table_text
      lines << ""
      lines << "Total: #{data.size} rows"

      lines.join("\n")
    end

    # Markdown representation (table rows capped at 50 — see
    # #format_table_markdown).
    # @return [String]
    def to_markdown
      return empty_report_markdown if data.empty?

      lines = []
      lines << "**#{title}**"
      lines << "_Generated: #{generated_at.strftime("%Y-%m-%d %H:%M:%S")}_"
      lines << ""
      lines << format_table_markdown
      lines << ""
      lines << "_Total: #{data.size} rows_"

      lines.join("\n")
    end

    # HTML representation; title, headers and cells are all escaped.
    # @return [String]
    def to_html
      return empty_report_html if data.empty?

      lines = []
      lines << "<h2>#{CGI.escapeHTML(title)}</h2>"
      lines << "<p><em>Generated: #{generated_at.strftime("%Y-%m-%d %H:%M:%S")}</em></p>"
      lines << format_table_html
      lines << "<p><em>Total: #{data.size} rows</em></p>"

      lines.join("\n")
    end

    # CSV representation with a header row. Values are emitted raw
    # (no cell formatting or truncation).
    # @return [String]
    def to_csv
      require "csv" # deliberate lazy load: only needed for CSV output

      CSV.generate do |csv|
        csv << columns
        data.each do |row|
          csv << columns.map { |col| row[col] }
        end
      end
    end

    # Raw rows backing the report.
    # @return [Array<Hash>]
    def to_a
      data
    end

    # @return [Boolean] true when the report has no rows
    def empty?
      data.empty?
    end

    # @return [Integer] number of rows
    def size
      data.size
    end

    alias_method :length, :size

    # Row count. Supports Enumerable#count's argument and block forms.
    # (Previously this was a bare alias of #size, which silently ignored a
    # block and raised ArgumentError on count(obj), despite the class
    # including Enumerable.)
    def count(*args, &block)
      return size if args.empty? && block.nil?

      data.count(*args, &block)
    end

    # Yield each row; also powers all Enumerable methods.
    def each(&block)
      data.each(&block)
    end

    private

    # Column order inferred from the first row; empty when there is no data.
    def detect_columns
      return [] if data.empty?

      data.first.keys
    end

    def empty_report_text
      "#{title}\n#{"=" * title.length}\nNo data found."
    end

    def empty_report_markdown
      "**#{title}**\n\n_No data found._"
    end

    def empty_report_html
      "<h2>#{CGI.escapeHTML(title)}</h2>\n<p><em>No data found.</em></p>"
    end

    # ASCII table with each column padded to its widest rendered cell.
    def format_table_text
      return "" if data.empty?

      widths = calculate_column_widths

      header = columns.map.with_index { |col, i| col.to_s.ljust(widths[i]) }.join(" | ")
      separator = widths.map { |w| "-" * w }.join("-+-")

      rows = data.map do |row|
        columns.map.with_index do |col, i|
          format_cell(row[col]).to_s.ljust(widths[i])
        end.join(" | ")
      end

      [header, separator, *rows].join("\n")
    end

    # Markdown table; only the first 50 rows are rendered to stay within
    # Telegram's message-size limit, with a trailing "... and N more" row.
    def format_table_markdown
      return "" if data.empty?

      header = "| " + columns.map(&:to_s).join(" | ") + " |"
      separator = "| " + columns.map { "---" }.join(" | ") + " |"

      limited_data = data.first(50)
      rows = limited_data.map do |row|
        "| " + columns.map { |col| format_cell(row[col]) }.join(" | ") + " |"
      end

      result = [header, separator, *rows]
      result << "| ... and #{data.size - 50} more rows |" if data.size > 50

      result.join("\n")
    end

    # Full HTML <table>; every header and cell goes through CGI.escapeHTML.
    def format_table_html
      return "" if data.empty?

      lines = ["<table>"]

      lines << "<thead><tr>"
      columns.each { |col| lines << "<th>#{CGI.escapeHTML(col.to_s)}</th>" }
      lines << "</tr></thead>"

      lines << "<tbody>"
      data.each do |row|
        lines << "<tr>"
        columns.each { |col| lines << "<td>#{CGI.escapeHTML(format_cell(row[col]))}</td>" }
        lines << "</tr>"
      end
      lines << "</tbody>"

      lines << "</table>"
      lines.join("\n")
    end

    # Widest rendered cell per column, including the header itself.
    def calculate_column_widths
      columns.map do |col|
        values = data.map { |row| format_cell(row[col]).to_s.length }
        [col.to_s.length, *values].max
      end
    end

    # Render one cell as a String: nil -> "", floats to two decimals,
    # times as "YYYY-mm-dd HH:MM:SS", long strings truncated.
    def format_cell(value)
      case value
      when nil
        ""
      when Float
        format("%.2f", value)
      when Time, DateTime
        value.strftime("%Y-%m-%d %H:%M:%S")
      when String
        truncate_query(value)
      else
        value.to_s
      end
    end

    # Truncate long text (typically SQL) to the configured maximum length,
    # appending "..." when shortened.
    def truncate_query(text)
      max_length = PgReports.config.max_query_length
      return text if text.length <= max_length

      "#{text[0, max_length - 3]}..."
    end
  end
end
@@ -0,0 +1,20 @@
-- Active connections
-- Shows all current database connections, excluding background backends
-- with no database and this reporting session itself.

SELECT
  pid,
  datname AS database,
  usename AS username,
  application_name AS application,
  client_addr,
  client_hostname,
  state,
  query_start,
  state_change,
  wait_event_type,
  wait_event,
  LEFT(query, 500) AS query  -- truncated to keep report rows readable
FROM pg_stat_activity
WHERE datname IS NOT NULL
  AND pid != pg_backend_pid()
ORDER BY query_start DESC NULLS LAST;
@@ -0,0 +1,35 @@
-- Blocking queries
-- Shows queries that are blocking other queries.
-- Based on the standard lock-monitoring query from the PostgreSQL wiki:
-- each waiting (not granted) lock is matched to the granted lock with the
-- same lock identity held by a different backend.

SELECT
  blocked_locks.pid AS blocked_pid,
  blocked_activity.usename AS blocked_user,
  blocking_locks.pid AS blocking_pid,
  blocking_activity.usename AS blocking_user,
  blocked_activity.datname AS database,
  EXTRACT(EPOCH FROM (NOW() - blocked_activity.query_start)) AS blocked_duration,
  blocked_activity.wait_event_type AS blocked_wait_type,
  LEFT(blocked_activity.query, 300) AS blocked_query,
  LEFT(blocking_activity.query, 300) AS blocking_query,
  blocked_locks.locktype AS lock_type,
  blocked_locks.mode AS blocked_mode,
  blocking_locks.mode AS blocking_mode
FROM pg_catalog.pg_locks blocked_locks
JOIN pg_catalog.pg_stat_activity blocked_activity
  ON blocked_activity.pid = blocked_locks.pid
JOIN pg_catalog.pg_locks blocking_locks
  -- A lock's identity spans all target columns; IS NOT DISTINCT FROM treats
  -- two NULLs as equal (plain "=" would silently drop those matches).
  ON blocking_locks.locktype = blocked_locks.locktype
  AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
  AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
  AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
  AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
  AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
  AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
  AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
  AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
  AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
  AND blocking_locks.pid != blocked_locks.pid
JOIN pg_catalog.pg_stat_activity blocking_activity
  ON blocking_activity.pid = blocking_locks.pid
WHERE NOT blocked_locks.granted
ORDER BY blocked_duration DESC;
@@ -0,0 +1,13 @@
-- Connection statistics by state
-- Summary of connections grouped by database and state

SELECT
  datname AS database,
  state,
  COUNT(*) AS count,
  -- Connections currently waiting on some event (lock, I/O, ...)
  COUNT(*) FILTER (WHERE wait_event IS NOT NULL) AS waiting_count
FROM pg_stat_activity
WHERE datname IS NOT NULL          -- skip background workers without a DB
  AND pid != pg_backend_pid()      -- skip this reporting session
GROUP BY datname, state
ORDER BY datname, count DESC;
@@ -0,0 +1,19 @@
-- Idle connections
-- Shows connections that are idle, including "idle in transaction",
-- which can hold locks and block vacuum.

SELECT
  pid,
  datname AS database,
  usename AS username,
  application_name AS application,
  client_addr,
  state,
  -- Seconds since the connection entered its current (idle) state
  EXTRACT(EPOCH FROM (NOW() - state_change)) AS idle_duration,
  state_change,
  query_start,
  LEFT(query, 200) AS last_query
FROM pg_stat_activity
WHERE datname IS NOT NULL
  AND pid != pg_backend_pid()
  AND state IN ('idle', 'idle in transaction', 'idle in transaction (aborted)')
ORDER BY idle_duration DESC;
@@ -0,0 +1,20 @@
-- Current locks
-- Shows all locks held or awaited in the database

SELECT
  l.pid,
  a.datname AS database,
  a.usename AS username,
  -- Fall back to the raw OID when there is no pg_class match
  -- (e.g. transactionid/advisory locks have no relation)
  COALESCE(c.relname, l.relation::text) AS relation,
  l.locktype,
  l.mode,
  l.granted,
  NOT l.granted AS waiting,
  EXTRACT(EPOCH FROM (NOW() - a.query_start)) AS query_duration,
  LEFT(a.query, 300) AS query
FROM pg_locks l
JOIN pg_stat_activity a ON l.pid = a.pid
LEFT JOIN pg_class c ON l.relation = c.oid
WHERE a.datname IS NOT NULL
  AND l.pid != pg_backend_pid()
ORDER BY l.granted, l.pid;  -- false < true, so waiting locks list first
@@ -0,0 +1,21 @@
-- Long running queries
-- Active queries ordered by how long they have been executing

SELECT
  pid,
  datname AS database,
  usename AS username,
  application_name AS application,
  client_addr,
  state,
  -- Seconds the current query has been running
  EXTRACT(EPOCH FROM (NOW() - query_start)) AS duration_seconds,
  query_start,
  wait_event_type,
  wait_event,
  LEFT(query, 500) AS query
FROM pg_stat_activity
WHERE datname IS NOT NULL
  AND pid != pg_backend_pid()   -- exclude this reporting session
  AND state = 'active'
  AND query_start IS NOT NULL
ORDER BY duration_seconds DESC;
@@ -0,0 +1,36 @@
-- Bloated indexes: indexes with estimated bloat
-- Rough estimate only (compares physical size against reltuples * 8 bytes);
-- install the pgstattuple extension for accurate numbers.

WITH index_stats AS (
  SELECT
    i.schemaname AS schema,
    i.tablename AS table_name,
    i.indexname AS index_name,
    -- Schema-qualify the regclass cast: a bare indexname::regclass fails
    -- (or resolves the wrong relation) when the index is not on the
    -- search_path; format('%I.%I', ...) quotes both identifiers safely.
    pg_relation_size(format('%I.%I', i.schemaname, i.indexname)::regclass) AS index_size,
    -- Join pg_class by (name, namespace) instead of a name-only scalar
    -- subquery, which errors out when two schemas share an index name.
    c.reltuples AS index_tuples
  FROM pg_indexes i
  JOIN pg_namespace n ON n.nspname = i.schemaname
  JOIN pg_class c ON c.relname = i.indexname AND c.relnamespace = n.oid
  WHERE i.schemaname NOT IN ('pg_catalog', 'information_schema')
),
estimated AS (
  SELECT
    schema,
    table_name,
    index_name,
    index_size,
    GREATEST(
      CASE
        WHEN index_size > 0 THEN
          ROUND(((index_size - index_tuples * 8) * 100.0 / index_size)::numeric, 2)
        ELSE 0
      END,
      0
    ) AS bloat_percent
  FROM index_stats
)
SELECT
  schema,
  table_name,
  index_name,
  pg_size_pretty(index_size) AS index_size,
  ROUND(index_size / 1024.0 / 1024.0, 2) AS index_size_mb,
  bloat_percent,
  ROUND(bloat_percent * index_size / 100.0 / 1024.0 / 1024.0, 2) AS bloat_size_mb
FROM estimated
WHERE index_size > 1024 * 1024 -- Only indexes > 1MB
ORDER BY index_size DESC;
@@ -0,0 +1,38 @@
-- Duplicate indexes: indexes that may be redundant
-- Reports index pairs on the same table (and same access method) where one
-- index's column list is a strict prefix of another's; the shorter index is
-- usually droppable.

WITH index_cols AS (
  SELECT
    n.nspname AS schema,
    t.relname AS table_name,
    i.relname AS index_name,
    a.amname AS index_type,
    -- Columns in index key order (indkey is an int2vector of attnums)
    ARRAY_AGG(attr.attname ORDER BY array_position(ix.indkey, attr.attnum)) AS columns,
    pg_relation_size(i.oid) AS index_size
  FROM pg_index ix
  JOIN pg_class t ON t.oid = ix.indrelid
  JOIN pg_class i ON i.oid = ix.indexrelid
  JOIN pg_namespace n ON n.oid = t.relnamespace
  JOIN pg_am a ON a.oid = i.relam
  JOIN pg_attribute attr ON attr.attrelid = t.oid AND attr.attnum = ANY(ix.indkey)
  WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
  GROUP BY n.nspname, t.relname, i.relname, a.amname, i.oid
)
SELECT
  a.table_name,
  a.index_name,
  a.columns AS index_columns,
  b.index_name AS duplicate_of,
  b.columns AS duplicate_columns,
  pg_size_pretty(a.index_size) AS index_size,
  ROUND(a.index_size / 1024.0 / 1024.0, 2) AS index_size_mb
FROM index_cols a
JOIN index_cols b ON
  a.schema = b.schema AND
  a.table_name = b.table_name AND
  a.index_name != b.index_name AND
  a.index_type = b.index_type AND
  -- a's columns are a prefix of b's columns
  a.columns = b.columns[1:array_length(a.columns, 1)]
WHERE array_length(a.columns, 1) < array_length(b.columns, 1)
ORDER BY a.index_size DESC;
@@ -0,0 +1,14 @@
-- Index sizes
-- Shows all user indexes sorted by on-disk size

SELECT
  schemaname AS schema,
  relname AS table_name,
  indexrelname AS index_name,
  pg_size_pretty(pg_relation_size(indexrelid)) AS index_size,
  ROUND(pg_relation_size(indexrelid) / 1024.0 / 1024.0, 2) AS index_size_mb,
  idx_scan,     -- index scans since the last stats reset
  idx_tup_read
FROM pg_stat_user_indexes
WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
ORDER BY pg_relation_size(indexrelid) DESC;
@@ -0,0 +1,19 @@
-- Index usage statistics
-- Shows how often each index is scanned

SELECT
  schemaname AS schema,
  relname AS table_name,
  indexrelname AS index_name,
  idx_scan,
  idx_tup_read,
  idx_tup_fetch,
  pg_size_pretty(pg_relation_size(indexrelid)) AS index_size,
  ROUND(pg_relation_size(indexrelid) / 1024.0 / 1024.0, 2) AS index_size_mb,
  CASE
    -- Cast BEFORE dividing: (idx_tup_read / idx_scan) is integer division,
    -- which truncates the average to a whole number before ROUND sees it.
    WHEN idx_scan > 0 THEN ROUND(idx_tup_read::numeric / idx_scan, 2)
    ELSE 0
  END AS avg_tuples_per_scan
FROM pg_stat_user_indexes
WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
ORDER BY idx_scan DESC;
@@ -0,0 +1,15 @@
-- Invalid indexes: indexes that are not valid
-- Typically left behind by a failed CREATE INDEX CONCURRENTLY; they are
-- still maintained on writes but never used by the planner.

SELECT
  n.nspname AS schema,
  t.relname AS table_name,
  i.relname AS index_name,
  pg_get_indexdef(i.oid) AS index_definition
FROM pg_index ix
JOIN pg_class t ON t.oid = ix.indrelid
JOIN pg_class i ON i.oid = ix.indexrelid
JOIN pg_namespace n ON n.oid = t.relnamespace
WHERE NOT ix.indisvalid
  AND n.nspname NOT IN ('pg_catalog', 'information_schema')
ORDER BY n.nspname, t.relname, i.relname;
@@ -0,0 +1,27 @@
-- Tables potentially missing indexes
-- High sequential scan activity with little or no index usage

SELECT
  schemaname AS schema,
  relname AS table_name,
  seq_scan,
  seq_tup_read,
  idx_scan,
  COALESCE(idx_scan, 0) AS idx_scan_count,
  CASE
    -- Cast BEFORE dividing: (seq_tup_read / seq_scan) is integer division,
    -- which floors the average instead of rounding it.
    WHEN seq_scan > 0 THEN ROUND(seq_tup_read::numeric / seq_scan, 0)
    ELSE 0
  END AS avg_seq_tup_read,
  pg_size_pretty(pg_relation_size(relid)) AS table_size,
  ROUND(pg_relation_size(relid) / 1024.0 / 1024.0, 2) AS table_size_mb,
  n_live_tup AS estimated_rows
FROM pg_stat_user_tables
WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
  AND seq_scan > 50                               -- enough scans to matter
  AND pg_relation_size(relid) > 10 * 1024 * 1024  -- Tables > 10MB
  AND (
    idx_scan IS NULL
    OR idx_scan = 0
    OR (seq_scan::float / NULLIF(idx_scan, 0)) > 10  -- seq scans dominate
  )
ORDER BY seq_tup_read DESC NULLS LAST;
@@ -0,0 +1,18 @@
-- Unused indexes: indexes that have never been scanned
-- These waste disk space and slow down writes. Primary keys and
-- (by naming convention) unique indexes are excluded because they enforce
-- constraints regardless of scan count.

SELECT
  schemaname AS schema,
  relname AS table_name,
  indexrelname AS index_name,
  idx_scan,
  idx_tup_read,
  idx_tup_fetch,
  pg_size_pretty(pg_relation_size(indexrelid)) AS index_size,
  ROUND(pg_relation_size(indexrelid) / 1024.0 / 1024.0, 2) AS index_size_mb
FROM pg_stat_user_indexes
WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
  AND idx_scan = 0
  AND indexrelname NOT LIKE '%_pkey'
  AND indexrelname NOT LIKE '%_unique%'
ORDER BY pg_relation_size(indexrelid) DESC;
@@ -0,0 +1,20 @@
-- All query statistics ordered by total time
-- Requires the pg_stat_statements extension; column names assume
-- PostgreSQL 13+ (total_exec_time, mean_exec_time, ...).

SELECT
  query,
  calls,
  ROUND((total_exec_time)::numeric, 2) AS total_time_ms,
  ROUND((mean_exec_time)::numeric, 2) AS mean_time_ms,
  ROUND((min_exec_time)::numeric, 2) AS min_time_ms,
  ROUND((max_exec_time)::numeric, 2) AS max_time_ms,
  ROUND((stddev_exec_time)::numeric, 2) AS stddev_time_ms,
  rows,
  shared_blks_hit,
  shared_blks_read,
  -- Percent of blocks served from shared buffers; NULLIF guards against /0
  ROUND((shared_blks_hit * 100.0 / NULLIF(shared_blks_hit + shared_blks_read, 0))::numeric, 2) AS cache_hit_ratio
FROM pg_stat_statements
WHERE calls > 0
  AND query NOT LIKE '%pg_stat_statements%'  -- hide this tool's own queries
ORDER BY total_exec_time DESC
LIMIT 200;