pg_reports 0.5.4 → 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +69 -0
  3. data/README.md +123 -370
  4. data/app/controllers/pg_reports/dashboard_controller.rb +21 -21
  5. data/app/views/layouts/pg_reports/application.html.erb +135 -69
  6. data/app/views/pg_reports/dashboard/_show_modals.html.erb +22 -22
  7. data/app/views/pg_reports/dashboard/_show_scripts.html.erb +105 -55
  8. data/app/views/pg_reports/dashboard/_show_styles.html.erb +49 -11
  9. data/app/views/pg_reports/dashboard/index.html.erb +123 -114
  10. data/app/views/pg_reports/dashboard/show.html.erb +30 -26
  11. data/config/locales/en.yml +597 -0
  12. data/config/locales/ru.yml +562 -0
  13. data/config/locales/uk.yml +607 -0
  14. data/lib/pg_reports/compatibility.rb +63 -0
  15. data/lib/pg_reports/configuration.rb +2 -0
  16. data/lib/pg_reports/dashboard/reports_registry.rb +112 -5
  17. data/lib/pg_reports/definitions/indexes/fk_without_indexes.yml +30 -0
  18. data/lib/pg_reports/definitions/indexes/index_correlation.yml +31 -0
  19. data/lib/pg_reports/definitions/indexes/inefficient_indexes.yml +45 -0
  20. data/lib/pg_reports/definitions/queries/temp_file_queries.yml +39 -0
  21. data/lib/pg_reports/definitions/schema_analysis/always_null_columns.yml +31 -0
  22. data/lib/pg_reports/definitions/schema_analysis/unused_columns.yml +32 -0
  23. data/lib/pg_reports/definitions/system/wraparound_risk.yml +31 -0
  24. data/lib/pg_reports/definitions/tables/tables_without_pk.yml +28 -0
  25. data/lib/pg_reports/definitions/tables/unused_tables.yml +30 -0
  26. data/lib/pg_reports/definitions/tables/update_hotspots.yml +32 -0
  27. data/lib/pg_reports/engine.rb +6 -0
  28. data/lib/pg_reports/module_generator.rb +2 -1
  29. data/lib/pg_reports/modules/indexes.rb +3 -0
  30. data/lib/pg_reports/modules/queries.rb +1 -0
  31. data/lib/pg_reports/modules/schema_analysis.rb +261 -2
  32. data/lib/pg_reports/modules/system.rb +27 -0
  33. data/lib/pg_reports/modules/tables.rb +1 -0
  34. data/lib/pg_reports/query_monitor.rb +64 -36
  35. data/lib/pg_reports/report_definition.rb +20 -24
  36. data/lib/pg_reports/sql/indexes/fk_without_indexes.sql +23 -0
  37. data/lib/pg_reports/sql/indexes/index_correlation.sql +27 -0
  38. data/lib/pg_reports/sql/indexes/inefficient_indexes.sql +22 -0
  39. data/lib/pg_reports/sql/queries/temp_file_queries.sql +16 -0
  40. data/lib/pg_reports/sql/schema_analysis/always_null_columns.sql +25 -0
  41. data/lib/pg_reports/sql/schema_analysis/unused_columns.sql +36 -0
  42. data/lib/pg_reports/sql/system/checkpoint_stats.sql +20 -0
  43. data/lib/pg_reports/sql/system/checkpoint_stats_legacy.sql +19 -0
  44. data/lib/pg_reports/sql/system/wraparound_risk.sql +21 -0
  45. data/lib/pg_reports/sql/tables/tables_without_pk.sql +20 -0
  46. data/lib/pg_reports/sql/tables/unused_tables.sql +19 -0
  47. data/lib/pg_reports/sql/tables/update_hotspots.sql +26 -0
  48. data/lib/pg_reports/version.rb +1 -1
  49. data/lib/pg_reports.rb +5 -0
  50. metadata +24 -1
@@ -64,6 +64,155 @@ module PgReports
64
64
  )
65
65
  end
66
66
 
67
+ # Polymorphic associations missing a composite (type, id) index
68
+ # @return [Report]
69
+ def polymorphic_without_index(**_params)
70
+ eager_load_models!
71
+ results = []
72
+
73
+ each_concrete_model do |model|
74
+ polymorphic_belongs_to(model).each do |assoc|
75
+ type_col = "#{assoc.name}_type"
76
+ id_col = "#{assoc.name}_id"
77
+
78
+ next unless model.column_names.include?(type_col) && model.column_names.include?(id_col)
79
+
80
+ # Expression/functional indexes report `columns` as a String — drop them; we only care about column-list indexes.
81
+ indexes = ActiveRecord::Base.connection.indexes(model.table_name).select { |i| i.columns.is_a?(Array) }
82
+ composite = indexes.find { |idx| (idx.columns & [type_col, id_col]).size == 2 }
83
+ next if composite
84
+
85
+ results << {
86
+ "schema" => "public",
87
+ "table_name" => model.table_name,
88
+ "model_name" => model.name,
89
+ "association" => assoc.name.to_s,
90
+ "type_column" => type_col,
91
+ "id_column" => id_col,
92
+ "coverage" => coverage_label(indexes, type_col, id_col),
93
+ "suggestion" => "add_index :#{model.table_name}, [:#{type_col}, :#{id_col}]"
94
+ }
95
+ end
96
+ end
97
+
98
+ Report.new(
99
+ title: "Polymorphic Associations Without Composite Index",
100
+ data: results,
101
+ columns: %w[table_name model_name association coverage suggestion]
102
+ )
103
+ end
104
+
105
# `belongs_to ..., counter_cache: ...` declarations whose counter column is
# missing on the parent table.
#
# @return [Report]
def counter_cache_issues(**_params)
  eager_load_models!
  rows = []

  each_concrete_model do |child|
    counter_belongs_to(child).each do |assoc|
      column = counter_cache_column_name(child, assoc)
      parent = parent_class_for(assoc)
      next if parent.nil? || !parent.table_exists?
      # Only report when the expected counter column is absent on the parent.
      next if parent.column_names.include?(column)

      rows << {
        "schema" => "public",
        "child_model" => child.name,
        "child_table" => child.table_name,
        "parent_model" => parent.name,
        "parent_table" => parent.table_name,
        "expected_column" => column,
        "issue" => "missing_column",
        "suggestion" => "add_column :#{parent.table_name}, :#{column}, :integer, default: 0, null: false"
      }
    end
  end

  Report.new(
    title: "Counter Cache Issues",
    data: rows,
    columns: %w[child_model parent_model expected_column issue suggestion]
  )
end
138
+
139
# Tables carrying a soft-delete marker column but whose model (if any) does
# not filter it by default.
#
# @return [Report]
def soft_delete_without_scope(**_params)
  eager_load_models!
  marker_columns = %w[deleted_at discarded_at archived_at]
  rows = []

  ActiveRecord::Base.connection.tables.each do |table|
    next if internal_table?(table)

    column_names = ActiveRecord::Base.connection.columns(table).map(&:name)
    marker = (column_names & marker_columns).first
    next if marker.nil?

    model = find_model_for_table(table)

    if model.nil?
      rows << {
        "schema" => "public",
        "table_name" => table,
        "model_name" => "(no model)",
        "soft_delete_column" => marker,
        "status" => "no_model",
        "suggestion" => "Create a model or filter manually in queries"
      }
    elsif !model_filters_soft_delete?(model, marker)
      rows << {
        "schema" => "public",
        "table_name" => table,
        "model_name" => model.name,
        "soft_delete_column" => marker,
        "status" => "no_scope",
        "suggestion" => "default_scope { where(#{marker}: nil) } or use discard/paranoia"
      }
    end
  end

  Report.new(
    title: "Soft Delete Without Scope",
    data: rows,
    columns: %w[table_name model_name soft_delete_column status suggestion]
  )
end
184
+
185
# Tables with no corresponding Rails model (legacy leftovers or HABTM joins),
# sorted by estimated row count, largest first.
#
# @return [Report]
def orphan_tables(**_params)
  eager_load_models!
  connection = ActiveRecord::Base.connection

  rows = connection.tables.each_with_object([]) do |table, acc|
    next if internal_table?(table)
    next if find_model_for_table(table)

    table_columns = connection.columns(table)
    acc << {
      "schema" => "public",
      "table_name" => table,
      "row_count" => approximate_row_count(table),
      "column_count" => table_columns.size,
      "classification" => classify_orphan(table_columns)
    }
  end

  rows.sort_by! { |row| -row["row_count"].to_i }

  Report.new(
    title: "Orphan Tables (No Rails Model)",
    data: rows,
    columns: %w[table_name row_count column_count classification]
  )
end
215
+
67
216
  private
68
217
 
69
218
  def executor
@@ -125,7 +274,7 @@ module PgReports
125
274
  if column_names.size > 1
126
275
  # For composite indexes, we need to check if there's a validation with scope
127
276
  primary_column = column_names.first.to_sym
128
- scope_columns = column_names[1..-1].map(&:to_sym)
277
+ scope_columns = column_names[1..].map(&:to_sym)
129
278
 
130
279
  has_composite = uniqueness_validators.any? do |validator|
131
280
  validator.attributes.include?(primary_column) &&
@@ -145,10 +294,120 @@ module PgReports
145
294
  "validates :#{column_names.first}, uniqueness: true"
146
295
  else
147
296
  primary = column_names.first
148
- scopes = column_names[1..-1]
297
+ scopes = column_names[1..]
149
298
  "validates :#{primary}, uniqueness: { scope: #{scopes.inspect} }"
150
299
  end
151
300
  end
301
+
302
# Eager-load all application models so `descendants` is complete in development.
# Best-effort: autoload failures are swallowed rather than crashing the report.
def eager_load_models!
  app = defined?(Rails) && Rails.respond_to?(:application) ? Rails.application : nil
  app&.eager_load!
rescue StandardError
  nil
end
309
+
310
# Yield each concrete (non-abstract, named) ActiveRecord model that has a
# backing table. Models whose table cannot be inspected are skipped silently.
def each_concrete_model
  ActiveRecord::Base.descendants.each do |model|
    begin
      next if model.abstract_class? || model.name.nil?
      next unless model.table_exists?

      yield model
    rescue ActiveRecord::StatementInvalid, ActiveRecord::ConnectionNotEstablished
      # The model's table cannot be inspected right now — skip it.
    end
  end
end
321
+
322
# All `belongs_to ..., polymorphic: true` reflections declared on the model.
def polymorphic_belongs_to(model)
  model.reflect_on_all_associations(:belongs_to)
       .select { |reflection| reflection.options[:polymorphic] }
end
325
+
326
# Non-polymorphic `belongs_to` reflections that declare a counter_cache.
def counter_belongs_to(model)
  model.reflect_on_all_associations(:belongs_to).select do |reflection|
    !reflection.options[:polymorphic] && reflection.options[:counter_cache]
  end
end
329
+
330
# Resolve the counter_cache column name from belongs_to options.
#   counter_cache: true                           → "<child_table_name>_count" (Rails default)
#   counter_cache: :col / "col"                   → "col"
#   counter_cache: { active: true, column: "col" } → "col" (Rails 7.1+ form)
#   counter_cache: { active: true }               → falls back to the default name
def counter_cache_column_name(child_model, assoc)
  option = assoc.options[:counter_cache]

  explicit =
    if option.is_a?(Hash)
      option[:column] || option["column"]
    elsif option != true
      option
    end

  return explicit.to_s if explicit

  # Strip a schema prefix (e.g. "billing.comments" → "comments") before
  # deriving the default "<table>_count" name.
  "#{child_model.table_name.split(".").last}_count"
end
350
+
351
# The parent model class for a belongs_to reflection, or nil when the
# declared class name cannot be resolved to a constant.
def parent_class_for(reflection)
  reflection.klass
rescue NameError
  nil
end
356
+
357
# Does the model's default scope filter out soft-deleted rows on the given column?
# Detects: paranoia, discard, and hand-rolled default_scopes referencing the column.
def model_filters_soft_delete?(model, column)
  if model.respond_to?(:paranoid?) && model.paranoid?
    true
  elsif model.respond_to?(:discard_column) && model.discard_column.to_s == column
    true
  else
    # NOTE: substring match — any default-scope SQL that mentions the column
    # counts as "filtered".
    model.all.to_sql.include?(column)
  end
rescue StandardError
  false
end
368
+
369
# Human-readable description of partial index coverage for a polymorphic pair.
# Only indexes whose LEADING column is the type/id column count as covering it;
# expression indexes (String `columns`) are ignored.
def coverage_label(indexes, type_col, id_col)
  leading = indexes.select { |idx| idx.columns.is_a?(Array) }
                   .map { |idx| idx.columns.first }
  has_type = leading.include?(type_col)
  has_id = leading.include?(id_col)

  case [has_type, has_id]
  when [true, true] then "type and id indexed separately"
  when [true, false] then "only type indexed"
  when [false, true] then "only id indexed"
  else "neither indexed"
  end
end
384
+
385
# Rails bookkeeping tables that should never appear in reports.
def internal_table?(name)
  name == "schema_migrations" || name == "ar_internal_metadata"
end
388
+
389
# Fast row-count estimate from pg_stat_user_tables (n_live_tup).
# Returns 0 when statistics are unavailable or the query fails.
def approximate_row_count(table)
  quoted = ActiveRecord::Base.connection.quote(table)
  value = ActiveRecord::Base.connection.select_value(
    "SELECT n_live_tup FROM pg_stat_user_tables WHERE relname = #{quoted}"
  )
  value.to_i
rescue StandardError
  0
end
395
+
396
# Classify an orphan table based on column shape.
# Two FK columns and nothing else (besides id/timestamps) → likely HABTM join table.
def classify_orphan(columns)
  names = columns.map(&:name)
  foreign_keys = names.select { |name| name.end_with?("_id") }
  substantive = names - %w[id created_at updated_at]

  if foreign_keys.size == 2 && (substantive - foreign_keys).empty?
    "join_table_candidate"
  elsif foreign_keys.size >= 2
    "join_model_without_class"
  else
    "legacy"
  end
end
152
411
  end
153
412
  end
154
413
  end
@@ -12,7 +12,11 @@ module PgReports
12
12
  # - settings
13
13
  # - extensions
14
14
  # - activity_overview
15
+ # - wraparound_risk(limit: 50)
15
16
  # - cache_stats
17
+ #
18
+ # Manually implemented (version-dependent SQL):
19
+ # - checkpoint_stats(limit: 10)
16
20
 
17
21
  # pg_stat_statements availability check
18
22
  # @return [Boolean] Whether pg_stat_statements is available
@@ -132,6 +136,22 @@ module PgReports
132
136
  end
133
137
  end
134
138
 
139
# Checkpoint stats — uses version-specific SQL because PostgreSQL 17+
# moved checkpoint columns from pg_stat_bgwriter to pg_stat_checkpointer.
def checkpoint_stats(limit: 10)
  file = pg_version >= 170_000 ? :checkpoint_stats : :checkpoint_stats_legacy
  rows = executor.execute_from_file(:system, file)
  rows = rows.first(limit) if limit

  Report.new(
    title: "Checkpoint Statistics",
    data: rows,
    columns: %w[checkpoints_timed checkpoints_requested checkpoint_write_time_sec
                checkpoint_sync_time_sec buffers_checkpoint buffers_clean
                bgwriter_stops buffers_alloc requested_pct stats_reset]
  )
end
154
+
135
155
  # Get list of all databases
136
156
  # @return [Array<Hash>] List of databases with sizes
137
157
  def databases_list
@@ -152,6 +172,13 @@ module PgReports
152
172
 
153
173
  private
154
174
 
175
# Numeric PostgreSQL server version (server_version_num, e.g. 170000 for 17.0),
# memoized per instance. Falls back to 0 when no row is returned.
def pg_version
  @pg_version ||= begin
    row = executor.execute("SELECT current_setting('server_version_num')::int AS v").first
    (row ? row.fetch("v", 0) : nil).to_i
  end
end
181
+
155
182
  def executor
156
183
  @executor ||= Executor.new
157
184
  end
@@ -14,6 +14,7 @@ module PgReports
14
14
  # - row_counts(limit: 50)
15
15
  # - cache_hit_ratios(limit: 50)
16
16
  # - seq_scans(limit: 20)
17
+ # - tables_without_pk(limit: 50)
17
18
  # - recently_modified(limit: 20)
18
19
 
19
20
  private
@@ -16,16 +16,20 @@ module PgReports
16
16
  @subscriber = nil
17
17
  @mutex = Mutex.new
18
18
  @queries = []
19
+ @handling_event = false
20
+
21
+ # Local state — used by the event handler to avoid cache reads
22
+ # (which generate SQL events and cause infinite recursion with DB-backed caches)
23
+ @enabled = false
24
+ @session_id = nil
25
+
26
+ sync_from_cache
19
27
  ensure_subscription_if_enabled
20
28
  end
21
29
 
22
- def enabled
23
- cache_read(CACHE_KEY_ENABLED) || false
24
- end
30
+ attr_reader :enabled
25
31
 
26
- def session_id
27
- cache_read(CACHE_KEY_SESSION_ID)
28
- end
32
+ attr_reader :session_id
29
33
 
30
34
  def start
31
35
  @mutex.synchronize do
@@ -37,7 +41,11 @@ module PgReports
37
41
  new_session_id = SecureRandom.uuid
38
42
  @queries = []
39
43
 
40
- # Store state in cache so all processes can see it
44
+ # Update local state first (used by the event handler — avoids a cache round-trip)
45
+ @enabled = true
46
+ @session_id = new_session_id
47
+
48
+ # Store state in cache so other processes can see it
41
49
  cache_write(CACHE_KEY_ENABLED, true)
42
50
  cache_write(CACHE_KEY_SESSION_ID, new_session_id)
43
51
 
@@ -52,6 +60,8 @@ module PgReports
52
60
  {success: true, message: "Query monitoring started", session_id: new_session_id}
53
61
  end
54
62
  rescue => e
63
+ @enabled = false
64
+ @session_id = nil
55
65
  cache_write(CACHE_KEY_ENABLED, false)
56
66
  {success: false, error: e.message}
57
67
  end
@@ -62,7 +72,11 @@ module PgReports
62
72
  return {success: false, message: "Monitoring not active"}
63
73
  end
64
74
 
65
- current_session_id = session_id
75
+ current_session_id = @session_id
76
+
77
+ # Clear local state immediately — stops event handler from processing
78
+ @enabled = false
79
+ @session_id = nil
66
80
 
67
81
  # Unsubscribe from notifications in THIS process
68
82
  if @subscriber
@@ -71,12 +85,12 @@ module PgReports
71
85
  end
72
86
 
73
87
  # Write session end marker to file
74
- write_session_marker("session_end")
88
+ write_session_marker("session_end", current_session_id)
75
89
 
76
90
  # Flush queries to file
77
91
  flush_to_file
78
92
 
79
- # Clear state from cache
93
+ # Clear state from cache so other processes see it
80
94
  cache_delete(CACHE_KEY_ENABLED)
81
95
  cache_delete(CACHE_KEY_SESSION_ID)
82
96
 
@@ -175,9 +189,15 @@ module PgReports
175
189
  defined?(Rails) && defined?(Rails.cache)
176
190
  end
177
191
 
192
+ # Sync local state from shared cache (called on initialize for multi-process support)
193
+ def sync_from_cache
194
+ @enabled = enabled
195
+ @session_id = session_id
196
+ end
197
+
178
198
  # Ensure this process is subscribed to notifications if monitoring is enabled
179
199
  def ensure_subscription_if_enabled
180
- return unless enabled
200
+ return unless @enabled
181
201
  ensure_subscription
182
202
  end
183
203
 
@@ -192,30 +212,38 @@ module PgReports
192
212
  end
193
213
 
194
214
  def handle_sql_event(name, started, finished, unique_id, payload)
195
- return unless enabled
215
+ # Use local @enabled instead of enabled (which hits cache and may generate SQL,
216
+ # causing infinite recursion with database-backed cache stores like SolidCache)
217
+ return unless @enabled
218
+ return if @handling_event
196
219
 
197
- # Skip if should be filtered
198
- return if should_skip?(payload)
199
-
200
- duration_ms = ((finished - started) * 1000).round(2)
201
- sql = payload[:sql]
202
- query_name = payload[:name]
203
-
204
- # Extract source location
205
- source_location = extract_source_location
206
-
207
- # Build query entry
208
- query_entry = {
209
- type: "query",
210
- session_id: session_id,
211
- sql: sql,
212
- duration_ms: duration_ms,
213
- name: query_name,
214
- source_location: source_location,
215
- timestamp: Time.current.iso8601
216
- }
217
-
218
- add_to_buffer(query_entry)
220
+ @handling_event = true
221
+ begin
222
+ # Skip if should be filtered
223
+ return if should_skip?(payload)
224
+
225
+ duration_ms = ((finished - started) * 1000).round(2)
226
+ sql = payload[:sql]
227
+ query_name = payload[:name]
228
+
229
+ # Extract source location
230
+ source_location = extract_source_location
231
+
232
+ # Build query entry
233
+ query_entry = {
234
+ type: "query",
235
+ session_id: @session_id,
236
+ sql: sql,
237
+ duration_ms: duration_ms,
238
+ name: query_name,
239
+ source_location: source_location,
240
+ timestamp: Time.current.iso8601
241
+ }
242
+
243
+ add_to_buffer(query_entry)
244
+ ensure
245
+ @handling_event = false
246
+ end
219
247
  end
220
248
 
221
249
  def should_skip?(payload)
@@ -317,12 +345,12 @@ module PgReports
317
345
  end
318
346
  end
319
347
 
320
- def write_session_marker(marker_type)
348
+ def write_session_marker(marker_type, sid = @session_id)
321
349
  return unless log_file_enabled?
322
350
 
323
351
  marker = {
324
352
  type: marker_type,
325
- session_id: session_id,
353
+ session_id: sid,
326
354
  timestamp: Time.current.iso8601
327
355
  }
328
356
 
@@ -120,33 +120,29 @@ module PgReports
120
120
  params = {}
121
121
 
122
122
  # Parameters from parameters section
123
- if config["parameters"]
124
- config["parameters"].each do |name, param_config|
125
- params[name] = {
126
- type: param_config["type"],
127
- default: param_config["default"],
128
- description: param_config["description"],
129
- label: name.to_s.titleize
130
- }
131
- end
123
+ config["parameters"]&.each do |name, param_config|
124
+ params[name] = {
125
+ type: param_config["type"],
126
+ default: param_config["default"],
127
+ description: I18n.t("pg_reports.parameters.#{name}.description", default: param_config["description"]),
128
+ label: I18n.t("pg_reports.parameters.#{name}.label", default: name.to_s.titleize)
129
+ }
132
130
  end
133
131
 
134
132
  # Add threshold parameters from filters (config-based)
135
- if config["filters"]
136
- config["filters"].each do |filter|
137
- if filter["value"]["source"] == "config"
138
- config_key = filter["value"]["key"]
139
- field_name = filter["field"]
140
-
141
- params["#{field_name}_threshold"] = {
142
- type: filter["cast"] || "integer",
143
- default: PgReports.config.public_send(config_key),
144
- description: "Override threshold for #{field_name}",
145
- label: "#{field_name.titleize} Threshold",
146
- current_config: PgReports.config.public_send(config_key),
147
- is_threshold: true
148
- }
149
- end
133
+ config["filters"]&.each do |filter|
134
+ if filter["value"]["source"] == "config"
135
+ config_key = filter["value"]["key"]
136
+ field_name = filter["field"]
137
+
138
+ params["#{field_name}_threshold"] = {
139
+ type: filter["cast"] || "integer",
140
+ default: PgReports.config.public_send(config_key),
141
+ description: I18n.t("pg_reports.parameters.threshold_description", field: field_name),
142
+ label: I18n.t("pg_reports.parameters.threshold_label", field: field_name.titleize),
143
+ current_config: PgReports.config.public_send(config_key),
144
+ is_threshold: true
145
+ }
150
146
  end
151
147
  end
152
148
 
@@ -0,0 +1,23 @@
1
-- Foreign keys without indexes on the referencing (child) table.
-- Missing indexes cause sequential scans on DELETE/UPDATE of parent rows.
--
-- conkey/confkey are unnested together WITH ORDINALITY so that multi-column
-- foreign keys pair child and parent columns positionally; the previous
-- attnum = ANY(conkey) / ANY(confkey) joins produced a cross product of all
-- column combinations for composite constraints.

SELECT
  c.conname AS constraint_name,
  c.conrelid::regclass::text AS child_table,
  a.attname AS child_column,
  c.confrelid::regclass::text AS parent_table,
  pa.attname AS parent_column,
  pg_size_pretty(pg_relation_size(c.conrelid)) AS child_table_size,
  ROUND(pg_relation_size(c.conrelid) / 1024.0 / 1024.0, 2) AS child_table_size_mb
FROM pg_constraint c
CROSS JOIN LATERAL unnest(c.conkey, c.confkey) WITH ORDINALITY AS k(conattnum, confattnum, ord)
JOIN pg_attribute a ON a.attrelid = c.conrelid AND a.attnum = k.conattnum
JOIN pg_attribute pa ON pa.attrelid = c.confrelid AND pa.attnum = k.confattnum
WHERE c.contype = 'f'
  AND NOT EXISTS (
    SELECT 1
    FROM pg_index i
    WHERE i.indrelid = c.conrelid
      -- The FK column must be the LEADING index column for the index to help;
      -- this single check subsumes the former redundant attnum = ANY(indkey).
      AND i.indkey[0] = a.attnum
  )
ORDER BY pg_relation_size(c.conrelid) DESC;
@@ -0,0 +1,27 @@
1
-- Index correlation: how well physical row order matches index order.
-- Low correlation on frequently range-scanned columns means excessive random I/O.

SELECT
  s.schemaname AS schema,
  s.tablename AS table_name,
  s.attname AS column_name,
  si.indexrelname AS index_name,
  ROUND(s.correlation::numeric, 4) AS correlation,
  ABS(s.correlation) AS abs_correlation,
  s.n_distinct,
  pg_size_pretty(pg_relation_size(c.oid)) AS table_size,
  ROUND(pg_relation_size(c.oid) / 1024.0 / 1024.0, 2) AS table_size_mb,
  si.idx_scan
FROM pg_stats s
JOIN pg_class c ON c.relname = s.tablename
  AND c.relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = s.schemaname)
JOIN pg_index idx ON idx.indrelid = c.oid
-- Only stats rows for the index's leading column are relevant to correlation.
JOIN pg_attribute a ON a.attrelid = c.oid AND a.attname = s.attname
  AND a.attnum = idx.indkey[0]
-- Single join: the original joined pg_stat_user_indexes twice (aliases i and
-- si) on the same indexrelid, which was redundant.
JOIN pg_stat_user_indexes si ON si.indexrelid = idx.indexrelid
WHERE s.schemaname NOT IN ('pg_catalog', 'information_schema')
  AND ABS(s.correlation) < 0.5
  AND pg_relation_size(c.oid) > 10 * 1024 * 1024
  AND si.idx_scan > 100
ORDER BY ABS(s.correlation) ASC, pg_relation_size(c.oid) DESC;
@@ -0,0 +1,22 @@
1
-- Inefficient index scans: indexes that are used but read far more index
-- entries than they ultimately fetch. A high idx_tup_read / idx_tup_fetch
-- ratio suggests the index column order does not match the query predicates,
-- forcing PostgreSQL to walk large index ranges.
-- Reference: https://www.datadoghq.com/blog/detect-inefficient-index-scans-with-dbm/

SELECT
  schemaname AS schema,
  relname AS table_name,
  indexrelname AS index_name,
  idx_scan,
  idx_tup_read,
  idx_tup_fetch,
  ROUND((idx_tup_read::numeric / NULLIF(idx_tup_fetch, 0)), 1) AS read_to_fetch_ratio,
  pg_size_pretty(pg_relation_size(indexrelid)) AS index_size,
  ROUND(pg_relation_size(indexrelid) / 1024.0 / 1024.0, 2) AS index_size_mb,
  pg_get_indexdef(indexrelid) AS index_definition
FROM pg_stat_user_indexes
WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
  AND idx_scan > 0
  AND idx_tup_fetch > 0
  -- Only report indexes reading 10x more entries than they fetch.
  AND (idx_tup_read::numeric / NULLIF(idx_tup_fetch, 0)) > 10
ORDER BY (idx_tup_read::numeric / NULLIF(idx_tup_fetch, 0)) DESC;
@@ -0,0 +1,16 @@
1
-- Queries that spill to disk via temporary files.
-- Heavy temp-file traffic indicates work_mem is too small for these queries;
-- block counts are converted to MB assuming the default 8 kB block size.
-- :max_query_length is a named parameter bound by the executor at run time.

SELECT
  queryid,
  LEFT(query, :max_query_length) AS query,
  calls,
  ROUND(temp_blks_written::numeric * 8 / 1024, 2) AS temp_mb_written,
  ROUND(temp_blks_read::numeric * 8 / 1024, 2) AS temp_mb_read,
  ROUND((total_exec_time / 1000)::numeric, 2) AS total_time_sec,
  ROUND((mean_exec_time)::numeric, 2) AS mean_time_ms,
  rows
FROM pg_stat_statements
WHERE temp_blks_written > 0
  -- Restrict to the current database; pg_stat_statements is cluster-wide.
  AND dbid = (SELECT oid FROM pg_database WHERE datname = current_database())
ORDER BY temp_blks_written DESC;