pghero 2.2.1 → 2.7.0

Files changed (50)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +100 -53
  3. data/README.md +20 -8
  4. data/app/assets/javascripts/pghero/Chart.bundle.js +16260 -15580
  5. data/app/assets/javascripts/pghero/application.js +8 -7
  6. data/app/assets/javascripts/pghero/chartkick.js +1973 -1325
  7. data/app/assets/javascripts/pghero/highlight.pack.js +2 -2
  8. data/app/assets/javascripts/pghero/jquery.js +3605 -4015
  9. data/app/assets/javascripts/pghero/nouislider.js +2479 -0
  10. data/app/assets/stylesheets/pghero/application.css +1 -1
  11. data/app/assets/stylesheets/pghero/nouislider.css +299 -0
  12. data/app/controllers/pg_hero/home_controller.rb +97 -42
  13. data/app/helpers/pg_hero/home_helper.rb +11 -0
  14. data/app/views/pg_hero/home/_live_queries_table.html.erb +14 -3
  15. data/app/views/pg_hero/home/connections.html.erb +9 -0
  16. data/app/views/pg_hero/home/index.html.erb +49 -10
  17. data/app/views/pg_hero/home/live_queries.html.erb +1 -1
  18. data/app/views/pg_hero/home/maintenance.html.erb +16 -2
  19. data/app/views/pg_hero/home/relation_space.html.erb +2 -2
  20. data/app/views/pg_hero/home/show_query.html.erb +4 -5
  21. data/app/views/pg_hero/home/space.html.erb +3 -3
  22. data/app/views/pg_hero/home/system.html.erb +4 -4
  23. data/app/views/pg_hero/home/tune.html.erb +2 -1
  24. data/lib/generators/pghero/config_generator.rb +1 -1
  25. data/lib/generators/pghero/query_stats_generator.rb +3 -20
  26. data/lib/generators/pghero/space_stats_generator.rb +3 -20
  27. data/lib/generators/pghero/templates/config.yml.tt +21 -1
  28. data/lib/pghero.rb +82 -17
  29. data/lib/pghero/database.rb +104 -19
  30. data/lib/pghero/methods/basic.rb +34 -25
  31. data/lib/pghero/methods/connections.rb +35 -0
  32. data/lib/pghero/methods/constraints.rb +30 -0
  33. data/lib/pghero/methods/explain.rb +1 -1
  34. data/lib/pghero/methods/indexes.rb +1 -1
  35. data/lib/pghero/methods/maintenance.rb +3 -1
  36. data/lib/pghero/methods/queries.rb +7 -3
  37. data/lib/pghero/methods/query_stats.rb +93 -25
  38. data/lib/pghero/methods/sequences.rb +1 -1
  39. data/lib/pghero/methods/space.rb +4 -0
  40. data/lib/pghero/methods/suggested_indexes.rb +1 -1
  41. data/lib/pghero/methods/system.rb +219 -23
  42. data/lib/pghero/methods/users.rb +4 -0
  43. data/lib/pghero/query_stats.rb +1 -3
  44. data/lib/pghero/space_stats.rb +5 -0
  45. data/lib/pghero/stats.rb +6 -0
  46. data/lib/pghero/version.rb +1 -1
  47. data/lib/tasks/pghero.rake +10 -4
  48. metadata +15 -12
  49. data/app/assets/javascripts/pghero/jquery.nouislider.min.js +0 -31
  50. data/app/assets/stylesheets/pghero/jquery.nouislider.css +0 -165
data/lib/pghero/methods/constraints.rb
@@ -0,0 +1,30 @@
+module PgHero
+  module Methods
+    module Constraints
+      # referenced fields can be nil
+      # as not all constraints are foreign keys
+      def invalid_constraints
+        select_all <<-SQL
+          SELECT
+            nsp.nspname AS schema,
+            rel.relname AS table,
+            con.conname AS name,
+            fnsp.nspname AS referenced_schema,
+            frel.relname AS referenced_table
+          FROM
+            pg_catalog.pg_constraint con
+          INNER JOIN
+            pg_catalog.pg_class rel ON rel.oid = con.conrelid
+          LEFT JOIN
+            pg_catalog.pg_class frel ON frel.oid = con.confrelid
+          LEFT JOIN
+            pg_catalog.pg_namespace nsp ON nsp.oid = con.connamespace
+          LEFT JOIN
+            pg_catalog.pg_namespace fnsp ON fnsp.oid = frel.relnamespace
+          WHERE
+            con.convalidated = 'f'
+        SQL
+      end
+    end
+  end
+end
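The new invalid_constraints method surfaces constraints that exist but were never validated (for example, foreign keys added with NOT VALID). A minimal usage sketch, assuming configured PgHero database instances; the iteration and the VALIDATE CONSTRAINT follow-up are illustrative, not part of this release:

    PgHero.databases.each_value do |db|
      db.invalid_constraints.each do |c|
        # referenced_schema / referenced_table can be nil for non-foreign-key constraints
        puts "#{c[:schema]}.#{c[:table]}: constraint #{c[:name]} is not validated"
      end
    end

    # each reported constraint could then be validated manually, e.g.
    # ALTER TABLE my_schema.my_table VALIDATE CONSTRAINT my_constraint;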
data/lib/pghero/methods/explain.rb
@@ -6,7 +6,7 @@ module PgHero
       explanation = nil
 
       # use transaction for safety
-      with_transaction(statement_timeout: (explain_timeout_sec * 1000), rollback: true) do
+      with_transaction(statement_timeout: (explain_timeout_sec * 1000).round, rollback: true) do
         if (sql.sub(/;\z/, "").include?(";") || sql.upcase.include?("COMMIT")) && !explain_safe?
           raise ActiveRecord::StatementInvalid, "Unsafe statement"
         end
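The added .round matters because explain_timeout_sec may be a fractional number of seconds, while statement_timeout expects an integer number of milliseconds. A quick illustration with a hypothetical value:

    explain_timeout_sec = 2.5
    explain_timeout_sec * 1000          # => 2500.0 (Float)
    (explain_timeout_sec * 1000).round  # => 2500 (Integer, safe to pass as statement_timeout)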
data/lib/pghero/methods/indexes.rb
@@ -195,7 +195,7 @@ module PgHero
          FROM
            pg_index
          JOIN
-           pg_class ON pg_class.oid=pg_index.indexrelid
+           pg_class ON pg_class.oid = pg_index.indexrelid
          JOIN
            pg_namespace ON pg_namespace.oid = pg_class.relnamespace
          JOIN
data/lib/pghero/methods/maintenance.rb
@@ -57,7 +57,9 @@ module PgHero
            last_vacuum,
            last_autovacuum,
            last_analyze,
-           last_autoanalyze
+           last_autoanalyze,
+           n_dead_tup AS dead_rows,
+           n_live_tup AS live_rows
          FROM
            pg_stat_user_tables
          ORDER BY
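The two new columns expose dead and live tuple counts from pg_stat_user_tables alongside the existing vacuum/analyze timestamps. A hedged sketch of reading them, assuming a PgHero database instance db and that this result set is returned by maintenance_info as in earlier releases:

    db.maintenance_info.each do |t|
      dead = t[:dead_rows].to_i
      live = t[:live_rows].to_i
      pct = (100.0 * dead / [dead + live, 1].max).round(1)
      puts "#{t[:schema]}.#{t[:table]}: #{dead} dead rows (#{pct}% of total)"
    end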
data/lib/pghero/methods/queries.rb
@@ -2,7 +2,7 @@ module PgHero
   module Methods
     module Queries
       def running_queries(min_duration: nil, all: false)
-        select_all <<-SQL
+        query = <<-SQL
           SELECT
             pid,
             state,
@@ -24,6 +24,8 @@ module PgHero
           ORDER BY
             COALESCE(query_start, xact_start) DESC
         SQL
+
+        select_all(query, query_columns: [:query])
       end
 
       def long_running_queries
@@ -31,9 +33,9 @@ module PgHero
       end
 
       # from https://wiki.postgresql.org/wiki/Lock_Monitoring
-      # and http://big-elephants.com/2013-09/exploring-query-locks-in-postgres/
+      # and https://big-elephants.com/2013-09/exploring-query-locks-in-postgres/
       def blocked_queries
-        select_all <<-SQL
+        query = <<-SQL
           SELECT
             COALESCE(blockingl.relation::regclass::text,blockingl.locktype) as locked_item,
             blockeda.pid AS blocked_pid,
@@ -65,6 +67,8 @@ module PgHero
           ORDER BY
             blocked_duration DESC
         SQL
+
+        select_all(query, query_columns: [:blocked_query, :current_or_recent_query_in_blocking_process])
       end
     end
   end
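Both methods now build the SQL into a local variable and pass query_columns to select_all, so those columns can receive extra handling (such as normalization or data filtering) downstream. Calling code is unchanged; a minimal sketch, assuming a PgHero database instance db (parameter values are illustrative):

    # queries running longer than the given duration, across all states
    db.running_queries(min_duration: 5, all: true).each do |q|
      puts "pid #{q[:pid]} (#{q[:state]}): #{q[:query]}"
    end

    # sessions currently waiting on locks
    db.blocked_queries.each do |q|
      puts "pid #{q[:blocked_pid]} is blocked on #{q[:locked_item]}"
    end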
data/lib/pghero/methods/query_stats.rb
@@ -2,7 +2,7 @@ module PgHero
   module Methods
     module QueryStats
       def query_stats(historical: false, start_at: nil, end_at: nil, min_average_time: nil, min_calls: nil, **options)
-        current_query_stats = historical && end_at && end_at < Time.now ? [] : current_query_stats(options)
+        current_query_stats = historical && end_at && end_at < Time.now ? [] : current_query_stats(**options)
         historical_query_stats = historical && historical_query_stats_enabled? ? historical_query_stats(start_at: start_at, end_at: end_at, **options) : []
 
         query_stats = combine_query_stats((current_query_stats + historical_query_stats).group_by { |q| [q[:query_hash], q[:user]] })
@@ -56,8 +56,46 @@ module PgHero
         true
       end
 
-      def reset_query_stats(raise_errors: false)
-        execute("SELECT pg_stat_statements_reset()")
+      # TODO scope by database in PgHero 3.0
+      # (add database: database_name to options)
+      def reset_query_stats(**options)
+        reset_instance_query_stats(**options)
+      end
+
+      # resets query stats for the entire instance
+      # it's possible to reset stats for a specific
+      # database, user or query hash in Postgres 12+
+      def reset_instance_query_stats(database: nil, user: nil, query_hash: nil, raise_errors: false)
+        if database || user || query_hash
+          raise PgHero::Error, "Requires PostgreSQL 12+" if server_version_num < 120000
+
+          if database
+            database_id = execute("SELECT oid FROM pg_database WHERE datname = #{quote(database)}").first.try(:[], "oid")
+            raise PgHero::Error, "Database not found: #{database}" unless database_id
+          else
+            database_id = 0
+          end
+
+          if user
+            user_id = execute("SELECT usesysid FROM pg_user WHERE usename = #{quote(user)}").first.try(:[], "usesysid")
+            raise PgHero::Error, "User not found: #{user}" unless user_id
+          else
+            user_id = 0
+          end
+
+          if query_hash
+            query_id = query_hash.to_i
+            # may not be needed
+            # but not intuitive that all query hashes are reset with 0
+            raise PgHero::Error, "Invalid query hash: #{query_hash}" if query_id == 0
+          else
+            query_id = 0
+          end
+
+          execute("SELECT pg_stat_statements_reset(#{quote(user_id.to_i)}, #{quote(database_id.to_i)}, #{quote(query_id.to_i)})")
+        else
+          execute("SELECT pg_stat_statements_reset()")
+        end
         true
       rescue ActiveRecord::StatementInvalid => e
         raise e if raise_errors
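With this change, reset_query_stats delegates to reset_instance_query_stats, which on PostgreSQL 12+ can target a single database, user, or query hash instead of wiping pg_stat_statements for the whole instance. A hedged usage sketch (the database name and flag values are illustrative):

    db = PgHero.databases.values.first  # any configured database instance

    # reset everything, as before
    db.reset_query_stats

    # PostgreSQL 12+: reset only the stats recorded for one database
    db.reset_query_stats(database: "myapp_production", raise_errors: true)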
@@ -104,31 +142,32 @@ module PgHero
           query_stats[database_id] = query_stats(limit: 1000000, database: database_name)
         end
 
-        supports_query_hash = supports_query_hash?
+        query_stats = query_stats.select { |_, v| v.any? }
+
+        # nothing to do
+        return if query_stats.empty?
 
-        if query_stats.any? { |_, v| v.any? } && reset_query_stats(raise_errors: raise_errors)
+        # use mapping, not query stats here
+        # TODO add option for this, and make default in PgHero 3.0
+        if false # mapping.size == 1 && server_version_num >= 120000
           query_stats.each do |db_id, db_query_stats|
-            if db_query_stats.any?
-              values =
-                db_query_stats.map do |qs|
-                  [
-                    db_id,
-                    qs[:query],
-                    qs[:total_minutes] * 60 * 1000,
-                    qs[:calls],
-                    now,
-                    supports_query_hash ? qs[:query_hash] : nil,
-                    qs[:user]
-                  ]
-                end
-
-              columns = %w[database query total_time calls captured_at query_hash user]
-              insert_stats("pghero_query_stats", columns, values)
+            if reset_query_stats(database: mapping[db_id], raise_errors: raise_errors)
+              insert_query_stats(db_id, db_query_stats, now)
+            end
+          end
+        else
+          if reset_query_stats(raise_errors: raise_errors)
+            query_stats.each do |db_id, db_query_stats|
+              insert_query_stats(db_id, db_query_stats, now)
             end
           end
         end
       end
 
+      def clean_query_stats
+        PgHero::QueryStats.where(database: id).where("captured_at < ?", 14.days.ago).delete_all
+      end
+
       def slow_queries(query_stats: nil, **options)
         query_stats ||= self.query_stats(options)
         query_stats.select { |q| q[:calls].to_i >= slow_query_calls.to_i && q[:average_time].to_f >= slow_query_ms.to_f }
@@ -166,14 +205,15 @@ module PgHero
         if query_stats_enabled?
           limit ||= 100
           sort ||= "total_minutes"
-          select_all <<-SQL
+          total_time = server_version_num >= 130000 ? "(total_plan_time + total_exec_time)" : "total_time"
+          query = <<-SQL
             WITH query_stats AS (
               SELECT
                 LEFT(query, 10000) AS query,
                 #{supports_query_hash? ? "queryid" : "md5(query)"} AS query_hash,
                 rolname AS user,
-                (total_time / 1000 / 60) AS total_minutes,
-                (total_time / calls) AS average_time,
+                (#{total_time} / 1000 / 60) AS total_minutes,
+                (#{total_time} / calls) AS average_time,
                 calls
               FROM
                 pg_stat_statements
@@ -182,6 +222,7 @@ module PgHero
               INNER JOIN
                 pg_roles ON pg_roles.oid = pg_stat_statements.userid
               WHERE
+                calls > 0 AND
                 pg_database.datname = #{database ? quote(database) : "current_database()"}
                 #{query_hash ? "AND queryid = #{quote(query_hash)}" : nil}
             )
@@ -200,6 +241,11 @@ module PgHero
               #{quote_table_name(sort)} DESC
             LIMIT #{limit.to_i}
           SQL
+
+          # we may be able to skip query_columns
+          # in more recent versions of Postgres
+          # as pg_stat_statements should be already normalized
+          select_all(query, query_columns: [:query])
         else
           raise NotEnabled, "Query stats not enabled"
         end
@@ -208,7 +254,7 @@ module PgHero
       def historical_query_stats(sort: nil, start_at: nil, end_at: nil, query_hash: nil)
         if historical_query_stats_enabled?
           sort ||= "total_minutes"
-          select_all_stats <<-SQL
+          query = <<-SQL
             WITH query_stats AS (
               SELECT
                 #{supports_query_hash? ? "query_hash" : "md5(query)"} AS query_hash,
@@ -244,6 +290,10 @@ module PgHero
               #{quote_table_name(sort)} DESC
             LIMIT 100
           SQL
+
+          # we can skip query_columns if all stored data is normalized
+          # for now, assume it's not
+          select_all_stats(query, query_columns: [:query, :explainable_query])
         else
           raise NotEnabled, "Historical query stats not enabled"
         end
@@ -276,6 +326,24 @@ module PgHero
       def normalize_query(query)
         squish(query.to_s.gsub(/\?(, ?\?)+/, "?").gsub(/\/\*.+?\*\//, ""))
       end
+
+      def insert_query_stats(db_id, db_query_stats, now)
+        values =
+          db_query_stats.map do |qs|
+            [
+              db_id,
+              qs[:query],
+              qs[:total_minutes] * 60 * 1000,
+              qs[:calls],
+              now,
+              supports_query_hash? ? qs[:query_hash] : nil,
+              qs[:user]
+            ]
+          end
+
+        columns = %w[database query total_time calls captured_at query_hash user]
+        insert_stats("pghero_query_stats", columns, values)
+      end
     end
   end
 end
data/lib/pghero/methods/sequences.rb
@@ -25,7 +25,7 @@ module PgHero
         WHERE
           NOT a.attisdropped
           AND a.attnum > 0
-          AND d.adsrc LIKE 'nextval%'
+          AND pg_get_expr(d.adbin, d.adrelid) LIKE 'nextval%'
          AND n.nspname NOT LIKE 'pg\\_temp\\_%'
        SQL
 
data/lib/pghero/methods/space.rb
@@ -129,6 +129,10 @@ module PgHero
         insert_stats("pghero_space_stats", columns, values) if values.any?
       end
 
+      def clean_space_stats
+        PgHero::SpaceStats.where(database: id).where("captured_at < ?", 90.days.ago).delete_all
+      end
+
       def space_stats_enabled?
         table_exists?("pghero_space_stats")
       end
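Together with clean_query_stats above, this gives a built-in retention policy for captured stats: query stats older than 14 days and space stats older than 90 days are deleted. A minimal sketch of running both periodically, assuming configured database instances (scheduling is left to the host application):

    PgHero.databases.each_value do |db|
      db.clean_query_stats if db.historical_query_stats_enabled?
      db.clean_space_stats if db.space_stats_enabled?
    end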
data/lib/pghero/methods/suggested_indexes.rb
@@ -48,7 +48,7 @@ module PgHero
       def suggested_indexes(suggested_indexes_by_query: nil, **options)
         indexes = []
 
-        (suggested_indexes_by_query || self.suggested_indexes_by_query(options)).select { |_s, i| i[:found] && !i[:covering_index] }.group_by { |_s, i| i[:index] }.each do |index, group|
+        (suggested_indexes_by_query || self.suggested_indexes_by_query(**options)).select { |_s, i| i[:found] && !i[:covering_index] }.group_by { |_s, i| i[:index] }.each do |index, group|
           details = {}
           group.map(&:second).each do |g|
             details = details.except(:index).deep_merge(g)
data/lib/pghero/methods/system.rb
@@ -1,31 +1,46 @@
 module PgHero
   module Methods
     module System
+      def system_stats_enabled?
+        !system_stats_provider.nil?
+      end
+
+      # TODO remove defined checks in 3.0
+      def system_stats_provider
+        if aws_db_instance_identifier && (defined?(Aws) || defined?(AWS))
+          :aws
+        elsif gcp_database_id
+          :gcp
+        elsif azure_resource_id
+          :azure
+        end
+      end
+
       def cpu_usage(**options)
-        rds_stats("CPUUtilization", options)
+        system_stats(:cpu, **options)
       end
 
       def connection_stats(**options)
-        rds_stats("DatabaseConnections", options)
+        system_stats(:connections, **options)
       end
 
       def replication_lag_stats(**options)
-        rds_stats("ReplicaLag", options)
+        system_stats(:replication_lag, **options)
       end
 
       def read_iops_stats(**options)
-        rds_stats("ReadIOPS", options)
+        system_stats(:read_iops, **options)
       end
 
       def write_iops_stats(**options)
-        rds_stats("WriteIOPS", options)
+        system_stats(:write_iops, **options)
       end
 
       def free_space_stats(**options)
-        rds_stats("FreeStorageSpace", options)
+        system_stats(:free_space, **options)
       end
 
-      def rds_stats(metric_name, duration: nil, period: nil, offset: nil)
+      def rds_stats(metric_name, duration: nil, period: nil, offset: nil, series: false)
         if system_stats_enabled?
           aws_options = {region: region}
           if access_key_id
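System stats are no longer AWS-only: the provider is detected from configuration (aws_db_instance_identifier plus a loaded AWS SDK, gcp_database_id, or azure_resource_id), and the public metric helpers route through system_stats. A hedged usage sketch; the duration and period values are illustrative:

    db = PgHero.databases.values.first

    if db.system_stats_enabled?
      puts "provider: #{db.system_stats_provider}"  # :aws, :gcp, or :azure

      # average CPU utilization over the last hour, one data point per minute
      db.cpu_usage(duration: 1.hour, period: 1.minute).each do |time, average|
        puts "#{time}: #{average}%"
      end
    end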
@@ -43,16 +58,14 @@ module PgHero
           duration = (duration || 1.hour).to_i
           period = (period || 1.minute).to_i
           offset = (offset || 0).to_i
-
-          end_time = (Time.now - offset)
-          # ceil period
-          end_time = Time.at((end_time.to_f / period).ceil * period)
+          end_time = Time.at(((Time.now - offset).to_f / period).ceil * period)
+          start_time = end_time - duration
 
           resp = client.get_metric_statistics(
             namespace: "AWS/RDS",
             metric_name: metric_name,
-            dimensions: [{name: "DBInstanceIdentifier", value: db_instance_identifier}],
-            start_time: (end_time - duration).iso8601,
+            dimensions: [{name: "DBInstanceIdentifier", value: aws_db_instance_identifier}],
+            start_time: start_time.iso8601,
             end_time: end_time.iso8601,
             period: period,
             statistics: ["Average"]
@@ -61,30 +74,213 @@ module PgHero
           resp[:datapoints].sort_by { |d| d[:timestamp] }.each do |d|
             data[d[:timestamp]] = d[:average]
           end
+
+          add_missing_data(data, start_time, end_time, period) if series
+
           data
         else
           raise NotEnabled, "System stats not enabled"
         end
       end
 
-      def system_stats_enabled?
-        !!((defined?(Aws) || defined?(AWS)) && db_instance_identifier)
+      def azure_stats(metric_name, duration: nil, period: nil, offset: nil, series: false)
+        # TODO DRY with RDS stats
+        duration = (duration || 1.hour).to_i
+        period = (period || 1.minute).to_i
+        offset = (offset || 0).to_i
+        end_time = Time.at(((Time.now - offset).to_f / period).ceil * period)
+        start_time = end_time - duration
+
+        interval =
+          case period
+          when 60
+            "PT1M"
+          when 300
+            "PT5M"
+          when 900
+            "PT15M"
+          when 1800
+            "PT30M"
+          when 3600
+            "PT1H"
+          else
+            raise Error, "Unsupported period"
+          end
+
+        client = Azure::Monitor::Profiles::Latest::Mgmt::Client.new
+        timespan = "#{start_time.iso8601}/#{end_time.iso8601}"
+        results = client.metrics.list(
+          azure_resource_id,
+          metricnames: metric_name,
+          aggregation: "Average",
+          timespan: timespan,
+          interval: interval
+        )
+
+        data = {}
+        result = results.value.first
+        if result
+          result.timeseries.first.data.each do |point|
+            data[point.time_stamp.to_time] = point.average
+          end
+        end
+
+        add_missing_data(data, start_time, end_time, period) if series
+
+        data
       end
 
-      def access_key_id
-        ENV["PGHERO_ACCESS_KEY_ID"] || ENV["AWS_ACCESS_KEY_ID"]
+      private
+
+      def gcp_stats(metric_name, duration: nil, period: nil, offset: nil, series: false)
+        require "google/cloud/monitoring/v3"
+
+        # TODO DRY with RDS stats
+        duration = (duration || 1.hour).to_i
+        period = (period || 1.minute).to_i
+        offset = (offset || 0).to_i
+        end_time = Time.at(((Time.now - offset).to_f / period).ceil * period)
+        start_time = end_time - duration
+
+        # validate input since we need to interpolate below
+        raise Error, "Invalid metric name" unless metric_name =~ /\A[a-z\/_]+\z/i
+        raise Error, "Invalid database id" unless gcp_database_id =~ /\A[a-z\-:]+\z/i
+
+        # we handle three situations:
+        # 1. google-cloud-monitoring-v3
+        # 2. google-cloud-monitoring >= 1
+        # 3. google-cloud-monitoring < 1
+
+        # for situations 1 and 2
+        # Google::Cloud::Monitoring.metric_service is documented
+        # but doesn't work for situation 1
+        if defined?(Google::Cloud::Monitoring::V3::MetricService::Client)
+          client = Google::Cloud::Monitoring::V3::MetricService::Client.new
+
+          interval = Google::Cloud::Monitoring::V3::TimeInterval.new
+          interval.end_time = Google::Protobuf::Timestamp.new(seconds: end_time.to_i)
+          # subtract period to make sure we get first data point
+          interval.start_time = Google::Protobuf::Timestamp.new(seconds: (start_time - period).to_i)
+
+          aggregation = Google::Cloud::Monitoring::V3::Aggregation.new
+          # may be better to use ALIGN_NEXT_OLDER for space stats to show most recent data point
+          # stick with average for now to match AWS
+          aggregation.per_series_aligner = Google::Cloud::Monitoring::V3::Aggregation::Aligner::ALIGN_MEAN
+          aggregation.alignment_period = period
+
+          results = client.list_time_series({
+            name: "projects/#{gcp_database_id.split(":").first}",
+            filter: "metric.type = \"cloudsql.googleapis.com/database/#{metric_name}\" AND resource.label.database_id = \"#{gcp_database_id}\"",
+            interval: interval,
+            view: Google::Cloud::Monitoring::V3::ListTimeSeriesRequest::TimeSeriesView::FULL,
+            aggregation: aggregation
+          })
+        else
+          require "google/cloud/monitoring"
+
+          client = Google::Cloud::Monitoring::Metric.new
+
+          interval = Google::Monitoring::V3::TimeInterval.new
+          interval.end_time = Google::Protobuf::Timestamp.new(seconds: end_time.to_i)
+          # subtract period to make sure we get first data point
+          interval.start_time = Google::Protobuf::Timestamp.new(seconds: (start_time - period).to_i)
+
+          aggregation = Google::Monitoring::V3::Aggregation.new
+          # may be better to use ALIGN_NEXT_OLDER for space stats to show most recent data point
+          # stick with average for now to match AWS
+          aggregation.per_series_aligner = Google::Monitoring::V3::Aggregation::Aligner::ALIGN_MEAN
+          aggregation.alignment_period = period
+
+          results = client.list_time_series(
+            "projects/#{gcp_database_id.split(":").first}",
+            "metric.type = \"cloudsql.googleapis.com/database/#{metric_name}\" AND resource.label.database_id = \"#{gcp_database_id}\"",
+            interval,
+            Google::Monitoring::V3::ListTimeSeriesRequest::TimeSeriesView::FULL,
+            aggregation: aggregation
+          )
+        end
+
+        data = {}
+        result = results.first
+        if result
+          result.points.each do |point|
+            time = Time.at(point.interval.start_time.seconds)
+            value = point.value.double_value
+            value *= 100 if metric_name == "cpu/utilization"
+            data[time] = value
+          end
+        end
+
+        add_missing_data(data, start_time, end_time, period) if series
+
+        data
       end
 
-      def secret_access_key
-        ENV["PGHERO_SECRET_ACCESS_KEY"] || ENV["AWS_SECRET_ACCESS_KEY"]
+      def system_stats(metric_key, **options)
+        case system_stats_provider
+        when :aws
+          metrics = {
+            cpu: "CPUUtilization",
+            connections: "DatabaseConnections",
+            replication_lag: "ReplicaLag",
+            read_iops: "ReadIOPS",
+            write_iops: "WriteIOPS",
+            free_space: "FreeStorageSpace"
+          }
+          rds_stats(metrics[metric_key], **options)
+        when :gcp
+          if metric_key == :free_space
+            quota = gcp_stats("disk/quota", **options)
+            used = gcp_stats("disk/bytes_used", **options)
+            free_space(quota, used)
+          else
+            metrics = {
+              cpu: "cpu/utilization",
+              connections: "postgresql/num_backends",
+              replication_lag: "replication/replica_lag",
+              read_iops: "disk/read_ops_count",
+              write_iops: "disk/write_ops_count"
+            }
+            gcp_stats(metrics[metric_key], **options)
+          end
+        when :azure
+          if metric_key == :free_space
+            quota = azure_stats("storage_limit", **options)
+            used = azure_stats("storage_used", **options)
+            free_space(quota, used)
+          else
+            # no read_iops, write_iops
+            # could add io_consumption_percent
+            metrics = {
+              cpu: "cpu_percent",
+              connections: "active_connections",
+              replication_lag: "pg_replica_log_delay_in_seconds"
+            }
+            raise Error, "Metric not supported" unless metrics[metric_key]
+            azure_stats(metrics[metric_key], **options)
+          end
+        else
+          raise NotEnabled, "System stats not enabled"
+        end
       end
 
-      def region
-        ENV["PGHERO_REGION"] || ENV["AWS_REGION"] || (defined?(Aws) && Aws.config[:region]) || "us-east-1"
+      # only use data points included in both series
+      # this also eliminates need to align Time.now
+      def free_space(quota, used)
+        data = {}
+        quota.each do |k, v|
+          data[k] = v - used[k] if v && used[k]
+        end
+        data
       end
 
-      def db_instance_identifier
-        databases[current_database].db_instance_identifier
+      def add_missing_data(data, start_time, end_time, period)
+        time = start_time
+        end_time = end_time
+        while time < end_time
+          data[time] ||= nil
+          time += period
+        end
       end
     end
   end
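When series: true is passed, add_missing_data pads the returned hash with nil values so every period in the requested window is present, which presumably keeps charted series evenly spaced even when the provider skipped data points. A small standalone illustration of that padding logic (times and values are made up):

    data = { Time.at(0) => 12.5, Time.at(120) => 14.0 }  # the point at t = 60 is missing
    start_time = Time.at(0)
    end_time = Time.at(180)
    period = 60

    time = start_time
    while time < end_time
      data[time] ||= nil
      time += period
    end

    data.keys.sort.map(&:to_i)  # => [0, 60, 120]; data[Time.at(60)] is nil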