pghero 2.3.0 → 2.5.1

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of pghero might be problematic. Click here for more details.

Files changed (40):
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +85 -54
  3. data/README.md +20 -8
  4. data/app/assets/javascripts/pghero/Chart.bundle.js +16260 -15580
  5. data/app/assets/javascripts/pghero/application.js +8 -7
  6. data/app/assets/javascripts/pghero/chartkick.js +1973 -1325
  7. data/app/assets/javascripts/pghero/highlight.pack.js +2 -2
  8. data/app/assets/javascripts/pghero/jquery.js +3605 -4015
  9. data/app/assets/javascripts/pghero/nouislider.js +2479 -0
  10. data/app/assets/stylesheets/pghero/application.css +1 -1
  11. data/app/assets/stylesheets/pghero/nouislider.css +299 -0
  12. data/app/controllers/pg_hero/home_controller.rb +94 -35
  13. data/app/helpers/pg_hero/home_helper.rb +11 -0
  14. data/app/views/pg_hero/home/_live_queries_table.html.erb +14 -2
  15. data/app/views/pg_hero/home/connections.html.erb +9 -0
  16. data/app/views/pg_hero/home/index.html.erb +49 -10
  17. data/app/views/pg_hero/home/live_queries.html.erb +1 -1
  18. data/app/views/pg_hero/home/maintenance.html.erb +16 -2
  19. data/app/views/pg_hero/home/relation_space.html.erb +2 -2
  20. data/app/views/pg_hero/home/show_query.html.erb +3 -3
  21. data/app/views/pg_hero/home/space.html.erb +3 -3
  22. data/app/views/pg_hero/home/system.html.erb +4 -4
  23. data/app/views/pg_hero/home/tune.html.erb +2 -1
  24. data/lib/generators/pghero/templates/config.yml.tt +21 -1
  25. data/lib/pghero.rb +63 -15
  26. data/lib/pghero/database.rb +101 -17
  27. data/lib/pghero/methods/basic.rb +28 -7
  28. data/lib/pghero/methods/connections.rb +35 -0
  29. data/lib/pghero/methods/constraints.rb +30 -0
  30. data/lib/pghero/methods/indexes.rb +1 -1
  31. data/lib/pghero/methods/maintenance.rb +3 -1
  32. data/lib/pghero/methods/queries.rb +6 -2
  33. data/lib/pghero/methods/query_stats.rb +12 -3
  34. data/lib/pghero/methods/suggested_indexes.rb +1 -1
  35. data/lib/pghero/methods/system.rb +219 -23
  36. data/lib/pghero/stats.rb +1 -1
  37. data/lib/pghero/version.rb +1 -1
  38. metadata +6 -5
  39. data/app/assets/javascripts/pghero/jquery.nouislider.min.js +0 -31
  40. data/app/assets/stylesheets/pghero/jquery.nouislider.css +0 -165
@@ -2,6 +2,7 @@ module PgHero
2
2
  class Database
3
3
  include Methods::Basic
4
4
  include Methods::Connections
5
+ include Methods::Constraints
5
6
  include Methods::Explain
6
7
  include Methods::Indexes
7
8
  include Methods::Kill
@@ -22,16 +23,17 @@ module PgHero
22
23
  def initialize(id, config)
23
24
  @id = id
24
25
  @config = config || {}
26
+
27
+ # preload model to ensure only one connection pool
28
+ # this doesn't actually start any connections
29
+ @adapter_checked = false
30
+ @connection_model = build_connection_model
25
31
  end
26
32
 
27
33
  def name
28
34
  @name ||= @config["name"] || id.titleize
29
35
  end
30
36
 
31
- def db_instance_identifier
32
- @db_instance_identifier ||= @config["db_instance_identifier"]
33
- end
34
-
35
37
  def capture_query_stats?
36
38
  config["capture_query_stats"] != false
37
39
  end
@@ -64,24 +66,106 @@ module PgHero
64
66
  (config["index_bloat_bytes"] || PgHero.config["index_bloat_bytes"] || 100.megabytes).to_i
65
67
  end
66
68
 
67
- private
69
+ def aws_access_key_id
70
+ config["aws_access_key_id"] || PgHero.config["aws_access_key_id"] || ENV["PGHERO_ACCESS_KEY_ID"] || ENV["AWS_ACCESS_KEY_ID"]
71
+ end
68
72
 
69
- def connection_model
70
- @connection_model ||= begin
71
- url = config["url"]
72
- Class.new(PgHero::Connection) do
73
- def self.name
74
- "PgHero::Connection::Database#{object_id}"
73
+ def aws_secret_access_key
74
+ config["aws_secret_access_key"] || PgHero.config["aws_secret_access_key"] || ENV["PGHERO_SECRET_ACCESS_KEY"] || ENV["AWS_SECRET_ACCESS_KEY"]
75
+ end
76
+
77
+ def aws_region
78
+ config["aws_region"] || PgHero.config["aws_region"] || ENV["PGHERO_REGION"] || ENV["AWS_REGION"] || (defined?(Aws) && Aws.config[:region]) || "us-east-1"
79
+ end
80
+
81
+ # environment variable is only used if no config file
82
+ def aws_db_instance_identifier
83
+ @aws_db_instance_identifier ||= config["aws_db_instance_identifier"] || config["db_instance_identifier"]
84
+ end
85
+
86
+ # environment variable is only used if no config file
87
+ def gcp_database_id
88
+ @gcp_database_id ||= config["gcp_database_id"]
89
+ end
90
+
91
+ # environment variable is only used if no config file
92
+ def azure_resource_id
93
+ @azure_resource_id ||= config["azure_resource_id"]
94
+ end
95
+
96
+ # must check keys for booleans
97
+ def filter_data
98
+ unless defined?(@filter_data)
99
+ @filter_data =
100
+ if config.key?("filter_data")
101
+ config["filter_data"]
102
+ elsif PgHero.config.key?("filter_data")
103
+ PgHero.config.key?("filter_data")
104
+ else
105
+ PgHero.filter_data
75
106
  end
76
- case url
77
- when String
78
- url = "#{url}#{url.include?("?") ? "&" : "?"}connect_timeout=5" unless url.include?("connect_timeout=")
79
- when Hash
80
- url[:connect_timeout] ||= 5
107
+
108
+ if @filter_data
109
+ begin
110
+ require "pg_query"
111
+ rescue LoadError
112
+ raise Error, "pg_query required for filter_data"
81
113
  end
82
- establish_connection url if url
83
114
  end
84
115
  end
116
+
117
+ @filter_data
118
+ end
119
+
120
+ # TODO remove in next major version
121
+ alias_method :access_key_id, :aws_access_key_id
122
+ alias_method :secret_access_key, :aws_secret_access_key
123
+ alias_method :region, :aws_region
124
+ alias_method :db_instance_identifier, :aws_db_instance_identifier
125
+
126
    private

    # check adapter lazily
    # Returns the memoized connection model, validating exactly once that
    # the configured database really speaks Postgres.
    def connection_model
      unless @adapter_checked
        # rough check for Postgres adapter
        # keep this message generic so it's useful
        # when empty url set in Docker image pghero.yml
        unless @connection_model.connection.adapter_name =~ /postg/i
          raise Error, "Invalid connection URL"
        end
        @adapter_checked = true
      end

      @connection_model
    end

    # just return the model
    # do not start a connection
    def build_connection_model
      url = config["url"]

      # resolve spec
      if !url && config["spec"]
        raise Error, "Spec requires Rails 6+" unless PgHero.spec_supported?
        resolved = ActiveRecord::Base.configurations.configs_for(env_name: PgHero.env, spec_name: config["spec"], include_replicas: true)
        raise Error, "Spec not found: #{config["spec"]}" unless resolved
        url = resolved.config
      end

      # anonymous subclass so each database gets its own pool;
      # self.name gives it a stable-looking identifier for logs
      Class.new(PgHero::Connection) do
        def self.name
          "PgHero::Connection::Database#{object_id}"
        end

        # enforce a 5s connect timeout unless the caller set one
        case url
        when String
          url = "#{url}#{url.include?("?") ? "&" : "?"}connect_timeout=5" unless url.include?("connect_timeout=")
        when Hash
          url[:connect_timeout] ||= 5
        end
        establish_connection url if url
      end
    end
86
170
  end
87
171
  end
@@ -32,13 +32,34 @@ module PgHero
32
32
 
33
33
  private
34
34
 
35
- def select_all(sql, conn = nil)
35
+ def select_all(sql, conn: nil, query_columns: [])
36
36
  conn ||= connection
37
37
  # squish for logs
38
38
  retries = 0
39
39
  begin
40
40
  result = conn.select_all(add_source(squish(sql)))
41
- result.map { |row| Hash[row.map { |col, val| [col.to_sym, result.column_types[col].send(:cast_value, val)] }] }
41
+ result = result.map { |row| Hash[row.map { |col, val| [col.to_sym, result.column_types[col].send(:cast_value, val)] }] }
42
+ if filter_data
43
+ query_columns.each do |column|
44
+ result.each do |row|
45
+ begin
46
+ row[column] = PgQuery.normalize(row[column])
47
+ rescue PgQuery::ParseError
48
+ # try replacing "interval $1" with "$1::interval"
49
+ # see https://github.com/lfittl/pg_query/issues/169 for more info
50
+ # this is not ideal since it changes the query slightly
51
+ # we could skip normalization
52
+ # but this has a very small chance of data leakage
53
+ begin
54
+ row[column] = PgQuery.normalize(row[column].gsub(/\binterval\s+(\$\d+)\b/i, "\\1::interval"))
55
+ rescue PgQuery::ParseError
56
+ row[column] = "<unable to filter data>"
57
+ end
58
+ end
59
+ end
60
+ end
61
+ end
62
+ result
42
63
  rescue ActiveRecord::StatementInvalid => e
43
64
  # fix for random internal errors
44
65
  if e.message.include?("PG::InternalError") && retries < 2
@@ -51,8 +72,8 @@ module PgHero
51
72
  end
52
73
  end
53
74
 
54
- def select_all_stats(sql)
55
- select_all(sql, stats_connection)
75
+ def select_all_stats(sql, **options)
76
+ select_all(sql, **options, conn: stats_connection)
56
77
  end
57
78
 
58
79
  def select_all_size(sql)
@@ -63,12 +84,12 @@ module PgHero
63
84
  result
64
85
  end
65
86
 
66
- def select_one(sql, conn = nil)
67
- select_all(sql, conn).first.values.first
87
+ def select_one(sql, conn: nil)
88
+ select_all(sql, conn: conn).first.values.first
68
89
  end
69
90
 
70
91
  def select_one_stats(sql)
71
- select_one(sql, stats_connection)
92
+ select_one(sql, conn: stats_connection)
72
93
  end
73
94
 
74
95
  def execute(sql)
@@ -1,6 +1,41 @@
1
1
  module PgHero
2
2
  module Methods
3
3
  module Connections
4
+ def connections
5
+ if server_version_num >= 90500
6
+ select_all <<-SQL
7
+ SELECT
8
+ pg_stat_activity.pid,
9
+ datname AS database,
10
+ usename AS user,
11
+ application_name AS source,
12
+ client_addr AS ip,
13
+ state,
14
+ ssl
15
+ FROM
16
+ pg_stat_activity
17
+ LEFT JOIN
18
+ pg_stat_ssl ON pg_stat_activity.pid = pg_stat_ssl.pid
19
+ ORDER BY
20
+ pg_stat_activity.pid
21
+ SQL
22
+ else
23
+ select_all <<-SQL
24
+ SELECT
25
+ pid,
26
+ datname AS database,
27
+ usename AS user,
28
+ application_name AS source,
29
+ client_addr AS ip,
30
+ state
31
+ FROM
32
+ pg_stat_activity
33
+ ORDER BY
34
+ pid
35
+ SQL
36
+ end
37
+ end
38
+
4
39
  def total_connections
5
40
  select_one("SELECT COUNT(*) FROM pg_stat_activity")
6
41
  end
@@ -0,0 +1,30 @@
module PgHero
  module Methods
    module Constraints
      # Lists constraints that were created NOT VALID and have not yet
      # been validated (con.convalidated = 'f').
      # referenced fields can be nil
      # as not all constraints are foreign keys
      def invalid_constraints
        sql = <<-SQL
          SELECT
            nsp.nspname AS schema,
            rel.relname AS table,
            con.conname AS name,
            fnsp.nspname AS referenced_schema,
            frel.relname AS referenced_table
          FROM
            pg_catalog.pg_constraint con
          INNER JOIN
            pg_catalog.pg_class rel ON rel.oid = con.conrelid
          LEFT JOIN
            pg_catalog.pg_class frel ON frel.oid = con.confrelid
          LEFT JOIN
            pg_catalog.pg_namespace nsp ON nsp.oid = con.connamespace
          LEFT JOIN
            pg_catalog.pg_namespace fnsp ON fnsp.oid = frel.relnamespace
          WHERE
            con.convalidated = 'f'
        SQL
        select_all(sql)
      end
    end
  end
end
@@ -195,7 +195,7 @@ module PgHero
195
195
  FROM
196
196
  pg_index
197
197
  JOIN
198
- pg_class ON pg_class.oid=pg_index.indexrelid
198
+ pg_class ON pg_class.oid = pg_index.indexrelid
199
199
  JOIN
200
200
  pg_namespace ON pg_namespace.oid = pg_class.relnamespace
201
201
  JOIN
@@ -57,7 +57,9 @@ module PgHero
57
57
  last_vacuum,
58
58
  last_autovacuum,
59
59
  last_analyze,
60
- last_autoanalyze
60
+ last_autoanalyze,
61
+ n_dead_tup AS dead_rows,
62
+ n_live_tup AS live_rows
61
63
  FROM
62
64
  pg_stat_user_tables
63
65
  ORDER BY
@@ -2,7 +2,7 @@ module PgHero
2
2
  module Methods
3
3
  module Queries
4
4
  def running_queries(min_duration: nil, all: false)
5
- select_all <<-SQL
5
+ query = <<-SQL
6
6
  SELECT
7
7
  pid,
8
8
  state,
@@ -24,6 +24,8 @@ module PgHero
24
24
  ORDER BY
25
25
  COALESCE(query_start, xact_start) DESC
26
26
  SQL
27
+
28
+ select_all(query, query_columns: [:query])
27
29
  end
28
30
 
29
31
  def long_running_queries
@@ -33,7 +35,7 @@ module PgHero
33
35
  # from https://wiki.postgresql.org/wiki/Lock_Monitoring
34
36
  # and https://big-elephants.com/2013-09/exploring-query-locks-in-postgres/
35
37
  def blocked_queries
36
- select_all <<-SQL
38
+ query = <<-SQL
37
39
  SELECT
38
40
  COALESCE(blockingl.relation::regclass::text,blockingl.locktype) as locked_item,
39
41
  blockeda.pid AS blocked_pid,
@@ -65,6 +67,8 @@ module PgHero
65
67
  ORDER BY
66
68
  blocked_duration DESC
67
69
  SQL
70
+
71
+ select_all(query, query_columns: [:blocked_query, :current_or_recent_query_in_blocking_process])
68
72
  end
69
73
  end
70
74
  end
@@ -2,7 +2,7 @@ module PgHero
2
2
  module Methods
3
3
  module QueryStats
4
4
  def query_stats(historical: false, start_at: nil, end_at: nil, min_average_time: nil, min_calls: nil, **options)
5
- current_query_stats = historical && end_at && end_at < Time.now ? [] : current_query_stats(options)
5
+ current_query_stats = historical && end_at && end_at < Time.now ? [] : current_query_stats(**options)
6
6
  historical_query_stats = historical && historical_query_stats_enabled? ? historical_query_stats(start_at: start_at, end_at: end_at, **options) : []
7
7
 
8
8
  query_stats = combine_query_stats((current_query_stats + historical_query_stats).group_by { |q| [q[:query_hash], q[:user]] })
@@ -166,7 +166,7 @@ module PgHero
166
166
  if query_stats_enabled?
167
167
  limit ||= 100
168
168
  sort ||= "total_minutes"
169
- select_all <<-SQL
169
+ query = <<-SQL
170
170
  WITH query_stats AS (
171
171
  SELECT
172
172
  LEFT(query, 10000) AS query,
@@ -200,6 +200,11 @@ module PgHero
200
200
  #{quote_table_name(sort)} DESC
201
201
  LIMIT #{limit.to_i}
202
202
  SQL
203
+
204
+ # we may be able to skip query_columns
205
+ # in more recent versions of Postgres
206
+ # as pg_stat_statements should be already normalized
207
+ select_all(query, query_columns: [:query])
203
208
  else
204
209
  raise NotEnabled, "Query stats not enabled"
205
210
  end
@@ -208,7 +213,7 @@ module PgHero
208
213
  def historical_query_stats(sort: nil, start_at: nil, end_at: nil, query_hash: nil)
209
214
  if historical_query_stats_enabled?
210
215
  sort ||= "total_minutes"
211
- select_all_stats <<-SQL
216
+ query = <<-SQL
212
217
  WITH query_stats AS (
213
218
  SELECT
214
219
  #{supports_query_hash? ? "query_hash" : "md5(query)"} AS query_hash,
@@ -244,6 +249,10 @@ module PgHero
244
249
  #{quote_table_name(sort)} DESC
245
250
  LIMIT 100
246
251
  SQL
252
+
253
+ # we can skip query_columns if all stored data is normalized
254
+ # for now, assume it's not
255
+ select_all_stats(query, query_columns: [:query, :explainable_query])
247
256
  else
248
257
  raise NotEnabled, "Historical query stats not enabled"
249
258
  end
@@ -48,7 +48,7 @@ module PgHero
48
48
  def suggested_indexes(suggested_indexes_by_query: nil, **options)
49
49
  indexes = []
50
50
 
51
- (suggested_indexes_by_query || self.suggested_indexes_by_query(options)).select { |_s, i| i[:found] && !i[:covering_index] }.group_by { |_s, i| i[:index] }.each do |index, group|
51
+ (suggested_indexes_by_query || self.suggested_indexes_by_query(**options)).select { |_s, i| i[:found] && !i[:covering_index] }.group_by { |_s, i| i[:index] }.each do |index, group|
52
52
  details = {}
53
53
  group.map(&:second).each do |g|
54
54
  details = details.except(:index).deep_merge(g)
@@ -1,31 +1,46 @@
1
1
  module PgHero
2
2
  module Methods
3
3
  module System
4
+ def system_stats_enabled?
5
+ !system_stats_provider.nil?
6
+ end
7
+
8
+ # TODO remove defined checks in 3.0
9
+ def system_stats_provider
10
+ if aws_db_instance_identifier && (defined?(Aws) || defined?(AWS))
11
+ :aws
12
+ elsif gcp_database_id
13
+ :gcp
14
+ elsif azure_resource_id
15
+ :azure
16
+ end
17
+ end
18
+
4
19
  def cpu_usage(**options)
5
- rds_stats("CPUUtilization", options)
20
+ system_stats(:cpu, **options)
6
21
  end
7
22
 
8
23
  def connection_stats(**options)
9
- rds_stats("DatabaseConnections", options)
24
+ system_stats(:connections, **options)
10
25
  end
11
26
 
12
27
  def replication_lag_stats(**options)
13
- rds_stats("ReplicaLag", options)
28
+ system_stats(:replication_lag, **options)
14
29
  end
15
30
 
16
31
  def read_iops_stats(**options)
17
- rds_stats("ReadIOPS", options)
32
+ system_stats(:read_iops, **options)
18
33
  end
19
34
 
20
35
  def write_iops_stats(**options)
21
- rds_stats("WriteIOPS", options)
36
+ system_stats(:write_iops, **options)
22
37
  end
23
38
 
24
39
  def free_space_stats(**options)
25
- rds_stats("FreeStorageSpace", options)
40
+ system_stats(:free_space, **options)
26
41
  end
27
42
 
28
- def rds_stats(metric_name, duration: nil, period: nil, offset: nil)
43
+ def rds_stats(metric_name, duration: nil, period: nil, offset: nil, series: false)
29
44
  if system_stats_enabled?
30
45
  aws_options = {region: region}
31
46
  if access_key_id
@@ -43,16 +58,14 @@ module PgHero
43
58
  duration = (duration || 1.hour).to_i
44
59
  period = (period || 1.minute).to_i
45
60
  offset = (offset || 0).to_i
46
-
47
- end_time = (Time.now - offset)
48
- # ceil period
49
- end_time = Time.at((end_time.to_f / period).ceil * period)
61
+ end_time = Time.at(((Time.now - offset).to_f / period).ceil * period)
62
+ start_time = end_time - duration
50
63
 
51
64
  resp = client.get_metric_statistics(
52
65
  namespace: "AWS/RDS",
53
66
  metric_name: metric_name,
54
- dimensions: [{name: "DBInstanceIdentifier", value: db_instance_identifier}],
55
- start_time: (end_time - duration).iso8601,
67
+ dimensions: [{name: "DBInstanceIdentifier", value: aws_db_instance_identifier}],
68
+ start_time: start_time.iso8601,
56
69
  end_time: end_time.iso8601,
57
70
  period: period,
58
71
  statistics: ["Average"]
@@ -61,30 +74,213 @@ module PgHero
61
74
  resp[:datapoints].sort_by { |d| d[:timestamp] }.each do |d|
62
75
  data[d[:timestamp]] = d[:average]
63
76
  end
77
+
78
+ add_missing_data(data, start_time, end_time, period) if series
79
+
64
80
  data
65
81
  else
66
82
  raise NotEnabled, "System stats not enabled"
67
83
  end
68
84
  end
69
85
 
70
- def system_stats_enabled?
71
- !!((defined?(Aws) || defined?(AWS)) && db_instance_identifier)
86
+ def azure_stats(metric_name, duration: nil, period: nil, offset: nil, series: false)
87
+ # TODO DRY with RDS stats
88
+ duration = (duration || 1.hour).to_i
89
+ period = (period || 1.minute).to_i
90
+ offset = (offset || 0).to_i
91
+ end_time = Time.at(((Time.now - offset).to_f / period).ceil * period)
92
+ start_time = end_time - duration
93
+
94
+ interval =
95
+ case period
96
+ when 60
97
+ "PT1M"
98
+ when 300
99
+ "PT5M"
100
+ when 900
101
+ "PT15M"
102
+ when 1800
103
+ "PT30M"
104
+ when 3600
105
+ "PT1H"
106
+ else
107
+ raise Error, "Unsupported period"
108
+ end
109
+
110
+ client = Azure::Monitor::Profiles::Latest::Mgmt::Client.new
111
+ timespan = "#{start_time.iso8601}/#{end_time.iso8601}"
112
+ results = client.metrics.list(
113
+ azure_resource_id,
114
+ metricnames: metric_name,
115
+ aggregation: "Average",
116
+ timespan: timespan,
117
+ interval: interval
118
+ )
119
+
120
+ data = {}
121
+ result = results.value.first
122
+ if result
123
+ result.timeseries.first.data.each do |point|
124
+ data[point.time_stamp.to_time] = point.average
125
+ end
126
+ end
127
+
128
+ add_missing_data(data, start_time, end_time, period) if series
129
+
130
+ data
72
131
  end
73
132
 
74
      private

      # Fetches a metric series from Google Cloud Monitoring for the
      # configured Cloud SQL instance (gcp_database_id, "project:instance").
      # Returns a Hash of Time => value; pads missing periods when series.
      def gcp_stats(metric_name, duration: nil, period: nil, offset: nil, series: false)
        require "google/cloud/monitoring/v3"

        # TODO DRY with RDS stats
        duration = (duration || 1.hour).to_i
        period = (period || 1.minute).to_i
        offset = (offset || 0).to_i
        end_time = Time.at(((Time.now - offset).to_f / period).ceil * period)
        start_time = end_time - duration

        # validate input since we need to interpolate below
        raise Error, "Invalid metric name" unless metric_name =~ /\A[a-z\/_]+\z/i
        raise Error, "Invalid database id" unless gcp_database_id =~ /\A[a-z\-:]+\z/i

        # we handle three situations:
        # 1. google-cloud-monitoring-v3
        # 2. google-cloud-monitoring >= 1
        # 3. google-cloud-monitoring < 1

        # for situations 1 and 2
        # Google::Cloud::Monitoring.metric_service is documented
        # but doesn't work for situation 1
        if defined?(Google::Cloud::Monitoring::V3::MetricService::Client)
          client = Google::Cloud::Monitoring::V3::MetricService::Client.new

          interval = Google::Cloud::Monitoring::V3::TimeInterval.new
          interval.end_time = Google::Protobuf::Timestamp.new(seconds: end_time.to_i)
          # subtract period to make sure we get first data point
          interval.start_time = Google::Protobuf::Timestamp.new(seconds: (start_time - period).to_i)

          aggregation = Google::Cloud::Monitoring::V3::Aggregation.new
          # may be better to use ALIGN_NEXT_OLDER for space stats to show most recent data point
          # stick with average for now to match AWS
          aggregation.per_series_aligner = Google::Cloud::Monitoring::V3::Aggregation::Aligner::ALIGN_MEAN
          aggregation.alignment_period = period

          results = client.list_time_series({
            name: "projects/#{gcp_database_id.split(":").first}",
            filter: "metric.type = \"cloudsql.googleapis.com/database/#{metric_name}\" AND resource.label.database_id = \"#{gcp_database_id}\"",
            interval: interval,
            view: Google::Cloud::Monitoring::V3::ListTimeSeriesRequest::TimeSeriesView::FULL,
            aggregation: aggregation
          })
        else
          # older google-cloud-monitoring gems use positional arguments
          require "google/cloud/monitoring"

          client = Google::Cloud::Monitoring::Metric.new

          interval = Google::Monitoring::V3::TimeInterval.new
          interval.end_time = Google::Protobuf::Timestamp.new(seconds: end_time.to_i)
          # subtract period to make sure we get first data point
          interval.start_time = Google::Protobuf::Timestamp.new(seconds: (start_time - period).to_i)

          aggregation = Google::Monitoring::V3::Aggregation.new
          # may be better to use ALIGN_NEXT_OLDER for space stats to show most recent data point
          # stick with average for now to match AWS
          aggregation.per_series_aligner = Google::Monitoring::V3::Aggregation::Aligner::ALIGN_MEAN
          aggregation.alignment_period = period

          results = client.list_time_series(
            "projects/#{gcp_database_id.split(":").first}",
            "metric.type = \"cloudsql.googleapis.com/database/#{metric_name}\" AND resource.label.database_id = \"#{gcp_database_id}\"",
            interval,
            Google::Monitoring::V3::ListTimeSeriesRequest::TimeSeriesView::FULL,
            aggregation: aggregation
          )
        end

        data = {}
        result = results.first
        if result
          result.points.each do |point|
            time = Time.at(point.interval.start_time.seconds)
            value = point.value.double_value
            # GCP reports utilization as a 0-1 fraction; convert to percent
            value *= 100 if metric_name == "cpu/utilization"
            data[time] = value
          end
        end

        add_missing_data(data, start_time, end_time, period) if series

        data
      end
77
218
 
78
- def secret_access_key
79
- ENV["PGHERO_SECRET_ACCESS_KEY"] || ENV["AWS_SECRET_ACCESS_KEY"]
219
+ def system_stats(metric_key, **options)
220
+ case system_stats_provider
221
+ when :aws
222
+ metrics = {
223
+ cpu: "CPUUtilization",
224
+ connections: "DatabaseConnections",
225
+ replication_lag: "ReplicaLag",
226
+ read_iops: "ReadIOPS",
227
+ write_iops: "WriteIOPS",
228
+ free_space: "FreeStorageSpace"
229
+ }
230
+ rds_stats(metrics[metric_key], **options)
231
+ when :gcp
232
+ if metric_key == :free_space
233
+ quota = gcp_stats("disk/quota", **options)
234
+ used = gcp_stats("disk/bytes_used", **options)
235
+ free_space(quota, used)
236
+ else
237
+ metrics = {
238
+ cpu: "cpu/utilization",
239
+ connections: "postgresql/num_backends",
240
+ replication_lag: "replication/replica_lag",
241
+ read_iops: "disk/read_ops_count",
242
+ write_iops: "disk/write_ops_count"
243
+ }
244
+ gcp_stats(metrics[metric_key], **options)
245
+ end
246
+ when :azure
247
+ if metric_key == :free_space
248
+ quota = azure_stats("storage_limit", **options)
249
+ used = azure_stats("storage_used", **options)
250
+ free_space(quota, used)
251
+ else
252
+ # no read_iops, write_iops
253
+ # could add io_consumption_percent
254
+ metrics = {
255
+ cpu: "cpu_percent",
256
+ connections: "active_connections",
257
+ replication_lag: "pg_replica_log_delay_in_seconds"
258
+ }
259
+ raise Error, "Metric not supported" unless metrics[metric_key]
260
+ azure_stats(metrics[metric_key], **options)
261
+ end
262
+ else
263
+ raise NotEnabled, "System stats not enabled"
264
+ end
80
265
  end
81
266
 
82
- def region
83
- ENV["PGHERO_REGION"] || ENV["AWS_REGION"] || (defined?(Aws) && Aws.config[:region]) || "us-east-1"
267
+ # only use data points included in both series
268
+ # this also eliminates need to align Time.now
269
+ def free_space(quota, used)
270
+ data = {}
271
+ quota.each do |k, v|
272
+ data[k] = v - used[k] if v && used[k]
273
+ end
274
+ data
84
275
  end
85
276
 
86
- def db_instance_identifier
87
- databases[current_database].db_instance_identifier
277
+ def add_missing_data(data, start_time, end_time, period)
278
+ time = start_time
279
+ end_time = end_time
280
+ while time < end_time
281
+ data[time] ||= nil
282
+ time += period
283
+ end
88
284
  end
89
285
  end
90
286
  end