active_postgres 0.4.0
- checksums.yaml +7 -0
- data/LICENSE +23 -0
- data/README.md +158 -0
- data/exe/activepostgres +5 -0
- data/lib/active_postgres/cli.rb +157 -0
- data/lib/active_postgres/cluster_deployment_flow.rb +85 -0
- data/lib/active_postgres/component_resolver.rb +24 -0
- data/lib/active_postgres/components/base.rb +38 -0
- data/lib/active_postgres/components/core.rb +158 -0
- data/lib/active_postgres/components/extensions.rb +99 -0
- data/lib/active_postgres/components/monitoring.rb +55 -0
- data/lib/active_postgres/components/pgbackrest.rb +94 -0
- data/lib/active_postgres/components/pgbouncer.rb +137 -0
- data/lib/active_postgres/components/repmgr.rb +651 -0
- data/lib/active_postgres/components/ssl.rb +86 -0
- data/lib/active_postgres/configuration.rb +190 -0
- data/lib/active_postgres/connection_pooler.rb +429 -0
- data/lib/active_postgres/credentials.rb +17 -0
- data/lib/active_postgres/deployment_flow.rb +154 -0
- data/lib/active_postgres/error_handler.rb +185 -0
- data/lib/active_postgres/failover.rb +83 -0
- data/lib/active_postgres/generators/active_postgres/install_generator.rb +186 -0
- data/lib/active_postgres/health_checker.rb +244 -0
- data/lib/active_postgres/installer.rb +114 -0
- data/lib/active_postgres/log_sanitizer.rb +67 -0
- data/lib/active_postgres/logger.rb +125 -0
- data/lib/active_postgres/performance_tuner.rb +246 -0
- data/lib/active_postgres/rails/database_config.rb +174 -0
- data/lib/active_postgres/rails/migration_guard.rb +25 -0
- data/lib/active_postgres/railtie.rb +28 -0
- data/lib/active_postgres/retry_helper.rb +80 -0
- data/lib/active_postgres/rollback_manager.rb +140 -0
- data/lib/active_postgres/secrets.rb +86 -0
- data/lib/active_postgres/ssh_executor.rb +288 -0
- data/lib/active_postgres/standby_deployment_flow.rb +122 -0
- data/lib/active_postgres/validator.rb +143 -0
- data/lib/active_postgres/version.rb +3 -0
- data/lib/active_postgres.rb +67 -0
- data/lib/tasks/postgres.rake +855 -0
- data/lib/tasks/rolling_update.rake +258 -0
- data/lib/tasks/rotate_credentials.rake +193 -0
- data/templates/pg_hba.conf.erb +47 -0
- data/templates/pgbackrest.conf.erb +43 -0
- data/templates/pgbouncer.ini.erb +55 -0
- data/templates/postgresql.conf.erb +157 -0
- data/templates/repmgr.conf.erb +40 -0
- metadata +224 -0
data/lib/active_postgres/performance_tuner.rb
@@ -0,0 +1,246 @@
module ActivePostgres
  # Automatic PostgreSQL performance tuning based on hardware specs
  # Following best practices from PGTune and PostgreSQL documentation
  class PerformanceTuner
    attr_reader :config, :ssh_executor, :logger

    def initialize(config, ssh_executor, logger = Logger.new)
      @config = config
      @ssh_executor = ssh_executor
      @logger = logger
    end

    def tune_for_host(host, db_type: 'web')
      @logger.info 'Analyzing hardware for optimal PostgreSQL configuration...'

      hardware = analyze_hardware(host)
      settings = calculate_optimal_settings(hardware, db_type)

      @logger.info 'Hardware detected:'
      @logger.info " CPU Cores: #{hardware[:cpu_cores]}"
      @logger.info " RAM: #{format_bytes(hardware[:total_memory])}"
      @logger.info " Storage: #{hardware[:storage_type]}"

      settings
    end

    private

    def analyze_hardware(host)
      hardware = {}
      pg_version = @config.version.to_f # Capture before SSH block

      @ssh_executor.execute_on_host(host) do
        # Get CPU cores
        hardware[:cpu_cores] = capture(:nproc).strip.to_i

        # Get total memory in KB
        mem_info = capture(:cat, '/proc/meminfo')
        hardware[:total_memory] = mem_info.match(/MemTotal:\s+(\d+)/)[1].to_i * 1024

        # Detect storage type (SSD vs HDD) - inline to avoid scope issues
        disk_info = begin
          capture(:lsblk, '-d', '-o', 'name,rota', '2>/dev/null')
        rescue StandardError
          ''
        end
        # rota=0 means SSD, rota=1 means HDD
        hardware[:storage_type] = if disk_info.include?('nvme') || disk_info.match(/\s0$/)
                                    'ssd'
                                  else
                                    'hdd'
                                  end

        # Get PostgreSQL version
        hardware[:pg_version] = pg_version
      end

      hardware
    end

    def calculate_optimal_settings(hardware, db_type)
      settings = {}

      total_memory = hardware[:total_memory]
      cpu_cores = hardware[:cpu_cores]

      # Calculate based on database type
      case db_type
      when 'web'
        # Web application (many connections, mixed read/write)
        settings[:shared_buffers] = calculate_shared_buffers(total_memory, 0.25)
        settings[:effective_cache_size] = calculate_effective_cache_size(total_memory, 0.75)
        settings[:maintenance_work_mem] = calculate_maintenance_work_mem(total_memory, 0.05)
        settings[:work_mem] = calculate_work_mem(total_memory, 200)
        settings[:max_connections] = [200, cpu_cores * 25].min

      when 'oltp'
        # Online Transaction Processing (many small transactions)
        settings[:shared_buffers] = calculate_shared_buffers(total_memory, 0.25)
        settings[:effective_cache_size] = calculate_effective_cache_size(total_memory, 0.75)
        settings[:maintenance_work_mem] = calculate_maintenance_work_mem(total_memory, 0.05)
        settings[:work_mem] = calculate_work_mem(total_memory, 300)
        settings[:max_connections] = [300, cpu_cores * 40].min

      when 'dw'
        # Data Warehouse (complex queries, fewer connections)
        settings[:shared_buffers] = calculate_shared_buffers(total_memory, 0.4)
        settings[:effective_cache_size] = calculate_effective_cache_size(total_memory, 0.8)
        settings[:maintenance_work_mem] = calculate_maintenance_work_mem(total_memory, 0.1)
        settings[:work_mem] = calculate_work_mem(total_memory, 50)
        settings[:max_connections] = [50, cpu_cores * 10].min

      when 'desktop'
        # Development/Desktop (conservative settings)
        settings[:shared_buffers] = calculate_shared_buffers(total_memory, 0.1)
        settings[:effective_cache_size] = calculate_effective_cache_size(total_memory, 0.25)
        settings[:maintenance_work_mem] = '64MB'
        settings[:work_mem] = '4MB'
        settings[:max_connections] = 20
      end

      # Common optimizations for all types
      settings[:checkpoint_completion_target] = 0.9
      settings[:wal_buffers] = calculate_wal_buffers(settings[:shared_buffers])
      settings[:default_statistics_target] = 100
      settings[:random_page_cost] = hardware[:storage_type] == 'ssd' ? 1.1 : 4
      settings[:effective_io_concurrency] = hardware[:storage_type] == 'ssd' ? 200 : 2
      settings[:max_worker_processes] = cpu_cores
      settings[:max_parallel_workers_per_gather] = [(cpu_cores / 2).to_i, 4].min
      settings[:max_parallel_workers] = cpu_cores
      settings[:max_parallel_maintenance_workers] = [(cpu_cores / 2).to_i, 4].min

      # WAL settings for replication
      if @config.component_enabled?(:repmgr)
        settings[:wal_level] = 'replica'
        settings[:max_wal_senders] = [10, @config.standby_hosts.size * 2].max
        settings[:max_replication_slots] = [10, @config.standby_hosts.size * 2].max
        settings[:wal_keep_size] = '1GB'
        settings[:hot_standby] = 'on'
        settings[:wal_compression] = 'on'
        settings[:archive_mode] = 'on'
        settings[:archive_command] = '/bin/true' # Will be overridden by pgbackrest if enabled
      end

      # Huge pages optimization for large memory systems
      settings[:huge_pages] = 'try' if total_memory > 32 * 1024 * 1024 * 1024 # > 32GB

      # JIT compilation for PG11+
      settings[:jit] = 'on' if hardware[:pg_version] >= 11

      # Additional settings for PG13+
      if hardware[:pg_version] >= 13
        settings[:shared_memory_type] = hardware[:storage_type] == 'ssd' ? 'sysv' : 'mmap'
        settings[:wal_init_zero] = 'off'
        settings[:wal_recycle] = 'on'
      end

      # Logging optimizations
      settings[:log_checkpoints] = 'on'
      settings[:log_connections] = 'on'
      settings[:log_disconnections] = 'on'
      settings[:log_lock_waits] = 'on'
      settings[:log_temp_files] = 0
      settings[:log_autovacuum_min_duration] = '0'
      settings[:log_min_duration_statement] = '1000' # Log slow queries > 1s

      # Statement tracking
      settings[:shared_preload_libraries] = 'pg_stat_statements'
      settings['pg_stat_statements.max'] = 10_000
      settings['pg_stat_statements.track'] = 'all'

      settings
    end

    def calculate_shared_buffers(total_memory, ratio)
      value = (total_memory * ratio).to_i

      # Cap at 40% of RAM for large memory systems
      max_value = (total_memory * 0.4).to_i
      value = [value, max_value].min

      # Minimum 128MB
      value = [value, 128 * 1024 * 1024].max

      format_memory_value(value)
    end

    def calculate_effective_cache_size(total_memory, ratio)
      value = (total_memory * ratio).to_i
      format_memory_value(value)
    end

    def calculate_maintenance_work_mem(total_memory, ratio)
      value = (total_memory * ratio).to_i

      # Cap at 2GB
      max_value = 2 * 1024 * 1024 * 1024
      value = [value, max_value].min

      # Minimum 64MB
      value = [value, 64 * 1024 * 1024].max

      format_memory_value(value)
    end

    def calculate_work_mem(total_memory, max_connections)
      # Formula: (Total RAM - shared_buffers) / (max_connections * 3)
      shared_buffers = total_memory * 0.25
      available_memory = total_memory - shared_buffers
      value = (available_memory / (max_connections * 3)).to_i

      # Reasonable bounds: 4MB to 256MB
      value = value.clamp(4 * 1024 * 1024, 256 * 1024 * 1024)

      format_memory_value(value)
    end

    def calculate_wal_buffers(shared_buffers)
      # 3% of shared_buffers, capped at 16MB
      value = parse_memory_value(shared_buffers)
      wal_buffers = (value * 0.03).to_i
      wal_buffers = [wal_buffers, 16 * 1024 * 1024].min
      format_memory_value(wal_buffers)
    end

    def format_memory_value(bytes)
      if bytes >= 1024 * 1024 * 1024
        "#{(bytes / (1024 * 1024 * 1024)).to_i}GB"
      elsif bytes >= 1024 * 1024
        "#{(bytes / (1024 * 1024)).to_i}MB"
      else
        "#{(bytes / 1024).to_i}kB"
      end
    end

    def parse_memory_value(value)
      return value if value.is_a?(Integer)

      match = value.match(/(\d+)\s*(GB|MB|kB|B)?/i)
      return 0 unless match

      number = match[1].to_i
      unit = match[2]&.upcase || 'B'

      case unit
      when 'GB' then number * 1024 * 1024 * 1024
      when 'MB' then number * 1024 * 1024
      when 'KB' then number * 1024
      else number
      end
    end

    def format_bytes(bytes)
      units = %w[B KB MB GB TB]
      unit_index = 0
      value = bytes.to_f

      while value >= 1024 && unit_index < units.length - 1
        value /= 1024
        unit_index += 1
      end

      format('%.2f %s', value, units[unit_index])
    end
  end
end
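To make the memory math above concrete, here is an illustrative walk-through (not part of the gem) of the same formulas applied by hand to a hypothetical 16 GB RAM, 8-core host using the 'web' profile:

# Illustrative only: the 'web' profile formulas for a hypothetical 16 GB, 8-core host.
total_memory = 16 * 1024**3
cpu_cores    = 8

shared_buffers  = (total_memory * 0.25).to_i                 # 4 GB (below the 40% cap)
max_connections = [200, cpu_cores * 25].min                  # 200
work_mem        = ((total_memory - total_memory * 0.25) /
                   (max_connections * 3)).to_i               # ~20 MB, within the 4 MB..256 MB bounds
wal_buffers     = [(shared_buffers * 0.03).to_i,
                   16 * 1024 * 1024].min                     # 3% of shared_buffers, capped at 16 MB

puts "shared_buffers = #{shared_buffers / 1024**2} MB"       # => 4096 MB
puts "work_mem       = #{work_mem / 1024**2} MB"             # => 20 MB
puts "wal_buffers    = #{wal_buffers / 1024**2} MB"          # => 16 MB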
data/lib/active_postgres/rails/database_config.rb
@@ -0,0 +1,174 @@
require 'yaml'

module ActivePostgres
  module Rails
    class DatabaseConfig
      DEFAULT_ROLE = 'primary'.freeze

      def self.generate(environment = 'production', app_name: nil, config: nil)
        builder = Builder.new(environment, app_name: app_name, config: config)
        builder.build_connections
      end

      def self.render(environment = 'production', app_name: nil, config: nil)
        connections = generate(environment, app_name: app_name, config: config)
        render_yaml(environment, connections)
      end

      def self.render_yaml(environment, connections)
        output = "#{environment}:\n"
        connections.each do |role, config|
          output += "  #{role}:\n"
          output += render_hash(config, indent: 4)
        end
        output
      end

      def self.render_hash(hash, indent: 0)
        output = ''
        hash.each do |key, value|
          spaces = ' ' * indent
          if value.is_a?(Hash)
            output += "#{spaces}#{key}:\n"
            output += render_hash(value, indent: indent + 2)
          elsif value.to_s.include?('<%=')
            # Don't quote ERB tags
            output += "#{spaces}#{key}: #{value}\n"
          elsif value.is_a?(String)
            output += "#{spaces}#{key}: #{value}\n"
          elsif [true, false].include?(value)
            output += "#{spaces}#{key}: #{value}\n"
          elsif value.is_a?(Integer)
            output += "#{spaces}#{key}: #{value}\n"
          else
            output += "#{spaces}#{key}: #{value}\n"
          end
        end
        output
      end

      def self.render_partial(environment = 'production', app_name: nil, config: nil)
        <<~YAML
          # Generated by active_postgres. This file is evaluated via ERB from config/database.yml.
          # Update config/postgres.yml or the POSTGRES_* environment variables to change values.

          #{render(environment, app_name: app_name, config: config)}
        YAML
      end

      class Builder
        attr_reader :environment, :config, :app_name

        def initialize(environment, app_name: nil, config: nil)
          @environment = environment
          @app_name = normalize_app_name(app_name)
          @config = config || Configuration.load('config/postgres.yml', environment)
        end

        def build_connections
          connections = {
            'primary' => deep_merge(base_connection, primary_connection_overrides)
          }

          replica_config = replica_connection_overrides
          connections['primary_replica'] = deep_merge(base_connection, replica_config) if replica_config

          connections
        end

        private

        def base_connection
          {
            'adapter' => 'postgresql',
            'encoding' => 'unicode',
            'pool' => env_value('RAILS_MAX_THREADS', fallback: 5),
            'username' => env_value('POSTGRES_APP_USER', fallback: app_name),
            'password' => env_value('POSTGRES_APP_PASSWORD'),
            'variables' => {
              'statement_timeout' => env_value('POSTGRES_STATEMENT_TIMEOUT', fallback: '15s')
            }
          }
        end

        def primary_connection_overrides
          primary_config = {
            'database' => env_value('POSTGRES_DATABASE', fallback: default_database_name),
            'host' => env_value('POSTGRES_PRIMARY_HOST', fallback: config.primary_replication_host),
            'port' => env_value('POSTGRES_PRIMARY_PORT', fallback: 5432)
          }

          # Set application_name in variables, not at top level
          primary_config['variables'] = (primary_config['variables'] || {}).merge(
            'application_name' => env_value('POSTGRES_APPLICATION_NAME', fallback: "#{app_name}-primary")
          )

          primary_config
        end

        def replica_connection_overrides
          standby_host = config.standby_hosts.first
          return nil unless standby_host

          resolved_host = config.replication_host_for(standby_host)

          replica_config = {
            'database' => env_value('POSTGRES_DATABASE', fallback: default_database_name),
            'host' => env_value('POSTGRES_REPLICA_HOST', fallback: resolved_host),
            'port' => env_value('POSTGRES_REPLICA_PORT', fallback: 5432),
            'replica' => true,
            'role' => 'reading'
          }

          # Set application_name in variables, not at top level
          replica_config['variables'] = (replica_config['variables'] || {}).merge(
            'application_name' => env_value('POSTGRES_APPLICATION_NAME', fallback: "#{app_name}-replica")
          )

          replica_config
        end

        def env_value(name, fallback: nil)
          if fallback.nil?
            "<%= ENV['#{name}'] %>"
          elsif fallback.is_a?(String)
            "<%= ENV.fetch('#{name}') { '#{fallback}' } %>"
          else
            "<%= ENV.fetch('#{name}') { #{fallback} } %>"
          end
        end

        def normalize_app_name(custom_name)
          chosen = custom_name || ENV['BORING_APP_NAME'] || default_app_name
          chosen.to_s.tr('- ', '_')
        end

        def default_app_name
          if defined?(::Rails) && ::Rails.application
            ::Rails.application.class.module_parent_name.underscore
          else
            File.basename(Dir.pwd).tr('- ', '_')
          end
        rescue StandardError
          'app'
        end

        def default_database_name
          "#{app_name}_#{environment}"
        end

        def deep_merge(hash1, hash2)
          result = hash1.dup
          hash2.each do |key, value|
            result[key] = if result[key].is_a?(Hash) && value.is_a?(Hash)
                            deep_merge(result[key], value)
                          else
                            value
                          end
          end
          result
        end
      end
    end
  end
end
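For orientation, here is a minimal, hypothetical call to the renderer above and the YAML it would emit; the connections hash is hand-built so Configuration.load is not involved:

# Hypothetical example, not from the gem: render a hand-built connections hash.
connections = {
  'primary' => {
    'adapter' => 'postgresql',
    'pool' => "<%= ENV.fetch('RAILS_MAX_THREADS') { 5 } %>",
    'variables' => { 'statement_timeout' => '15s' }
  }
}
puts ActivePostgres::Rails::DatabaseConfig.render_yaml('production', connections)
# production:
#   primary:
#     adapter: postgresql
#     pool: <%= ENV.fetch('RAILS_MAX_THREADS') { 5 } %>
#     variables:
#       statement_timeout: 15s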
data/lib/active_postgres/rails/migration_guard.rb
@@ -0,0 +1,25 @@
module ActivePostgres
  module Rails
    module MigrationGuard
      def exec_migration(conn, direction)
        # Check if we're connected to a read replica
        if connection_is_replica?(conn)
          raise ActiveRecord::MigrationError,
                'Cannot run migrations on read replica! Connect to primary database.'
        end

        super
      end

      private

      def connection_is_replica?(conn)
        # Check if PostgreSQL is in recovery mode (i.e., it's a replica)
        result = conn.execute('SELECT pg_is_in_recovery();')
        result.first['pg_is_in_recovery'] == 't'
      rescue StandardError
        false
      end
    end
  end
end
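The guard's check boils down to PostgreSQL's pg_is_in_recovery(). As an illustrative aside (not part of the gem), the same check can be run from a Rails console to confirm which node the current session is attached to:

# Illustrative only: run the guard's recovery check by hand in a console.
in_recovery = ActiveRecord::Base.connection.select_value('SELECT pg_is_in_recovery();')
# Older adapters may return 't'/'f' strings instead of booleans, so normalize.
replica = [true, 't'].include?(in_recovery)
puts replica ? 'Connected to a replica (read-only)' : 'Connected to the primary'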
data/lib/active_postgres/railtie.rb
@@ -0,0 +1,28 @@
if defined?(Rails::Railtie)
  require 'rails/railtie'

  module ActivePostgres
    class Railtie < ::Rails::Railtie
      railtie_name :active_postgres

      rake_tasks do
        Dir[File.expand_path('../tasks/**/*.rake', __dir__)].each { |f| load f }
      end

      generators do
        require_relative 'generators/active_postgres/install_generator'
      end

      initializer 'active_postgres.migration_guard' do
        ::ActiveSupport.on_load(:active_record) do
          require_relative 'rails/migration_guard'
          ::ActiveRecord::Migration.prepend(ActivePostgres::Rails::MigrationGuard)
        end
      end

      console do
        puts 'ActivePostgres loaded. Try: ActivePostgres.status'
      end
    end
  end
end
data/lib/active_postgres/retry_helper.rb
@@ -0,0 +1,80 @@
module ActivePostgres
  module RetryHelper
    class RetryExhausted < StandardError; end

    # Retry a block with exponential backoff
    # @param max_attempts [Integer] Maximum number of attempts
    # @param initial_delay [Float] Initial delay in seconds
    # @param max_delay [Float] Maximum delay between retries
    # @param backoff_factor [Float] Multiplier for exponential backoff
    # @param on [Array<Class>] Exception classes to retry on
    # @yield Block to retry
    # @return Result of the block
    def retry_with_backoff(max_attempts: 3, initial_delay: 1.0, max_delay: 30.0,
                           backoff_factor: 2.0, on: [StandardError])
      attempt = 0
      delay = initial_delay

      begin
        attempt += 1
        yield
      rescue *on => e
        if attempt < max_attempts
          puts " Attempt #{attempt}/#{max_attempts} failed: #{e.message}"
          puts " Retrying in #{delay.round(1)}s..."
          sleep delay
          delay = [delay * backoff_factor, max_delay].min
          retry
        else
          puts " All #{max_attempts} attempts failed"
          raise RetryExhausted, "Failed after #{max_attempts} attempts: #{e.message}"
        end
      end
    end

    # Wait for a condition to be true with timeout
    # @param timeout [Float] Maximum time to wait in seconds
    # @param interval [Float] Time between checks in seconds
    # @param description [String] Description of what we're waiting for
    # @yield Block that should return true when condition is met
    # @return [Boolean] true if condition met, false if timeout
    def wait_for(timeout: 60, interval: 3, description: 'condition')
      deadline = Time.now + timeout
      attempts = 0

      while Time.now < deadline
        attempts += 1

        begin
          return true if yield
        rescue StandardError => e
          puts " Check #{attempts} raised error: #{e.message}" if (attempts % 10).zero?
        end

        remaining = (deadline - Time.now).to_i
        puts " Waiting for #{description}... (#{remaining}s remaining)" if (attempts % 5).zero?
        sleep interval
      end

      puts " ⚠️ Timeout waiting for #{description} after #{timeout}s"
      false
    end

    # Execute a block with a timeout
    # @param timeout [Float] Timeout in seconds
    # @param description [String] Description of the operation
    # @yield Block to execute
    # @return Result of the block
    def with_timeout(timeout: 300, description: 'operation')
      result = nil
      thread = Thread.new { result = yield }

      unless thread.join(timeout)
        thread.kill
        raise Timeout::Error, "#{description} timed out after #{timeout}s"
      end

      result
    end
  end
end
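A brief usage sketch for the mixin above; the consumer class and the two helper calls inside the blocks are made up for illustration, only the RetryHelper API itself comes from the gem:

# Hypothetical consumer of ActivePostgres::RetryHelper.
class ExampleTask
  include ActivePostgres::RetryHelper

  def run
    # Retries up to 5 times, sleeping 2s, 4s, 8s, ... capped at 30s,
    # and raises RetryExhausted once all attempts fail.
    retry_with_backoff(max_attempts: 5, initial_delay: 2.0) do
      check_primary_reachable! # hypothetical step that may raise
    end

    # Polls every 3 seconds for up to 120 seconds; returns false on timeout.
    wait_for(timeout: 120, interval: 3, description: 'standby to catch up') do
      standby_caught_up? # hypothetical predicate returning true/false
    end
  end
end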
data/lib/active_postgres/rollback_manager.rb
@@ -0,0 +1,140 @@
module ActivePostgres
  class RollbackManager
    attr_reader :config, :ssh_executor, :logger, :rollback_stack

    def initialize(config, ssh_executor, logger: nil)
      @config = config
      @ssh_executor = ssh_executor
      @logger = logger || Logger.new
      @rollback_stack = []
    end

    # Register a rollback action
    # @param description [String] Description of what will be rolled back
    # @param host [String] Host to execute rollback on
    # @yield Block to execute for rollback
    def register(description, host: nil, &block)
      rollback_stack.push({
                            description: description,
                            host: host,
                            action: block
                          })
    end

    # Execute all registered rollback actions in reverse order
    def execute
      return if rollback_stack.empty?

      logger.warn "Executing rollback (#{rollback_stack.count} actions)..."

      rollback_stack.reverse.each do |rollback|
        logger.info " Rolling back: #{rollback[:description]}"

        if rollback[:host]
          ssh_executor.execute_on_host(rollback[:host]) do
            instance_eval(&rollback[:action])
          end
        else
          rollback[:action].call
        end

        logger.success ' Completed'
      rescue StandardError => e
        logger.error " Failed: #{e.message}"
        # Continue with other rollback actions
      end

      clear
      logger.success 'Rollback completed'
    end

    # Clear all registered rollback actions
    def clear
      rollback_stack.clear
    end

    # Wrap a block with automatic rollback on failure
    # @param description [String] Description of the operation
    # @yield Block to execute
    def with_rollback(description: 'operation')
      result = yield
      clear # Success - clear rollback stack
      result
    rescue StandardError => e
      logger.error "#{description} failed: #{e.message}"
      execute if rollback_stack.any?
      raise
    end

    # Common rollback actions for PostgreSQL components

    def register_postgres_cluster_removal(host, version)
      register("Remove PostgreSQL cluster on #{host}", host: host) do
        begin
          execute :sudo, 'systemctl', 'stop', 'postgresql'
        rescue StandardError
          nil
        end
        execute :sudo, 'pg_dropcluster', '--stop', version.to_s, 'main', rescue: nil
      end
    end

    def register_package_removal(host, packages)
      register("Remove packages on #{host}: #{packages.join(', ')}", host: host) do
        execute :sudo, 'apt-get', 'remove', '-y', *packages
      rescue StandardError
        nil
      end
    end

    def register_file_removal(host, file_path)
      register("Remove file #{file_path} on #{host}", host: host) do
        execute :sudo, 'rm', '-f', file_path
      rescue StandardError
        nil
      end
    end

    def register_directory_removal(host, dir_path)
      register("Remove directory #{dir_path} on #{host}", host: host) do
        execute :sudo, 'rm', '-rf', dir_path
      rescue StandardError
        nil
      end
    end

    def register_user_removal(host, username)
      register("Remove user #{username} on #{host}", host: host) do
        execute :sudo, 'userdel', username
      rescue StandardError
        nil
      end
    end

    def register_database_removal(host, database_name)
      postgres_user = config.postgres_user
      register("Drop database #{database_name} on #{host}", host: host) do
        sql = "DROP DATABASE IF EXISTS #{database_name};"
        upload! StringIO.new(sql), '/tmp/drop_database.sql'
        execute :chmod, '644', '/tmp/drop_database.sql'
        execute :sudo, '-u', postgres_user, 'psql', '-f', '/tmp/drop_database.sql'
        execute :rm, '-f', '/tmp/drop_database.sql'
      rescue StandardError
        nil
      end
    end

    def register_postgres_user_removal(host, username)
      postgres_user = config.postgres_user
      register("Drop PostgreSQL user #{username} on #{host}", host: host) do
        sql = "DROP USER IF EXISTS #{username};"
        upload! StringIO.new(sql), '/tmp/drop_user.sql'
        execute :chmod, '644', '/tmp/drop_user.sql'
        execute :sudo, '-u', postgres_user, 'psql', '-f', '/tmp/drop_user.sql'
        execute :rm, '-f', '/tmp/drop_user.sql'
      rescue StandardError
        nil
      end
    end
  end
end
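Finally, an illustrative sketch (not from the gem) of how the rollback manager above is meant to be driven; config, ssh_executor, the host name, package names, and the install/write steps are placeholders:

# Hypothetical driver code for ActivePostgres::RollbackManager.
manager = ActivePostgres::RollbackManager.new(config, ssh_executor)

manager.with_rollback(description: 'provision standby') do
  # Register the undo action before performing each step; the actions run,
  # in reverse order, only if the wrapped block raises.
  manager.register_package_removal('db-standby-1', %w[postgresql-16 repmgr])
  install_packages('db-standby-1') # hypothetical step

  manager.register_file_removal('db-standby-1', '/etc/repmgr.conf')
  write_repmgr_conf('db-standby-1') # hypothetical step
end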