activerecord-cipherstash-pg-adapter 0.8.1 → 0.8.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/activerecord-cipherstash-pg-adapter.gemspec +1 -1
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/column.rb +70 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/database_statements.rb +199 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/explain_pretty_printer.rb +44 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/array.rb +91 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/bit.rb +53 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/bit_varying.rb +15 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/bytea.rb +17 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/cidr.rb +48 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/date.rb +31 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/date_time.rb +36 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/decimal.rb +15 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/enum.rb +20 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/hstore.rb +109 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/inet.rb +15 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/interval.rb +49 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/jsonb.rb +15 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/legacy_point.rb +44 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/macaddr.rb +25 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/money.rb +41 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/oid.rb +15 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/point.rb +64 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/range.rb +124 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/specialized_string.rb +18 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/timestamp.rb +15 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/timestamp_with_time_zone.rb +30 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/type_map_initializer.rb +125 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/uuid.rb +35 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/vector.rb +28 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid/xml.rb +30 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/oid.rb +38 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/quoting.rb +237 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/referential_integrity.rb +71 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/schema_creation.rb +170 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/schema_definitions.rb +372 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/schema_dumper.rb +116 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/schema_statements.rb +1110 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/type_metadata.rb +44 -0
- data/lib/active_record/connection_adapters/7.1/cipherstash_pg/utils.rb +79 -0
- data/lib/active_record/connection_adapters/7.1/postgres_cipherstash_adapter.rb +1266 -0
- data/lib/active_record/connection_adapters/postgres_cipherstash_adapter.rb +5 -1
- data/lib/version.rb +1 -1
- metadata +44 -5
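For orientation before the file listing: the new Rails 7.1-specific adapter is wired into Active Record through ConnectionHandling#postgres_cipherstash_connection and ConnectionAdapters::CipherStashPGAdapter (both visible in the diff below). The following is a minimal connection sketch, not documented usage from the gem: it assumes the gem is installed and that the adapter key is postgres_cipherstash, inferred from the "#{adapter}_connection" naming convention and from the data/lib/active_record/connection_adapters/postgres_cipherstash_adapter.rb entry in the file list above.

    require "active_record"
    # Path taken from the file list above; in a Rails app with Bundler this
    # require is normally triggered automatically by the adapter setting.
    require "active_record/connection_adapters/postgres_cipherstash_adapter"

    ActiveRecord::Base.establish_connection(
      adapter:  "postgres_cipherstash",   # assumed key; resolves to ConnectionAdapters::CipherStashPGAdapter
      host:     "localhost",
      database: "myapp_development",
      username: "myapp"
    )

The full contents of the new 7.1 adapter file follow.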
data/lib/active_record/connection_adapters/7.1/postgres_cipherstash_adapter.rb (new file)
@@ -0,0 +1,1266 @@
+# frozen_string_literal: true
+
+require "active_support/core_ext/object/try"
+require "active_record/connection_adapters/abstract_adapter"
+require "active_record/connection_adapters/statement_pool"
+require "active_record/connection_adapters/cipherstash_column_mapper"
+
+require_relative "./cipherstash_pg/column"
+require_relative "./cipherstash_pg/database_statements"
+require_relative "./cipherstash_pg/explain_pretty_printer"
+require_relative "./cipherstash_pg/oid"
+require_relative "./cipherstash_pg/quoting"
+require_relative "./cipherstash_pg/referential_integrity"
+require_relative "./cipherstash_pg/schema_creation"
+require_relative "./cipherstash_pg/schema_definitions"
+require_relative "./cipherstash_pg/schema_dumper"
+require_relative "./cipherstash_pg/schema_statements"
+require_relative "./cipherstash_pg/type_metadata"
+require_relative "./cipherstash_pg/utils"
+
+module ActiveRecord
+  module ConnectionHandling # :nodoc:
+    ###########################################################################
+    # Everything from this point onwards is a copy-paste or inherit from Rails'
+    # activerecord/lib/active_record/connection_adapters/postgresql_adapter.rb
+    # with some changes
+    #
+    # The changes besides name changes include:
+    # * ore_64_8_v1_term and ore_64_8_v1 are registered
+    # * column_definitions maps with CipherStashColumnMapper.map_column_definitions
+    #
+    # (We can't just inherit from PostgreSQLAdapter because we cannot load the
+    # postgresql_adapter.rb file; that would load 'pg', and we cannot [right
+    # now] have both 'cipherstash-pg' and 'pg' loaded at the same time.)
+    ###########################################################################
+    def postgresql_adapter_class
+      ConnectionAdapters::CipherStashPGAdapter
+    end
+
+    # Establishes a connection to the database that's used by all Active Record objects
+    def postgres_cipherstash_connection(config)
+      postgresql_adapter_class.new(config)
+    end
+  end
+
+  module ConnectionAdapters
+    # = Active Record PostgreSQL Adapter
+    #
+    # The PostgreSQL adapter works with the native C (https://github.com/ged/ruby-pg) driver.
+    #
+    # Options:
+    #
+    # * <tt>:host</tt> - Defaults to a Unix-domain socket in /tmp. On machines without Unix-domain sockets,
+    #   the default is to connect to localhost.
+    # * <tt>:port</tt> - Defaults to 5432.
+    # * <tt>:username</tt> - Defaults to be the same as the operating system name of the user running the application.
+    # * <tt>:password</tt> - Password to be used if the server demands password authentication.
+    # * <tt>:database</tt> - Defaults to be the same as the username.
+    # * <tt>:schema_search_path</tt> - An optional schema search path for the connection given
+    #   as a string of comma-separated schema names. This is backward-compatible with the <tt>:schema_order</tt> option.
+    # * <tt>:encoding</tt> - An optional client encoding that is used in a <tt>SET client_encoding TO
+    #   <encoding></tt> call on the connection.
+    # * <tt>:min_messages</tt> - An optional client min messages that is used in a
+    #   <tt>SET client_min_messages TO <min_messages></tt> call on the connection.
+    # * <tt>:variables</tt> - An optional hash of additional parameters that
+    #   will be used in <tt>SET SESSION key = val</tt> calls on the connection.
+    # * <tt>:insert_returning</tt> - An optional boolean to control the use of <tt>RETURNING</tt> for <tt>INSERT</tt> statements
+    #   defaults to true.
+    #
+    # Any further options are used as connection parameters to libpq. See
+    # https://www.postgresql.org/docs/current/static/libpq-connect.html for the
+    # list of parameters.
+    #
+    # In addition, default connection parameters of libpq can be set per environment variables.
+    # See https://www.postgresql.org/docs/current/static/libpq-envars.html .
+    class CipherStashPGAdapter < AbstractAdapter
+      ADAPTER_NAME = "CipherStashPG"
+
+      class << self
+        def new_client(conn_params)
+          ::CipherStashPG.connect(**conn_params)
+        rescue ::CipherStashPG::Error => error
+          if conn_params && conn_params[:dbname] == "postgres"
+            raise ActiveRecord::ConnectionNotEstablished, error.message
+          elsif conn_params && conn_params[:dbname] && error.message.include?(conn_params[:dbname])
+            raise ActiveRecord::NoDatabaseError.db_error(conn_params[:dbname])
+          elsif conn_params && conn_params[:user] && error.message.include?(conn_params[:user])
+            raise ActiveRecord::DatabaseConnectionError.username_error(conn_params[:user])
+          elsif conn_params && conn_params[:host] && error.message.include?(conn_params[:host])
+            raise ActiveRecord::DatabaseConnectionError.hostname_error(conn_params[:host])
+          else
+            raise ActiveRecord::ConnectionNotEstablished, error.message
+          end
+        end
+
+        def dbconsole(config, options = {})
+          pg_config = config.configuration_hash
+
+          ENV["PGUSER"] = pg_config[:username] if pg_config[:username]
+          ENV["PGHOST"] = pg_config[:host] if pg_config[:host]
+          ENV["PGPORT"] = pg_config[:port].to_s if pg_config[:port]
+          ENV["PGPASSWORD"] = pg_config[:password].to_s if pg_config[:password] && options[:include_password]
+          ENV["PGSSLMODE"] = pg_config[:sslmode].to_s if pg_config[:sslmode]
+          ENV["PGSSLCERT"] = pg_config[:sslcert].to_s if pg_config[:sslcert]
+          ENV["PGSSLKEY"] = pg_config[:sslkey].to_s if pg_config[:sslkey]
+          ENV["PGSSLROOTCERT"] = pg_config[:sslrootcert].to_s if pg_config[:sslrootcert]
+          if pg_config[:variables]
+            ENV["PGOPTIONS"] = pg_config[:variables].filter_map do |name, value|
+              "-c #{name}=#{value.to_s.gsub(/[ \\]/, '\\\\\0')}" unless value == ":default" || value == :default
+            end.join(" ")
+          end
+          find_cmd_and_exec("psql", config.database)
+        end
+      end
+
+      ##
+      # :singleton-method:
+      # PostgreSQL allows the creation of "unlogged" tables, which do not record
+      # data in the PostgreSQL Write-Ahead Log. This can make the tables faster,
+      # but significantly increases the risk of data loss if the database
+      # crashes. As a result, this should not be used in production
+      # environments. If you would like all created tables to be unlogged in
+      # the test environment you can add the following line to your test.rb
+      # file:
+      #
+      #   ActiveRecord::ConnectionAdapters::CipherStashPGAdapter.create_unlogged_tables = true
+      class_attribute :create_unlogged_tables, default: false
+
+      ##
+      # :singleton-method:
+      # PostgreSQL supports multiple types for DateTimes. By default, if you use +datetime+
+      # in migrations, \Rails will translate this to a PostgreSQL "timestamp without time zone".
+      # Change this in an initializer to use another NATIVE_DATABASE_TYPES. For example, to
+      # store DateTimes as "timestamp with time zone":
+      #
+      #   ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.datetime_type = :timestamptz
+      #
+      # Or if you are adding a custom type:
+      #
+      #   ActiveRecord::ConnectionAdapters::PostgreSQLAdapter::NATIVE_DATABASE_TYPES[:my_custom_type] = { name: "my_custom_type_name" }
+      #   ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.datetime_type = :my_custom_type
+      #
+      # If you're using +:ruby+ as your +config.active_record.schema_format+ and you change this
+      # setting, you should immediately run <tt>bin/rails db:migrate</tt> to update the types in your schema.rb.
+      class_attribute :datetime_type, default: :timestamp
+
+      NATIVE_DATABASE_TYPES = {
+        primary_key: "bigserial primary key",
+        string: { name: "character varying" },
+        text: { name: "text" },
+        integer: { name: "integer", limit: 4 },
+        bigint: { name: "bigint" },
+        float: { name: "float" },
+        decimal: { name: "decimal" },
+        datetime: {}, # set dynamically based on datetime_type
+        timestamp: { name: "timestamp" },
+        timestamptz: { name: "timestamptz" },
+        time: { name: "time" },
+        date: { name: "date" },
+        daterange: { name: "daterange" },
+        numrange: { name: "numrange" },
+        tsrange: { name: "tsrange" },
+        tstzrange: { name: "tstzrange" },
+        int4range: { name: "int4range" },
+        int8range: { name: "int8range" },
+        binary: { name: "bytea" },
+        boolean: { name: "boolean" },
+        xml: { name: "xml" },
+        tsvector: { name: "tsvector" },
+        hstore: { name: "hstore" },
+        inet: { name: "inet" },
+        cidr: { name: "cidr" },
+        macaddr: { name: "macaddr" },
+        uuid: { name: "uuid" },
+        json: { name: "json" },
+        jsonb: { name: "jsonb" },
+        ltree: { name: "ltree" },
+        citext: { name: "citext" },
+        point: { name: "point" },
+        line: { name: "line" },
+        lseg: { name: "lseg" },
+        box: { name: "box" },
+        path: { name: "path" },
+        polygon: { name: "polygon" },
+        circle: { name: "circle" },
+        bit: { name: "bit" },
+        bit_varying: { name: "bit varying" },
+        money: { name: "money" },
+        interval: { name: "interval" },
+        oid: { name: "oid" },
+        enum: {} # special type https://www.postgresql.org/docs/current/datatype-enum.html
+      }
+
+      OID = CipherStashPG::OID # :nodoc:
+
+      include CipherStashPG::Quoting
+      include CipherStashPG::ReferentialIntegrity
+      include CipherStashPG::SchemaStatements
+      include CipherStashPG::DatabaseStatements
+
+      def supports_bulk_alter?
+        true
+      end
+
+      def supports_index_sort_order?
+        true
+      end
+
+      def supports_partitioned_indexes?
+        database_version >= 11_00_00 # >= 11.0
+      end
+
+      def supports_partial_index?
+        true
+      end
+
+      def supports_index_include?
+        database_version >= 11_00_00 # >= 11.0
+      end
+
+      def supports_expression_index?
+        true
+      end
+
+      def supports_transaction_isolation?
+        true
+      end
+
+      def supports_foreign_keys?
+        true
+      end
+
+      def supports_check_constraints?
+        true
+      end
+
+      def supports_exclusion_constraints?
+        true
+      end
+
+      def supports_unique_keys?
+        true
+      end
+
+      def supports_validate_constraints?
+        true
+      end
+
+      def supports_deferrable_constraints?
+        true
+      end
+
+      def supports_views?
+        true
+      end
+
+      def supports_datetime_with_precision?
+        true
+      end
+
+      def supports_json?
+        true
+      end
+
+      def supports_comments?
+        true
+      end
+
+      def supports_savepoints?
+        true
+      end
+
+      def supports_restart_db_transaction?
+        database_version >= 12_00_00 # >= 12.0
+      end
+
+      def supports_insert_returning?
+        true
+      end
+
+      def supports_insert_on_conflict?
+        database_version >= 9_05_00 # >= 9.5
+      end
+      alias supports_insert_on_duplicate_skip? supports_insert_on_conflict?
+      alias supports_insert_on_duplicate_update? supports_insert_on_conflict?
+      alias supports_insert_conflict_target? supports_insert_on_conflict?
+
+      def supports_virtual_columns?
+        database_version >= 12_00_00 # >= 12.0
+      end
+
+      def supports_nulls_not_distinct?
+        database_version >= 15_00_00 # >= 15.0
+      end
+
+      def index_algorithms
+        { concurrently: "CONCURRENTLY" }
+      end
+
+      def return_value_after_insert?(column) # :nodoc:
+        column.auto_populated?
+      end
+
+      class StatementPool < ConnectionAdapters::StatementPool # :nodoc:
+        def initialize(connection, max)
+          super(max)
+          @connection = connection
+          @counter = 0
+        end
+
+        def next_key
+          "a#{@counter += 1}"
+        end
+
+        private
+          def dealloc(key)
+            # This is ugly, but safe: the statement pool is only
+            # accessed while holding the connection's lock. (And we
+            # don't need the complication of with_raw_connection because
+            # a reconnect would invalidate the entire statement pool.)
+            if conn = @connection.instance_variable_get(:@raw_connection)
+              conn.query "DEALLOCATE #{key}" if conn.status == ::CipherStashPG::CONNECTION_OK
+            end
+          rescue ::CipherStashPG::Error
+          end
+      end
+
+      # Initializes and connects a PostgreSQL adapter.
+      def initialize(...)
+        super
+
+        conn_params = @config.compact
+
+        # Map ActiveRecords param names to PGs.
+        conn_params[:user] = conn_params.delete(:username) if conn_params[:username]
+        conn_params[:dbname] = conn_params.delete(:database) if conn_params[:database]
+
+        # Forward only valid config params to PG::Connection.connect.
+        valid_conn_param_keys = ::CipherStashPG::Connection.conndefaults_hash.keys + [:requiressl]
+        conn_params.slice!(*valid_conn_param_keys)
+
+        @connection_parameters = conn_params
+
+        @max_identifier_length = nil
+        @type_map = nil
+        @raw_connection = nil
+        @notice_receiver_sql_warnings = []
+
+        @use_insert_returning = @config.key?(:insert_returning) ? self.class.type_cast_config_to_boolean(@config[:insert_returning]) : true
+      end
+
+      # Is this connection alive and ready for queries?
+      def active?
+        @lock.synchronize do
+          return false unless @raw_connection
+          @raw_connection.query ";"
+        end
+        true
+      rescue ::CipherStashPG::Error
+        false
+      end
+
+      def reload_type_map # :nodoc:
+        @lock.synchronize do
+          if @type_map
+            type_map.clear
+          else
+            @type_map = Type::HashLookupTypeMap.new
+          end
+
+          initialize_type_map
+        end
+      end
+
+      def reset!
+        @lock.synchronize do
+          return connect! unless @raw_connection
+
+          unless @raw_connection.transaction_status == ::CipherStashPG::PQTRANS_IDLE
+            @raw_connection.query "ROLLBACK"
+          end
+          @raw_connection.query "DISCARD ALL"
+
+          super
+        end
+      end
+
+      # Disconnects from the database if already connected. Otherwise, this
+      # method does nothing.
+      def disconnect!
+        @lock.synchronize do
+          super
+          @raw_connection&.close rescue nil
+          @raw_connection = nil
+        end
+      end
+
+      def discard! # :nodoc:
+        super
+        @raw_connection&.socket_io&.reopen(IO::NULL) rescue nil
+        @raw_connection = nil
+      end
+
+      def native_database_types # :nodoc:
+        self.class.native_database_types
+      end
+
+      def self.native_database_types # :nodoc:
+        @native_database_types ||= begin
+          types = NATIVE_DATABASE_TYPES.dup
+          types[:datetime] = types[datetime_type]
+          types
+        end
+      end
+
+      def set_standard_conforming_strings
+        internal_execute("SET standard_conforming_strings = on")
+      end
+
+      def supports_ddl_transactions?
+        true
+      end
+
+      def supports_advisory_locks?
+        true
+      end
+
+      def supports_explain?
+        true
+      end
+
+      def supports_extensions?
+        true
+      end
+
+      def supports_materialized_views?
+        true
+      end
+
+      def supports_foreign_tables?
+        true
+      end
+
+      def supports_pgcrypto_uuid?
+        database_version >= 9_04_00 # >= 9.4
+      end
+
+      def supports_optimizer_hints?
+        unless defined?(@has_pg_hint_plan)
+          @has_pg_hint_plan = extension_available?("pg_hint_plan")
+        end
+        @has_pg_hint_plan
+      end
+
+      def supports_common_table_expressions?
+        true
+      end
+
+      def supports_lazy_transactions?
+        true
+      end
+
+      def get_advisory_lock(lock_id) # :nodoc:
+        unless lock_id.is_a?(Integer) && lock_id.bit_length <= 63
+          raise(ArgumentError, "PostgreSQL requires advisory lock ids to be a signed 64 bit integer")
+        end
+        query_value("SELECT pg_try_advisory_lock(#{lock_id})")
+      end
+
+      def release_advisory_lock(lock_id) # :nodoc:
+        unless lock_id.is_a?(Integer) && lock_id.bit_length <= 63
+          raise(ArgumentError, "PostgreSQL requires advisory lock ids to be a signed 64 bit integer")
+        end
+        query_value("SELECT pg_advisory_unlock(#{lock_id})")
+      end
+
+      def enable_extension(name, **)
+        schema, name = name.to_s.split(".").values_at(-2, -1)
+        sql = +"CREATE EXTENSION IF NOT EXISTS \"#{name}\""
+        sql << " SCHEMA #{schema}" if schema
+
+        internal_exec_query(sql).tap { reload_type_map }
+      end
+
+      # Removes an extension from the database.
+      #
+      # [<tt>:force</tt>]
+      #   Set to +:cascade+ to drop dependent objects as well.
+      #   Defaults to false.
+      def disable_extension(name, force: false)
+        internal_exec_query("DROP EXTENSION IF EXISTS \"#{name}\"#{' CASCADE' if force == :cascade}").tap {
+          reload_type_map
+        }
+      end
+
+      def extension_available?(name)
+        query_value("SELECT true FROM pg_available_extensions WHERE name = #{quote(name)}", "SCHEMA")
+      end
+
+      def extension_enabled?(name)
+        query_value("SELECT installed_version IS NOT NULL FROM pg_available_extensions WHERE name = #{quote(name)}", "SCHEMA")
+      end
+
+      def extensions
+        internal_exec_query("SELECT extname FROM pg_extension", "SCHEMA", allow_retry: true, materialize_transactions: false).cast_values
+      end
+
+      # Returns a list of defined enum types, and their values.
+      def enum_types
+        query = <<~SQL
+          SELECT
+            type.typname AS name,
+            type.OID AS oid,
+            n.nspname AS schema,
+            string_agg(enum.enumlabel, ',' ORDER BY enum.enumsortorder) AS value
+          FROM pg_enum AS enum
+          JOIN pg_type AS type ON (type.oid = enum.enumtypid)
+          JOIN pg_namespace n ON type.typnamespace = n.oid
+          WHERE n.nspname = ANY (current_schemas(false))
+          GROUP BY type.OID, n.nspname, type.typname;
+        SQL
+
+        internal_exec_query(query, "SCHEMA", allow_retry: true, materialize_transactions: false).cast_values.each_with_object({}) do |row, memo|
+          name, schema = row[0], row[2]
+          schema = nil if schema == current_schema
+          full_name = [schema, name].compact.join(".")
+          memo[full_name] = row.last
+        end.to_a
+      end
+
+      # Given a name and an array of values, creates an enum type.
+      def create_enum(name, values, **options)
+        sql_values = values.map { |s| quote(s) }.join(", ")
+        scope = quoted_scope(name)
+        query = <<~SQL
+          DO $$
+          BEGIN
+            IF NOT EXISTS (
+              SELECT 1
+              FROM pg_type t
+              JOIN pg_namespace n ON t.typnamespace = n.oid
+              WHERE t.typname = #{scope[:name]}
+                AND n.nspname = #{scope[:schema]}
+            ) THEN
+              CREATE TYPE #{quote_table_name(name)} AS ENUM (#{sql_values});
+            END IF;
+          END
+          $$;
+        SQL
+        internal_exec_query(query)
+      end
+
+      # Drops an enum type.
+      #
+      # If the <tt>if_exists: true</tt> option is provided, the enum is dropped
+      # only if it exists. Otherwise, if the enum doesn't exist, an error is
+      # raised.
+      #
+      # The +values+ parameter will be ignored if present. It can be helpful
+      # to provide this in a migration's +change+ method so it can be reverted.
+      # In that case, +values+ will be used by #create_enum.
+      def drop_enum(name, values = nil, **options)
+        query = <<~SQL
+          DROP TYPE#{' IF EXISTS' if options[:if_exists]} #{quote_table_name(name)};
+        SQL
+        internal_exec_query(query)
+      end
+
+      # Rename an existing enum type to something else.
+      def rename_enum(name, options = {})
+        to = options.fetch(:to) { raise ArgumentError, ":to is required" }
+
+        exec_query("ALTER TYPE #{quote_table_name(name)} RENAME TO #{to}").tap { reload_type_map }
+      end
+
+      # Add enum value to an existing enum type.
+      def add_enum_value(type_name, value, options = {})
+        before, after = options.values_at(:before, :after)
+        sql = +"ALTER TYPE #{quote_table_name(type_name)} ADD VALUE '#{value}'"
+
+        if before && after
+          raise ArgumentError, "Cannot have both :before and :after at the same time"
+        elsif before
+          sql << " BEFORE '#{before}'"
+        elsif after
+          sql << " AFTER '#{after}'"
+        end
+
+        execute(sql).tap { reload_type_map }
+      end
+
+      # Rename enum value on an existing enum type.
+      def rename_enum_value(type_name, options = {})
+        unless database_version >= 10_00_00 # >= 10.0
+          raise ArgumentError, "Renaming enum values is only supported in PostgreSQL 10 or later"
+        end
+
+        from = options.fetch(:from) { raise ArgumentError, ":from is required" }
+        to = options.fetch(:to) { raise ArgumentError, ":to is required" }
+
+        execute("ALTER TYPE #{quote_table_name(type_name)} RENAME VALUE '#{from}' TO '#{to}'").tap {
+          reload_type_map
+        }
+      end
+
+      # Returns the configured supported identifier length supported by PostgreSQL
+      def max_identifier_length
+        @max_identifier_length ||= query_value("SHOW max_identifier_length", "SCHEMA").to_i
+      end
+
+      # Returns the maximum length of a table name.
+      def table_name_length
+        # PostgreSQL automatically creates an index for PRIMARY KEY with name consisting of
+        # truncated table name and "_pkey" suffix fitting into max_identifier_length number of characters.
+        # We allow smaller table names to be able to correctly rename this index when renaming the table.
+        max_identifier_length - "_pkey".length
+      end
+
+      # Set the authorized user for this session
+      def session_auth=(user)
+        clear_cache!
+        internal_execute("SET SESSION AUTHORIZATION #{user}", nil, materialize_transactions: true)
+      end
+
+      def use_insert_returning?
+        @use_insert_returning
+      end
+
+      # Returns the version of the connected PostgreSQL server.
+      def get_database_version # :nodoc:
+        valid_raw_connection.server_version
+      end
+      alias :postgresql_version :database_version
+
+      def default_index_type?(index) # :nodoc:
+        index.using == :btree || super
+      end
+
+      def build_insert_sql(insert) # :nodoc:
+        sql = +"INSERT #{insert.into} #{insert.values_list}"
+
+        if insert.skip_duplicates?
+          sql << " ON CONFLICT #{insert.conflict_target} DO NOTHING"
+        elsif insert.update_duplicates?
+          sql << " ON CONFLICT #{insert.conflict_target} DO UPDATE SET "
+          if insert.raw_update_sql?
+            sql << insert.raw_update_sql
+          else
+            sql << insert.touch_model_timestamps_unless { |column| "#{insert.model.quoted_table_name}.#{column} IS NOT DISTINCT FROM excluded.#{column}" }
+            sql << insert.updatable_columns.map { |column| "#{column}=excluded.#{column}" }.join(",")
+          end
+        end
+
+        sql << " RETURNING #{insert.returning}" if insert.returning
+        sql
+      end
+
+      def check_version # :nodoc:
+        if database_version < 9_03_00 # < 9.3
+          raise "Your version of PostgreSQL (#{database_version}) is too old. Active Record supports PostgreSQL >= 9.3."
+        end
+      end
+
+      class << self
+        def initialize_type_map(m) # :nodoc:
+          m.register_type "int2", Type::Integer.new(limit: 2)
+          m.register_type "int4", Type::Integer.new(limit: 4)
+          m.register_type "int8", Type::Integer.new(limit: 8)
+          m.register_type "oid", OID::Oid.new
+          m.register_type "float4", Type::Float.new
+          m.alias_type "float8", "float4"
+          m.register_type "text", Type::Text.new
+          register_class_with_limit m, "varchar", Type::String
+          m.alias_type "char", "varchar"
+          m.alias_type "name", "varchar"
+          m.alias_type "bpchar", "varchar"
+          m.register_type "bool", Type::Boolean.new
+          register_class_with_limit m, "bit", OID::Bit
+          register_class_with_limit m, "varbit", OID::BitVarying
+          m.register_type "date", OID::Date.new
+
+          m.register_type "money", OID::Money.new
+          m.register_type "bytea", OID::Bytea.new
+          m.register_type "point", OID::Point.new
+          m.register_type "hstore", OID::Hstore.new
+          m.register_type "json", Type::Json.new
+          m.register_type "jsonb", OID::Jsonb.new
+          m.register_type "cidr", OID::Cidr.new
+          m.register_type "inet", OID::Inet.new
+          m.register_type "uuid", OID::Uuid.new
+          m.register_type "xml", OID::Xml.new
+          m.register_type "tsvector", OID::SpecializedString.new(:tsvector)
+          m.register_type "macaddr", OID::Macaddr.new
+          m.register_type "citext", OID::SpecializedString.new(:citext)
+          m.register_type "ltree", OID::SpecializedString.new(:ltree)
+          m.register_type "line", OID::SpecializedString.new(:line)
+          m.register_type "lseg", OID::SpecializedString.new(:lseg)
+          m.register_type "box", OID::SpecializedString.new(:box)
+          m.register_type "path", OID::SpecializedString.new(:path)
+          m.register_type "polygon", OID::SpecializedString.new(:polygon)
+          m.register_type "circle", OID::SpecializedString.new(:circle)
+
+          m.register_type "numeric" do |_, fmod, sql_type|
+            precision = extract_precision(sql_type)
+            scale = extract_scale(sql_type)
+
+            # The type for the numeric depends on the width of the field,
+            # so we'll do something special here.
+            #
+            # When dealing with decimal columns:
+            #
+            # places after decimal = fmod - 4 & 0xffff
+            # places before decimal = (fmod - 4) >> 16 & 0xffff
+            if fmod && (fmod - 4 & 0xffff).zero?
+              # FIXME: Remove this class, and the second argument to
+              # lookups on PG
+              Type::DecimalWithoutScale.new(precision: precision)
+            else
+              OID::Decimal.new(precision: precision, scale: scale)
+            end
+          end
+
+          m.register_type "interval" do |*args, sql_type|
+            precision = extract_precision(sql_type)
+            OID::Interval.new(precision: precision)
+          end
+        end
+      end
+
+      private
+        def type_map
+          @type_map ||= Type::HashLookupTypeMap.new
+        end
+
+        def initialize_type_map(m = type_map)
+          self.class.initialize_type_map(m)
+
+          self.class.register_class_with_precision m, "time", Type::Time, timezone: @default_timezone
+          self.class.register_class_with_precision m, "timestamp", OID::Timestamp, timezone: @default_timezone
+          self.class.register_class_with_precision m, "timestamptz", OID::TimestampWithTimeZone
+
+          ############################################
+          # EDITED to add type mapping for ORE type.
+          ############################################
+          # TODO: Look into OID::Array for ore_64_8_v1. It doesn't work out of
+          # the box, FWIW, so will need a little digging.
+          m.register_type "ore_64_8_v1_term", OID::SpecializedString.new(:ore_64_8_v1_term)
+          m.register_type "ore_64_8_v1", OID::SpecializedString.new(:ore_64_8_v1)
+
+          load_additional_types
+        end
+
+        # Extracts the value from a PostgreSQL column default definition.
+        def extract_value_from_default(default)
+          case default
+          # Quoted types
+          when /\A[(B]?'(.*)'.*::"?([\w. ]+)"?(?:\[\])?\z/m
+            # The default 'now'::date is CURRENT_DATE
+            if $1 == "now" && $2 == "date"
+              nil
+            else
+              $1.gsub("''", "'")
+            end
+          # Boolean types
+          when "true", "false"
+            default
+          # Numeric types
+          when /\A\(?(-?\d+(\.\d*)?)\)?(::bigint)?\z/
+            $1
+          # Object identifier types
+          when /\A-?\d+\z/
+            $1
+          else
+            # Anything else is blank, some user type, or some function
+            # and we can't know the value of that, so return nil.
+            nil
+          end
+        end
+
+        def extract_default_function(default_value, default)
+          default if has_default_function?(default_value, default)
+        end
+
+        def has_default_function?(default_value, default)
+          !default_value && %r{\w+\(.*\)|\(.*\)::\w+|CURRENT_DATE|CURRENT_TIMESTAMP}.match?(default)
+        end
+
+        # See https://www.postgresql.org/docs/current/static/errcodes-appendix.html
+        VALUE_LIMIT_VIOLATION = "22001"
+        NUMERIC_VALUE_OUT_OF_RANGE = "22003"
+        NOT_NULL_VIOLATION = "23502"
+        FOREIGN_KEY_VIOLATION = "23503"
+        UNIQUE_VIOLATION = "23505"
+        SERIALIZATION_FAILURE = "40001"
+        DEADLOCK_DETECTED = "40P01"
+        DUPLICATE_DATABASE = "42P04"
+        LOCK_NOT_AVAILABLE = "55P03"
+        QUERY_CANCELED = "57014"
+
+        def translate_exception(exception, message:, sql:, binds:)
+          return exception unless exception.respond_to?(:result)
+
+          case exception.result.try(:error_field, ::CipherStashPG::PG_DIAG_SQLSTATE)
+          when nil
+            if exception.message.match?(/connection is closed/i)
+              ConnectionNotEstablished.new(exception, connection_pool: @pool)
+            elsif exception.is_a?(::CipherStashPG::ConnectionBad)
+              # libpq message style always ends with a newline; the pg gem's internal
+              # errors do not. We separate these cases because a pg-internal
+              # ConnectionBad means it failed before it managed to send the query,
+              # whereas a libpq failure could have occurred at any time (meaning the
+              # server may have already executed part or all of the query).
+              if exception.message.end_with?("\n")
+                ConnectionFailed.new(exception, connection_pool: @pool)
+              else
+                ConnectionNotEstablished.new(exception, connection_pool: @pool)
+              end
+            else
+              super
+            end
+          when UNIQUE_VIOLATION
+            RecordNotUnique.new(message, sql: sql, binds: binds, connection_pool: @pool)
+          when FOREIGN_KEY_VIOLATION
+            InvalidForeignKey.new(message, sql: sql, binds: binds, connection_pool: @pool)
+          when VALUE_LIMIT_VIOLATION
+            ValueTooLong.new(message, sql: sql, binds: binds, connection_pool: @pool)
+          when NUMERIC_VALUE_OUT_OF_RANGE
+            RangeError.new(message, sql: sql, binds: binds, connection_pool: @pool)
+          when NOT_NULL_VIOLATION
+            NotNullViolation.new(message, sql: sql, binds: binds, connection_pool: @pool)
+          when SERIALIZATION_FAILURE
+            SerializationFailure.new(message, sql: sql, binds: binds, connection_pool: @pool)
+          when DEADLOCK_DETECTED
+            Deadlocked.new(message, sql: sql, binds: binds, connection_pool: @pool)
+          when DUPLICATE_DATABASE
+            DatabaseAlreadyExists.new(message, sql: sql, binds: binds, connection_pool: @pool)
+          when LOCK_NOT_AVAILABLE
+            LockWaitTimeout.new(message, sql: sql, binds: binds, connection_pool: @pool)
+          when QUERY_CANCELED
+            QueryCanceled.new(message, sql: sql, binds: binds, connection_pool: @pool)
+          else
+            super
+          end
+        end
+
+        def retryable_query_error?(exception)
+          # We cannot retry anything if we're inside a broken transaction; we need to at
+          # least raise until the innermost savepoint is rolled back
+          @raw_connection&.transaction_status != ::CipherStashPG::PQTRANS_INERROR &&
+            super
+        end
+
+        def get_oid_type(oid, fmod, column_name, sql_type = "")
+          if !type_map.key?(oid)
+            load_additional_types([oid])
+          end
+
+          type_map.fetch(oid, fmod, sql_type) {
+            warn "unknown OID #{oid}: failed to recognize type of '#{column_name}'. It will be treated as String."
+            Type.default_value.tap do |cast_type|
+              type_map.register_type(oid, cast_type)
+            end
+          }
+        end
+
+        def load_additional_types(oids = nil)
+          initializer = OID::TypeMapInitializer.new(type_map)
+          load_types_queries(initializer, oids) do |query|
+            execute_and_clear(query, "SCHEMA", [], allow_retry: true, materialize_transactions: false) do |records|
+              initializer.run(records)
+            end
+          end
+        end
+
+        def load_types_queries(initializer, oids)
+          query = <<~SQL
+            SELECT t.oid, t.typname, t.typelem, t.typdelim, t.typinput, r.rngsubtype, t.typtype, t.typbasetype
+            FROM pg_type as t
+            LEFT JOIN pg_range as r ON oid = rngtypid
+          SQL
+          if oids
+            yield query + "WHERE t.oid IN (%s)" % oids.join(", ")
+          else
+            yield query + initializer.query_conditions_for_known_type_names
+            yield query + initializer.query_conditions_for_known_type_types
+            yield query + initializer.query_conditions_for_array_types
+          end
+        end
+
+        FEATURE_NOT_SUPPORTED = "0A000" # :nodoc:
+
+        def execute_and_clear(sql, name, binds, prepare: false, async: false, allow_retry: false, materialize_transactions: true)
+          sql = transform_query(sql)
+          check_if_write_query(sql)
+
+          if !prepare || without_prepared_statement?(binds)
+            result = exec_no_cache(sql, name, binds, async: async, allow_retry: allow_retry, materialize_transactions: materialize_transactions)
+          else
+            result = exec_cache(sql, name, binds, async: async, allow_retry: allow_retry, materialize_transactions: materialize_transactions)
+          end
+          begin
+            ret = yield result
+          ensure
+            result.clear
+          end
+          ret
+        end
+
+        def exec_no_cache(sql, name, binds, async:, allow_retry:, materialize_transactions:)
+          mark_transaction_written_if_write(sql)
+
+          # make sure we carry over any changes to ActiveRecord.default_timezone that have been
+          # made since we established the connection
+          update_typemap_for_default_timezone
+
+          type_casted_binds = type_casted_binds(binds)
+          log(sql, name, binds, type_casted_binds, async: async) do
+            with_raw_connection do |conn|
+              conn.exec_params(sql, type_casted_binds)
+            end
+          end
+        end
+
+        def exec_cache(sql, name, binds, async:, allow_retry:, materialize_transactions:)
+          mark_transaction_written_if_write(sql)
+
+          update_typemap_for_default_timezone
+
+          stmt_key = prepare_statement(sql, binds)
+          type_casted_binds = type_casted_binds(binds)
+
+          with_raw_connection do |conn|
+            log(sql, name, binds, type_casted_binds, stmt_key, async: async) do
+              conn.exec_prepared(stmt_key, type_casted_binds)
+            end
+          end
+        rescue ActiveRecord::StatementInvalid => e
+          raise unless is_cached_plan_failure?(e)
+
+          # Nothing we can do if we are in a transaction because all commands
+          # will raise InFailedSQLTransaction
+          if in_transaction?
+            raise ActiveRecord::PreparedStatementCacheExpired.new(e.cause.message)
+          else
+            @lock.synchronize do
+              # outside of transactions we can simply flush this query and retry
+              @statements.delete sql_key(sql)
+            end
+            retry
+          end
+        end
+
+        # Annoyingly, the code for prepared statements whose return value may
+        # have changed is FEATURE_NOT_SUPPORTED.
+        #
+        # This covers various different error types so we need to do additional
+        # work to classify the exception definitively as a
+        # ActiveRecord::PreparedStatementCacheExpired
+        #
+        # Check here for more details:
+        # https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/backend/utils/cache/plancache.c#l573
+        def is_cached_plan_failure?(e)
+          pgerror = e.cause
+          pgerror.result.result_error_field(::CipherStashPG::PG_DIAG_SQLSTATE) == FEATURE_NOT_SUPPORTED &&
+            pgerror.result.result_error_field(::CipherStashPG::PG_DIAG_SOURCE_FUNCTION) == "RevalidateCachedQuery"
+        rescue
+          false
+        end
+
+        def in_transaction?
+          open_transactions > 0
+        end
+
+        # Returns the statement identifier for the client side cache
+        # of statements
+        def sql_key(sql)
+          "#{schema_search_path}-#{sql}"
+        end
+
+        # Prepare the statement if it hasn't been prepared, return
+        # the statement key.
+        def prepare_statement(sql, binds)
+          with_raw_connection(allow_retry: true, materialize_transactions: false) do |conn|
+            sql_key = sql_key(sql)
+            unless @statements.key? sql_key
+              nextkey = @statements.next_key
+              begin
+                conn.prepare nextkey, sql
+              rescue => e
+                raise translate_exception_class(e, sql, binds)
+              end
+              # Clear the queue
+              conn.get_last_result
+              @statements[sql_key] = nextkey
+            end
+            @statements[sql_key]
+          end
+        end
+
+        # Connects to a PostgreSQL server and sets up the adapter depending on the
+        # connected server's characteristics.
+        def connect
+          @raw_connection = self.class.new_client(@connection_parameters)
+        rescue ConnectionNotEstablished => ex
+          raise ex.set_pool(@pool)
+        end
+
+        def reconnect
+          begin
+            @raw_connection&.reset
+          rescue ::CipherStashPG::ConnectionBad
+            @raw_connection = nil
+          end
+
+          connect unless @raw_connection
+        end
+
+        # Configures the encoding, verbosity, schema search path, and time zone of the connection.
+        # This is called by #connect and should not be called manually.
+        def configure_connection
+          if @config[:encoding]
+            @raw_connection.set_client_encoding(@config[:encoding])
+          end
+          self.client_min_messages = @config[:min_messages] || "warning"
+          self.schema_search_path = @config[:schema_search_path] || @config[:schema_order]
+
+          unless ActiveRecord.db_warnings_action.nil?
+            @raw_connection.set_notice_receiver do |result|
+              message = result.error_field(::CipherStashPG::Result::PG_DIAG_MESSAGE_PRIMARY)
+              code = result.error_field(::CipherStashPG::Result::PG_DIAG_SQLSTATE)
+              level = result.error_field(::CipherStashPG::Result::PG_DIAG_SEVERITY)
+              @notice_receiver_sql_warnings << SQLWarning.new(message, code, level, nil, @pool)
+            end
+          end
+
+          # Use standard-conforming strings so we don't have to do the E'...' dance.
+          set_standard_conforming_strings
+
+          variables = @config.fetch(:variables, {}).stringify_keys
+
+          # Set interval output format to ISO 8601 for ease of parsing by ActiveSupport::Duration.parse
+          internal_execute("SET intervalstyle = iso_8601")
+
+          # SET statements from :variables config hash
+          # https://www.postgresql.org/docs/current/static/sql-set.html
+          variables.map do |k, v|
+            if v == ":default" || v == :default
+              # Sets the value to the global or compile default
+              internal_execute("SET SESSION #{k} TO DEFAULT")
+            elsif !v.nil?
+              internal_execute("SET SESSION #{k} TO #{quote(v)}")
+            end
+          end
+
+          add_pg_encoders
+          add_pg_decoders
+
+          reload_type_map
+        end
+
+        def reconfigure_connection_timezone
+          variables = @config.fetch(:variables, {}).stringify_keys
+
+          # If it's been directly configured as a connection variable, we don't
+          # need to do anything here; it will be set up by configure_connection
+          # and then never changed.
+          return if variables["timezone"]
+
+          # If using Active Record's time zone support configure the connection
+          # to return TIMESTAMP WITH ZONE types in UTC.
+          if default_timezone == :utc
+            internal_execute("SET SESSION timezone TO 'UTC'")
+          else
+            internal_execute("SET SESSION timezone TO DEFAULT")
+          end
+        end
+
+        # Returns the list of a table's column names, data types, and default values.
+        #
+        # The underlying query is roughly:
+        #   SELECT column.name, column.type, default.value, column.comment
+        #   FROM column LEFT JOIN default
+        #   ON column.table_id = default.table_id
+        #   AND column.num = default.column_num
+        #   WHERE column.table_id = get_table_id('table_name')
+        #   AND column.num > 0
+        #   AND NOT column.is_dropped
+        #   ORDER BY column.num
+        #
+        # If the table name is not prefixed with a schema, the database will
+        # take the first match from the schema search path.
+        #
+        # Query implementation notes:
+        #  - format_type includes the column size constraint, e.g. varchar(50)
+        #  - ::regclass is a function that gives the id for a table name
+        #
+        # NOTE: this method has been modified from the original version that was lifted
+        # from ActiveRecord's PostgreSQL adapter. The query is untouched, but
+        # `CipherStashColumnMapper` is custom. See the docs in `CipherStashColumnMapper`
+        # for details.
+        #
+        # Original source:
+        # https://github.com/rails/rails/blob/699dfdb42635faf6d40ff2405b2f0a615b1c54ed/activerecord/lib/active_record/connection_adapters/postgresql_adapter.rb#L1076
+        def column_definitions(table_name)
+          column_definitions =
+            query(<<~SQL, "SCHEMA")
+              SELECT a.attname, format_type(a.atttypid, a.atttypmod),
+                pg_get_expr(d.adbin, d.adrelid), a.attnotnull, a.atttypid, a.atttypmod,
+                c.collname, col_description(a.attrelid, a.attnum) AS comment,
+                #{supports_virtual_columns? ? 'attgenerated' : quote('')} as attgenerated
+              FROM pg_attribute a
+              LEFT JOIN pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum
+              LEFT JOIN pg_type t ON a.atttypid = t.oid
+              LEFT JOIN pg_collation c ON a.attcollation = c.oid AND a.attcollation <> t.typcollation
+              WHERE a.attrelid = #{quote(quote_table_name(table_name))}::regclass
+                AND a.attnum > 0 AND NOT a.attisdropped
+              ORDER BY a.attnum
+            SQL
+          CipherStashColumnMapper.map_column_definitions(column_definitions)
+        end
+
+        def extract_table_ref_from_insert_sql(sql)
+          sql[/into\s("[A-Za-z0-9_."\[\]\s]+"|[A-Za-z0-9_."\[\]]+)\s*/im]
+          $1.strip if $1
+        end
+
+        def arel_visitor
+          Arel::Visitors::PostgreSQL.new(self)
+        end
+
+        def build_statement_pool
+          StatementPool.new(self, self.class.type_cast_config_to_integer(@config[:statement_limit]))
+        end
+
+        def can_perform_case_insensitive_comparison_for?(column)
+          # NOTE: citext is an exception. It is possible to perform a
+          # case-insensitive comparison using `LOWER()`, but it is
+          # unnecessary, as `citext` is case-insensitive by definition.
+          @case_insensitive_cache ||= { "citext" => false }
+          @case_insensitive_cache.fetch(column.sql_type) do
+            @case_insensitive_cache[column.sql_type] = begin
+              sql = <<~SQL
+                SELECT exists(
+                  SELECT * FROM pg_proc
+                  WHERE proname = 'lower'
+                    AND proargtypes = ARRAY[#{quote column.sql_type}::regtype]::oidvector
+                ) OR exists(
+                  SELECT * FROM pg_proc
+                  INNER JOIN pg_cast
+                    ON ARRAY[casttarget]::oidvector = proargtypes
+                  WHERE proname = 'lower'
+                    AND castsource = #{quote column.sql_type}::regtype
+                )
+              SQL
+              execute_and_clear(sql, "SCHEMA", [], allow_retry: true, materialize_transactions: false) do |result|
+                result.getvalue(0, 0)
+              end
+            end
+          end
+        end
+
+        def add_pg_encoders
+          map = ::CipherStashPG::TypeMapByClass.new
+          map[Integer] = ::CipherStashPG::TextEncoder::Integer.new
+          map[TrueClass] = ::CipherStashPG::TextEncoder::Boolean.new
+          map[FalseClass] = ::CipherStashPG::TextEncoder::Boolean.new
+          @raw_connection.type_map_for_queries = map
+        end
+
+        def update_typemap_for_default_timezone
+          if @raw_connection && @mapped_default_timezone != default_timezone && @timestamp_decoder
+            decoder_class = default_timezone == :utc ?
+              ::CipherStashPG::TextDecoder::TimestampUtc :
+              ::CipherStashPG::TextDecoder::TimestampWithoutTimeZone
+
+            @timestamp_decoder = decoder_class.new(**@timestamp_decoder.to_h)
+            @raw_connection.type_map_for_results.add_coder(@timestamp_decoder)
+
+            @mapped_default_timezone = default_timezone
+
+            # if default timezone has changed, we need to reconfigure the connection
+            # (specifically, the session time zone)
+            reconfigure_connection_timezone
+
+            true
+          end
+        end
+
+        def add_pg_decoders
+          @mapped_default_timezone = nil
+          @timestamp_decoder = nil
+
+          coders_by_name = {
+            "int2" => ::CipherStashPG::TextDecoder::Integer,
+            "int4" => ::CipherStashPG::TextDecoder::Integer,
+            "int8" => ::CipherStashPG::TextDecoder::Integer,
+            "oid" => ::CipherStashPG::TextDecoder::Integer,
+            "float4" => ::CipherStashPG::TextDecoder::Float,
+            "float8" => ::CipherStashPG::TextDecoder::Float,
+            "numeric" => ::CipherStashPG::TextDecoder::Numeric,
+            "bool" => ::CipherStashPG::TextDecoder::Boolean,
+            "timestamp" => ::CipherStashPG::TextDecoder::TimestampUtc,
+            "timestamptz" => ::CipherStashPG::TextDecoder::TimestampWithTimeZone,
+          }
+
+          known_coder_types = coders_by_name.keys.map { |n| quote(n) }
+          query = <<~SQL % known_coder_types.join(", ")
+            SELECT t.oid, t.typname
+            FROM pg_type as t
+            WHERE t.typname IN (%s)
+          SQL
+          coders = execute_and_clear(query, "SCHEMA", [], allow_retry: true, materialize_transactions: false) do |result|
+            result.filter_map { |row| construct_coder(row, coders_by_name[row["typname"]]) }
+          end
+
+          map = ::CipherStashPG::TypeMapByOid.new
+          coders.each { |coder| map.add_coder(coder) }
+          @raw_connection.type_map_for_results = map
+
+          @type_map_for_results = ::CipherStashPG::TypeMapByOid.new
+          @type_map_for_results.default_type_map = map
+          @type_map_for_results.add_coder(::CipherStashPG::TextDecoder::Bytea.new(oid: 17, name: "bytea"))
+          @type_map_for_results.add_coder(MoneyDecoder.new(oid: 790, name: "money"))
+
+          # extract timestamp decoder for use in update_typemap_for_default_timezone
+          @timestamp_decoder = coders.find { |coder| coder.name == "timestamp" }
+          update_typemap_for_default_timezone
+        end
+
+        def construct_coder(row, coder_class)
+          return unless coder_class
+          coder_class.new(oid: row["oid"].to_i, name: row["typname"])
+        end
+
+        class MoneyDecoder < ::CipherStashPG::SimpleDecoder # :nodoc:
+          TYPE = OID::Money.new
+
+          def decode(value, tuple = nil, field = nil)
+            TYPE.deserialize(value)
+          end
+        end
+
+        ActiveRecord::Type.add_modifier({ array: true }, OID::Array, adapter: :postgresql)
+        ActiveRecord::Type.add_modifier({ range: true }, OID::Range, adapter: :postgresql)
+        ActiveRecord::Type.register(:bit, OID::Bit, adapter: :postgresql)
+        ActiveRecord::Type.register(:bit_varying, OID::BitVarying, adapter: :postgresql)
+        ActiveRecord::Type.register(:binary, OID::Bytea, adapter: :postgresql)
+        ActiveRecord::Type.register(:cidr, OID::Cidr, adapter: :postgresql)
+        ActiveRecord::Type.register(:date, OID::Date, adapter: :postgresql)
+        ActiveRecord::Type.register(:datetime, OID::DateTime, adapter: :postgresql)
+        ActiveRecord::Type.register(:decimal, OID::Decimal, adapter: :postgresql)
+        ActiveRecord::Type.register(:enum, OID::Enum, adapter: :postgresql)
+        ActiveRecord::Type.register(:hstore, OID::Hstore, adapter: :postgresql)
+        ActiveRecord::Type.register(:inet, OID::Inet, adapter: :postgresql)
+        ActiveRecord::Type.register(:interval, OID::Interval, adapter: :postgresql)
+        ActiveRecord::Type.register(:jsonb, OID::Jsonb, adapter: :postgresql)
+        ActiveRecord::Type.register(:money, OID::Money, adapter: :postgresql)
+        ActiveRecord::Type.register(:point, OID::Point, adapter: :postgresql)
+        ActiveRecord::Type.register(:legacy_point, OID::LegacyPoint, adapter: :postgresql)
+        ActiveRecord::Type.register(:uuid, OID::Uuid, adapter: :postgresql)
+        ActiveRecord::Type.register(:vector, OID::Vector, adapter: :postgresql)
+        ActiveRecord::Type.register(:xml, OID::Xml, adapter: :postgresql)
+    end
+    ActiveSupport.run_load_hooks(:active_record_cipherstash_pgadapter, CipherStashPGAdapter)
+  end
+end
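A closing note on the last lines of the new file: the adapter fires an :active_record_cipherstash_pgadapter load hook, mirroring Rails' :active_record_postgresqladapter hook. The sketch below shows how an application initializer could use that hook to tune the class attributes defined in the adapter (datetime_type, create_unlogged_tables); the initializer file name and the values chosen are hypothetical examples, not recommendations from the gem.

    # config/initializers/cipherstash_pg.rb (hypothetical file name)
    ActiveSupport.on_load(:active_record_cipherstash_pgadapter) do
      # `self` here is ActiveRecord::ConnectionAdapters::CipherStashPGAdapter,
      # the object passed to run_load_hooks at the end of the adapter file.
      self.datetime_type = :timestamptz
      self.create_unlogged_tables = true    # e.g. enable only in the test environment
    end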