activerecord7-redshift-adapter-pennylane 1.0.1 → 1.0.2
- checksums.yaml +4 -4
- data/lib/active_record/connection_adapters/redshift_7_0/oid.rb +17 -0
- data/lib/active_record/connection_adapters/redshift_7_0_adapter.rb +770 -0
- data/lib/active_record/connection_adapters/redshift_7_1/array_parser.rb +92 -0
- data/lib/active_record/connection_adapters/redshift_7_1/column.rb +17 -0
- data/lib/active_record/connection_adapters/redshift_7_1/database_statements.rb +232 -0
- data/lib/active_record/connection_adapters/redshift_7_1/oid/date_time.rb +36 -0
- data/lib/active_record/connection_adapters/redshift_7_1/oid/decimal.rb +15 -0
- data/lib/active_record/connection_adapters/redshift_7_1/oid/json.rb +41 -0
- data/lib/active_record/connection_adapters/redshift_7_1/oid/jsonb.rb +25 -0
- data/lib/active_record/connection_adapters/redshift_7_1/oid/type_map_initializer.rb +62 -0
- data/lib/active_record/connection_adapters/redshift_7_1/oid.rb +17 -0
- data/lib/active_record/connection_adapters/redshift_7_1/quoting.rb +99 -0
- data/lib/active_record/connection_adapters/redshift_7_1/referential_integrity.rb +17 -0
- data/lib/active_record/connection_adapters/redshift_7_1/schema_definitions.rb +70 -0
- data/lib/active_record/connection_adapters/redshift_7_1/schema_dumper.rb +17 -0
- data/lib/active_record/connection_adapters/redshift_7_1/schema_statements.rb +424 -0
- data/lib/active_record/connection_adapters/redshift_7_1/type_metadata.rb +39 -0
- data/lib/active_record/connection_adapters/redshift_7_1/utils.rb +81 -0
- data/lib/active_record/connection_adapters/redshift_7_1_adapter.rb +769 -0
- data/lib/active_record/connection_adapters/redshift_adapter.rb +7 -768
- metadata +38 -19
- data/lib/active_record/connection_adapters/redshift/oid.rb +0 -17
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/array_parser.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/column.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/database_statements.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/oid/date_time.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/oid/decimal.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/oid/json.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/oid/jsonb.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/oid/type_map_initializer.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/quoting.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/referential_integrity.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/schema_definitions.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/schema_dumper.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/schema_statements.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/type_metadata.rb +0 -0
- /data/lib/active_record/connection_adapters/{redshift → redshift_7_0}/utils.rb +0 -0
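For orientation before the diff: the renamed adapter is still selected through the standard Active Record connection flow, and redshift_connection (shown in the new redshift_7_1_adapter.rb below) maps :username and :database onto libpq's user and dbname before handing the remaining keys to PG::Connection. A minimal connection sketch follows; the host, database, and credential values are placeholders, not values taken from this gem.

ActiveRecord::Base.establish_connection(
  adapter:  'redshift',            # dispatches to ConnectionHandling#redshift_connection
  host:     'example-cluster.abc123.us-east-1.redshift.amazonaws.com', # placeholder
  port:     5439,                  # placeholder (Redshift's customary port)
  database: 'analytics',           # placeholder; forwarded to libpq as dbname
  username: 'app_user',            # placeholder; forwarded to libpq as user
  password: ENV['REDSHIFT_PASSWORD'] # placeholder environment variable
)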
@@ -0,0 +1,769 @@ data/lib/active_record/connection_adapters/redshift_7_1_adapter.rb (new file)
# frozen_string_literal: true

require 'active_record/connection_adapters/abstract_adapter'
require 'active_record/connection_adapters/statement_pool'

require 'active_record/connection_adapters/redshift_7_1/utils'
require 'active_record/connection_adapters/redshift_7_1/column'
require 'active_record/connection_adapters/redshift_7_1/oid'
require 'active_record/connection_adapters/redshift_7_1/quoting'
require 'active_record/connection_adapters/redshift_7_1/referential_integrity'
require 'active_record/connection_adapters/redshift_7_1/schema_definitions'
require 'active_record/connection_adapters/redshift_7_1/schema_dumper'
require 'active_record/connection_adapters/redshift_7_1/schema_statements'
require 'active_record/connection_adapters/redshift_7_1/type_metadata'
require 'active_record/connection_adapters/redshift_7_1/database_statements'

require 'active_record/tasks/database_tasks'

require 'pg'

require 'ipaddr'

ActiveRecord::Tasks::DatabaseTasks.register_task(/redshift/, 'ActiveRecord::Tasks::PostgreSQLDatabaseTasks')
module ActiveRecord
  module ConnectionHandling # :nodoc:
    RS_VALID_CONN_PARAMS = %i[host hostaddr port dbname user password connect_timeout
                              client_encoding options application_name fallback_application_name
                              keepalives keepalives_idle keepalives_interval keepalives_count
                              tty sslmode requiressl sslcompression sslcert sslkey
                              sslrootcert sslcrl requirepeer krbsrvname gsslib service].freeze

    # Establishes a connection to the database that's used by all Active Record objects
    def redshift_connection(config)
      conn_params = config.symbolize_keys

      conn_params.delete_if { |_, v| v.nil? }

      # Map ActiveRecords param names to PGs.
      conn_params[:user] = conn_params.delete(:username) if conn_params[:username]
      conn_params[:dbname] = conn_params.delete(:database) if conn_params[:database]

      # Forward only valid config params to PG::Connection.connect.
      conn_params.keep_if { |k, _| RS_VALID_CONN_PARAMS.include?(k) }

      # The postgres drivers don't allow the creation of an unconnected PG::Connection object,
      # so just pass a nil connection object for the time being.
      ConnectionAdapters::RedshiftAdapter.new(nil, logger, conn_params, config)
    end
  end

  module ConnectionAdapters
    # The PostgreSQL adapter works with the native C (https://bitbucket.org/ged/ruby-pg) driver.
    #
    # Options:
    #
    # * <tt>:host</tt> - Defaults to a Unix-domain socket in /tmp. On machines without Unix-domain sockets,
    #   the default is to connect to localhost.
    # * <tt>:port</tt> - Defaults to 5432.
    # * <tt>:username</tt> - Defaults to be the same as the operating system name of the user running the application.
    # * <tt>:password</tt> - Password to be used if the server demands password authentication.
    # * <tt>:database</tt> - Defaults to be the same as the user name.
    # * <tt>:schema_search_path</tt> - An optional schema search path for the connection given
    #   as a string of comma-separated schema names. This is backward-compatible with the <tt>:schema_order</tt> option.
    # * <tt>:encoding</tt> - An optional client encoding that is used in a <tt>SET client_encoding TO
    #   <encoding></tt> call on the connection.
    # * <tt>:min_messages</tt> - An optional client min messages that is used in a
    #   <tt>SET client_min_messages TO <min_messages></tt> call on the connection.
    # * <tt>:variables</tt> - An optional hash of additional parameters that
    #   will be used in <tt>SET SESSION key = val</tt> calls on the connection.
    # * <tt>:insert_returning</tt> - Does nothing for Redshift.
    #
    # Any further options are used as connection parameters to libpq. See
    # http://www.postgresql.org/docs/9.1/static/libpq-connect.html for the
    # list of parameters.
    #
    # In addition, default connection parameters of libpq can be set per environment variables.
    # See http://www.postgresql.org/docs/9.1/static/libpq-envars.html .
    class RedshiftAdapter < AbstractAdapter
      ADAPTER_NAME = 'Redshift'

      NATIVE_DATABASE_TYPES = {
        primary_key: 'integer identity primary key',
        string: { name: 'varchar' },
        text: { name: 'varchar' },
        integer: { name: 'integer' },
        float: { name: 'decimal' },
        decimal: { name: 'decimal' },
        datetime: { name: 'timestamp' },
        time: { name: 'timestamp' },
        date: { name: 'date' },
        bigint: { name: 'bigint' },
        boolean: { name: 'boolean' }
      }.freeze

      OID = Redshift::OID # :nodoc:

      include Redshift::Quoting
      include Redshift::ReferentialIntegrity
      include Redshift::SchemaStatements
      include Redshift::DatabaseStatements

      def schema_creation # :nodoc:
        Redshift::SchemaCreation.new self
      end

      def supports_index_sort_order?
        false
      end

      def supports_partial_index?
        false
      end

      def supports_transaction_isolation?
        false
      end

      def supports_foreign_keys?
        true
      end

      def supports_deferrable_constraints?
        false
      end

      def supports_views?
        true
      end

      def supports_virtual_columns?
        false
      end

      def index_algorithms
        { concurrently: 'CONCURRENTLY' }
      end

      class StatementPool < ConnectionAdapters::StatementPool # :nodoc:
        def initialize(connection, max)
          super(max)
          @raw_connection = connection
          @counter = 0
        end

        def next_key
          "a#{@counter + 1}"
        end

        def []=(sql, key)
          super.tap { @counter += 1 }
        end

        private

        def dealloc(key)
          @raw_connection.query "DEALLOCATE #{key}" if connection_active?
        rescue PG::Error
        end

        def connection_active?
          @raw_connection.status == PG::CONNECTION_OK
        rescue PG::Error
          false
        end
      end

      # Initializes and connects a PostgreSQL adapter.
      def initialize(connection, logger, connection_parameters, config)
        super(connection, logger, config)

        @visitor = Arel::Visitors::PostgreSQL.new self
        @visitor.extend(ConnectionAdapters::DetermineIfPreparableVisitor) if defined?(ConnectionAdapters::DetermineIfPreparableVisitor)
        @prepared_statements = false

        @raw_connection_parameters = connection_parameters

        # @local_tz is initialized as nil to avoid warnings when connect tries to use it
        @local_tz = nil
        @table_alias_length = nil

        connect
        @statements = StatementPool.new @raw_connection,
                                        self.class.type_cast_config_to_integer(config[:statement_limit])

        @type_map = Type::HashLookupTypeMap.new
        initialize_type_map(type_map)
        @local_tz = execute('SHOW TIME ZONE', 'SCHEMA').first['TimeZone']
        @use_insert_returning = @config.key?(:insert_returning) ? self.class.type_cast_config_to_boolean(@config[:insert_returning]) : false
      end

      # Clears the prepared statements cache.
      def clear_cache!(new_connection: false)
        @statements.clear
      end

      def truncate(table_name, name = nil)
        exec_query "TRUNCATE TABLE #{quote_table_name(table_name)}", name, []
      end

      # Is this connection alive and ready for queries?
      def active?
        @raw_connection.query 'SELECT 1'
        true
      rescue PG::Error
        false
      end

      def reload_type_map
        type_map.clear
        initialize_type_map
      end

      # Close then reopen the connection.
      def reconnect!
        super
        @raw_connection.reset
        configure_connection
        reload_type_map
      end

      def reset!
        clear_cache!
        reset_transaction
        @raw_connection.query 'ROLLBACK' unless @raw_connection.transaction_status == ::PG::PQTRANS_IDLE
        @raw_connection.query 'DISCARD ALL'
        configure_connection
      end

      # Disconnects from the database if already connected. Otherwise, this
      # method does nothing.
      def disconnect!
        super
        begin
          @raw_connection.close
        rescue StandardError
          nil
        end
      end

      def native_database_types # :nodoc:
        NATIVE_DATABASE_TYPES
      end

      # Returns true, since this connection adapter supports migrations.
      def supports_migrations?
        true
      end

      # Does PostgreSQL support finding primary key on non-Active Record tables?
      def supports_primary_key? # :nodoc:
        true
      end

      def supports_ddl_transactions?
        true
      end

      def supports_explain?
        true
      end

      def supports_extensions?
        false
      end

      def supports_ranges?
        false
      end

      def supports_materialized_views?
        false
      end

      def supports_import?
        true
      end

      def enable_extension(name); end

      def disable_extension(name); end

      def extension_enabled?(_name)
        false
      end

      # Returns the configured supported identifier length supported by PostgreSQL
      def table_alias_length
        @table_alias_length ||= query('SHOW max_identifier_length', 'SCHEMA')[0][0].to_i
      end

      # Set the authorized user for this session
      def session_auth=(user)
        clear_cache!
        exec_query "SET SESSION AUTHORIZATION #{user}"
      end

      def use_insert_returning?
        false
      end

      def valid_type?(type)
        !native_database_types[type].nil?
      end

      def update_table_definition(table_name, base) # :nodoc:
        Redshift::Table.new(table_name, base)
      end

      def lookup_cast_type(sql_type) # :nodoc:
        oid = execute("SELECT #{quote(sql_type)}::regtype::oid", 'SCHEMA').first['oid'].to_i
        super(oid)
      end

      def column_name_for_operation(operation, _node) # :nodoc:
        OPERATION_ALIASES.fetch(operation) { operation.downcase }
      end

      OPERATION_ALIASES = { # :nodoc:
        'maximum' => 'max',
        'minimum' => 'min',
        'average' => 'avg'
      }.freeze

      protected

      # Returns the version of the connected PostgreSQL server.
      def redshift_version
        @raw_connection.server_version
      end

      def translate_exception(exception, message:, sql:, binds:)
        return exception unless exception.respond_to?(:result)

        case exception.message
        when /duplicate key value violates unique constraint/
          RecordNotUnique.new(message, exception)
        when /violates foreign key constraint/
          InvalidForeignKey.new(message, exception)
        else
          super
        end
      end

      class << self
        def initialize_type_map(m) # :nodoc:
          m.register_type 'int2', Type::Integer.new(limit: 2)
          m.register_type 'int4', Type::Integer.new(limit: 4)
          m.register_type 'int8', Type::Integer.new(limit: 8)
          m.alias_type 'oid', 'int2'
          m.register_type 'float4', Type::Float.new
          m.alias_type 'float8', 'float4'
          m.register_type 'text', Type::Text.new
          register_class_with_limit m, 'varchar', Type::String
          m.alias_type 'char', 'varchar'
          m.alias_type 'name', 'varchar'
          m.alias_type 'bpchar', 'varchar'
          m.register_type 'bool', Type::Boolean.new
          m.alias_type 'timestamptz', 'timestamp'
          m.register_type 'date', Type::Date.new
          m.register_type 'time', Type::Time.new

          m.register_type 'timestamp' do |_, _, sql_type|
            precision = extract_precision(sql_type)
            OID::DateTime.new(precision: precision)
          end

          m.register_type 'numeric' do |_, fmod, sql_type|
            precision = extract_precision(sql_type)
            scale = extract_scale(sql_type)

            # The type for the numeric depends on the width of the field,
            # so we'll do something special here.
            #
            # When dealing with decimal columns:
            #
            # places after decimal = fmod - 4 & 0xffff
            # places before decimal = (fmod - 4) >> 16 & 0xffff
            if fmod && (fmod - 4 & 0xffff) == 0
              # FIXME: Remove this class, and the second argument to
              # lookups on PG
              Type::DecimalWithoutScale.new(precision: precision)
            else
              OID::Decimal.new(precision: precision, scale: scale)
            end
          end
        end
      end

      private

      def get_oid_type(oid, fmod, column_name, sql_type = '') # :nodoc:
        load_additional_types(type_map, [oid]) unless type_map.key?(oid)

        type_map.fetch(oid, fmod, sql_type) do
          warn "unknown OID #{oid}: failed to recognize type of '#{column_name}'. It will be treated as String."
          Type::Value.new.tap do |cast_type|
            type_map.register_type(oid, cast_type)
          end
        end
      end

      def type_map
        @type_map ||= Type::HashLookupTypeMap.new
      end

      def initialize_type_map(m = type_map)
        self.class.initialize_type_map(m)
        load_additional_types(m)
      end

      def extract_limit(sql_type) # :nodoc:
        case sql_type
        when /^bigint/i, /^int8/i
          8
        when /^smallint/i
          2
        else
          super
        end
      end

      # Extracts the value from a PostgreSQL column default definition.
      def extract_value_from_default(default) # :nodoc:
        case default
        # Quoted types
        when /\A[(B]?'(.*)'::/m
          Regexp.last_match(1).gsub(/''/, "'")
        # Boolean types
        when 'true', 'false'
          default
        # Numeric types
        when /\A\(?(-?\d+(\.\d*)?)\)?\z/
          Regexp.last_match(1)
        # Object identifier types
        when /\A-?\d+\z/
          Regexp.last_match(1)
        else # rubocop:disable Style/EmptyElse
          # Anything else is blank, some user type, or some function
          # and we can't know the value of that, so return nil.
          nil
        end
      end

      def extract_default_function(default_value, default) # :nodoc:
        default if has_default_function?(default_value, default)
      end

      def has_default_function?(default_value, default) # :nodoc:
        !default_value && (/\w+\(.*\)/ === default)
      end

      def load_additional_types(type_map, oids = nil) # :nodoc:
        initializer = OID::TypeMapInitializer.new(type_map)

        load_types_queries(initializer, oids) do |query|
          execute_and_clear(query, 'SCHEMA', []) do |records|
            initializer.run(records)
          end
        end
      end

      def load_types_queries(_initializer, oids)
        query =
          if supports_ranges?
            <<-SQL
              SELECT t.oid, t.typname, t.typelem, t.typdelim, t.typinput, r.rngsubtype, t.typtype, t.typbasetype
              FROM pg_type as t
              LEFT JOIN pg_range as r ON oid = rngtypid
            SQL
          else
            <<-SQL
              SELECT t.oid, t.typname, t.typelem, t.typdelim, t.typinput, t.typtype, t.typbasetype
              FROM pg_type as t
            SQL
          end

        if oids
          yield query + 'WHERE t.oid::integer IN (%s)' % oids.join(', ')
        else
          yield query
        end
      end

      FEATURE_NOT_SUPPORTED = '0A000' # :nodoc:

      def execute_and_clear(sql, name, binds, prepare: false, async: false)
        result =
          if without_prepared_statement?(binds)
            exec_no_cache(sql, name, [])
          elsif !prepare
            exec_no_cache(sql, name, binds)
          else
            exec_cache(sql, name, binds)
          end

        ret = yield result
        result.clear
        ret
      end

      def exec_no_cache(sql, name, binds)
        materialize_transactions

        # make sure we carry over any changes to ActiveRecord.default_timezone that have been
        # made since we established the connection
        update_typemap_for_default_timezone

        type_casted_binds = type_casted_binds(binds)
        log(sql, name, binds, type_casted_binds) do
          ActiveSupport::Dependencies.interlock.permit_concurrent_loads do
            @raw_connection.exec_params(sql, type_casted_binds)
          end
        end
      end

      def exec_cache(sql, name, binds)
        materialize_transactions
        update_typemap_for_default_timezone

        stmt_key = prepare_statement(sql, binds)
        type_casted_binds = type_casted_binds(binds)

        log(sql, name, binds, type_casted_binds, stmt_key) do
          ActiveSupport::Dependencies.interlock.permit_concurrent_loads do
            @raw_connection.exec_prepared(stmt_key, type_casted_binds)
          end
        end
      rescue ActiveRecord::StatementInvalid => e
        raise unless is_cached_plan_failure?(e)
        raise ActiveRecord::PreparedStatementCacheExpired, e.cause.message if in_transaction?

        @lock.synchronize do
          # outside of transactions we can simply flush this query and retry
          @statements.delete sql_key(sql)
        end

        retry
      end

      # Annoyingly, the code for prepared statements whose return value may
      # have changed is FEATURE_NOT_SUPPORTED.
      #
      # This covers various different error types so we need to do additional
      # work to classify the exception definitively as a
      # ActiveRecord::PreparedStatementCacheExpired
      #
      # Check here for more details:
      # https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/backend/utils/cache/plancache.c#l573
      CACHED_PLAN_HEURISTIC = 'cached plan must not change result type'
      def is_cached_plan_failure?(e)
        pgerror = e.cause
        code = pgerror.result.result_error_field(PG::PG_DIAG_SQLSTATE)
        code == FEATURE_NOT_SUPPORTED && pgerror.message.include?(CACHED_PLAN_HEURISTIC)
      rescue StandardError
        false
      end

      # Returns the statement identifier for the client side cache
      # of statements
      def sql_key(sql)
        "#{schema_search_path}-#{sql}"
      end

      # Prepare the statement if it hasn't been prepared, return
      # the statement key.
      def prepare_statement(sql, binds)
        @lock.synchronize do
          sql_key = sql_key(sql)
          unless @statements.key? sql_key
            nextkey = @statements.next_key
            begin
              @raw_connection.prepare nextkey, sql
            rescue StandardError => e
              raise translate_exception_class(e, sql, binds)
            end
            # Clear the queue
            @raw_connection.get_last_result
            @statements[sql_key] = nextkey
          end
          @statements[sql_key]
        end
      end

      # Connects to a PostgreSQL server and sets up the adapter depending on the
      # connected server's characteristics.
      def connect
        @raw_connection = PG.connect(@raw_connection_parameters)
        configure_connection
        add_pg_encoders
        add_pg_decoders
      end

      # Configures the encoding, verbosity, schema search path, and time zone of the connection.
      # This is called by #connect and should not be called manually.
      def configure_connection
        @raw_connection.set_client_encoding(@config[:encoding]) if @config[:encoding]
        self.schema_search_path = @config[:schema_search_path] || @config[:schema_order]

        variables = @config.fetch(:variables, {}).stringify_keys

        # If using Active Record's time zone support configure the connection to return
        # TIMESTAMP WITH ZONE types in UTC.
        unless variables['timezone']
          if ActiveRecord.default_timezone == :utc
            variables['timezone'] = 'UTC'
          elsif @local_tz
            variables['timezone'] = @local_tz
          end
        end

        # SET statements from :variables config hash
        # https://www.postgresql.org/docs/current/static/sql-set.html
        variables.map do |k, v|
          if [':default', :default].include?(v)
            # Sets the value to the global or compile default
            execute("SET #{k} TO DEFAULT", 'SCHEMA')
          elsif !v.nil?
            execute("SET #{k} TO #{quote(v)}", 'SCHEMA')
          end
        end
      end

      def last_insert_id_result(sequence_name) # :nodoc:
        exec_query("SELECT currval('#{sequence_name}')", 'SQL')
      end

      # Returns the list of a table's column names, data types, and default values.
      #
      # The underlying query is roughly:
      #   SELECT column.name, column.type, default.value
      #     FROM column LEFT JOIN default
      #       ON column.table_id = default.table_id
      #      AND column.num = default.column_num
      #    WHERE column.table_id = get_table_id('table_name')
      #      AND column.num > 0
      #      AND NOT column.is_dropped
      #    ORDER BY column.num
      #
      # If the table name is not prefixed with a schema, the database will
      # take the first match from the schema search path.
      #
      # Query implementation notes:
      #  - format_type includes the column size constraint, e.g. varchar(50)
      #  - ::regclass is a function that gives the id for a table name
      def column_definitions(table_name) # :nodoc:
        query(<<-END_SQL, 'SCHEMA')
          SELECT a.attname, format_type(a.atttypid, a.atttypmod),
                 pg_get_expr(d.adbin, d.adrelid), a.attnotnull, a.atttypid, a.atttypmod
            FROM pg_attribute a LEFT JOIN pg_attrdef d
              ON a.attrelid = d.adrelid AND a.attnum = d.adnum
           WHERE a.attrelid = '#{quote_table_name(table_name)}'::regclass
             AND a.attnum > 0 AND NOT a.attisdropped
           ORDER BY a.attnum
        END_SQL
      end

      def extract_table_ref_from_insert_sql(sql)
        sql[/into\s("[A-Za-z0-9_."\[\]\s]+"|[A-Za-z0-9_."\[\]]+)\s*/im]
        Regexp.last_match(1)&.strip
      end

      def arel_visitor
        Arel::Visitors::PostgreSQL.new(self)
      end

      def build_statement_pool
        StatementPool.new(@raw_connection, self.class.type_cast_config_to_integer(@config[:statement_limit]))
      end

      def can_perform_case_insensitive_comparison_for?(column)
        @case_insensitive_cache ||= {}
        @case_insensitive_cache[column.sql_type] ||= begin
          sql = <<~SQL
            SELECT exists(
              SELECT * FROM pg_proc
              WHERE proname = 'lower'
                AND proargtypes = ARRAY[#{quote column.sql_type}::regtype]::oidvector
            ) OR exists(
              SELECT * FROM pg_proc
              INNER JOIN pg_cast
                ON ARRAY[casttarget]::oidvector = proargtypes
              WHERE proname = 'lower'
                AND castsource = #{quote column.sql_type}::regtype
            )
          SQL
          execute_and_clear(sql, 'SCHEMA', []) do |result|
            result.getvalue(0, 0)
          end
        end
      end

      def add_pg_encoders
        map = PG::TypeMapByClass.new
        map[Integer] = PG::TextEncoder::Integer.new
        map[TrueClass] = PG::TextEncoder::Boolean.new
        map[FalseClass] = PG::TextEncoder::Boolean.new
        @raw_connection.type_map_for_queries = map
      end

      def update_typemap_for_default_timezone
        return if @default_timezone == ActiveRecord.default_timezone || !@timestamp_decoder

        decoder_class =
          if ActiveRecord.default_timezone == :utc
            PG::TextDecoder::TimestampUtc
          else
            PG::TextDecoder::TimestampWithoutTimeZone
          end

        @timestamp_decoder = decoder_class.new(@timestamp_decoder.to_h)
        @raw_connection.type_map_for_results.add_coder(@timestamp_decoder)
        @default_timezone = ActiveRecord.default_timezone

        # if default timezone has changed, we need to reconfigure the connection
        # (specifically, the session time zone)
        configure_connection
      end

      def add_pg_decoders
        @default_timezone = nil
        @timestamp_decoder = nil

        coders_by_name = {
          'int2' => PG::TextDecoder::Integer,
          'int4' => PG::TextDecoder::Integer,
          'int8' => PG::TextDecoder::Integer,
          'oid' => PG::TextDecoder::Integer,
          'float4' => PG::TextDecoder::Float,
          'float8' => PG::TextDecoder::Float,
          'bool' => PG::TextDecoder::Boolean
        }

        if defined?(PG::TextDecoder::TimestampUtc)
          # Use native PG encoders available since pg-1.1
          coders_by_name['timestamp'] = PG::TextDecoder::TimestampUtc
          coders_by_name['timestamptz'] = PG::TextDecoder::TimestampWithTimeZone
        end

        known_coder_types = coders_by_name.keys.map { |n| quote(n) }
        query = <<~SQL % known_coder_types.join(', ')
          SELECT t.oid, t.typname
          FROM pg_type as t
          WHERE t.typname IN (%s)
        SQL
        coders = execute_and_clear(query, 'SCHEMA', []) do |result|
          result.filter_map { |row| construct_coder(row, coders_by_name[row['typname']]) }
        end

        map = PG::TypeMapByOid.new
        coders.each { |coder| map.add_coder(coder) }
        @raw_connection.type_map_for_results = map

        # extract timestamp decoder for use in update_typemap_for_default_timezone
        @timestamp_decoder = coders.find { |coder| coder.name == 'timestamp' }
        update_typemap_for_default_timezone
      end

      def construct_coder(row, coder_class)
        return unless coder_class

        coder_class.new(oid: row['oid'].to_i, name: row['typname'])
      end

      def create_table_definition(*args, **options) # :nodoc:
        Redshift::TableDefinition.new(self, *args, **options)
      end
    end
  end
end
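As a quick illustration of the NATIVE_DATABASE_TYPES table above: :string and :text both map to varchar, :datetime and :time map to timestamp, and the default primary key is emitted as "integer identity primary key". A migration sketch follows (assuming a Rails 7.1 application; the table and column names are invented for the example).

class CreateEvents < ActiveRecord::Migration[7.1]
  def change
    create_table :events do |t|                     # id -> "integer identity primary key"
      t.string   :name                              # -> varchar
      t.text     :payload                           # -> varchar (same mapping as :string)
      t.decimal  :amount, precision: 10, scale: 2   # -> decimal
      t.datetime :happened_at                       # -> timestamp
    end
  end
end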