activerecord5-redshift-adapter 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +54 -0
- data/README.md +42 -0
- data/lib/active_record/connection_adapters/redshift/array_parser.rb +93 -0
- data/lib/active_record/connection_adapters/redshift/column.rb +11 -0
- data/lib/active_record/connection_adapters/redshift/database_statements.rb +205 -0
- data/lib/active_record/connection_adapters/redshift/oid.rb +15 -0
- data/lib/active_record/connection_adapters/redshift/oid/date_time.rb +34 -0
- data/lib/active_record/connection_adapters/redshift/oid/decimal.rb +13 -0
- data/lib/active_record/connection_adapters/redshift/oid/json.rb +35 -0
- data/lib/active_record/connection_adapters/redshift/oid/jsonb.rb +23 -0
- data/lib/active_record/connection_adapters/redshift/oid/type_map_initializer.rb +64 -0
- data/lib/active_record/connection_adapters/redshift/quoting.rb +99 -0
- data/lib/active_record/connection_adapters/redshift/referential_integrity.rb +15 -0
- data/lib/active_record/connection_adapters/redshift/schema_definitions.rb +67 -0
- data/lib/active_record/connection_adapters/redshift/schema_dumper.rb +15 -0
- data/lib/active_record/connection_adapters/redshift/schema_statements.rb +419 -0
- data/lib/active_record/connection_adapters/redshift/type_metadata.rb +37 -0
- data/lib/active_record/connection_adapters/redshift/utils.rb +77 -0
- data/lib/active_record/connection_adapters/redshift_adapter.rb +640 -0
- metadata +91 -0
data/lib/active_record/connection_adapters/redshift/type_metadata.rb
@@ -0,0 +1,37 @@
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      class TypeMetadata < DelegateClass(SqlTypeMetadata)
        attr_reader :oid, :fmod, :array

        def initialize(type_metadata, oid: nil, fmod: nil)
          super(type_metadata)
          @type_metadata = type_metadata
          @oid = oid
          @fmod = fmod
          @array = /\[\]$/ === type_metadata.sql_type
        end

        def sql_type
          super.gsub(/\[\]$/, "".freeze)
        end

        def ==(other)
          other.is_a?(Redshift::TypeMetadata) &&
            attributes_for_hash == other.attributes_for_hash
        end
        alias eql? ==

        def hash
          attributes_for_hash.hash
        end

        protected

          def attributes_for_hash
            [self.class, @type_metadata, oid, fmod]
          end
      end
    end
  end
end
data/lib/active_record/connection_adapters/redshift/utils.rb
@@ -0,0 +1,77 @@
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      # Value Object to hold a schema qualified name.
      # This is usually the name of a PostgreSQL relation but it can also represent
      # schema qualified type names. +schema+ and +identifier+ are unquoted to prevent
      # double quoting.
      class Name # :nodoc:
        SEPARATOR = "."
        attr_reader :schema, :identifier

        def initialize(schema, identifier)
          @schema, @identifier = unquote(schema), unquote(identifier)
        end

        def to_s
          parts.join SEPARATOR
        end

        def quoted
          if schema
            PGconn.quote_ident(schema) << SEPARATOR << PGconn.quote_ident(identifier)
          else
            PGconn.quote_ident(identifier)
          end
        end

        def ==(o)
          o.class == self.class && o.parts == parts
        end
        alias_method :eql?, :==

        def hash
          parts.hash
        end

        protected
          def unquote(part)
            if part && part.start_with?('"')
              part[1..-2]
            else
              part
            end
          end

          def parts
            @parts ||= [@schema, @identifier].compact
          end
      end

      module Utils # :nodoc:
        extend self

        # Returns an instance of <tt>ActiveRecord::ConnectionAdapters::PostgreSQL::Name</tt>
        # extracted from +string+.
        # +schema+ is nil if not specified in +string+.
        # +schema+ and +identifier+ exclude surrounding quotes (regardless of whether provided in +string+)
        # +string+ supports the range of schema/table references understood by PostgreSQL, for example:
        #
        # * <tt>table_name</tt>
        # * <tt>"table.name"</tt>
        # * <tt>schema_name.table_name</tt>
        # * <tt>schema_name."table.name"</tt>
        # * <tt>"schema_name".table_name</tt>
        # * <tt>"schema.name"."table name"</tt>
        def extract_schema_qualified_name(string)
          schema, table = string.scan(/[^".\s]+|"[^"]*"/)
          if table.nil?
            table = schema
            schema = nil
          end
          Redshift::Name.new(schema, table)
        end
      end
    end
  end
end
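As a quick illustration of the parsing rules documented in the comment block above, the following sketch (an assumption of mine, not part of the package; it only needs the gem installed on the load path, since `Utils` has no runtime dependency beyond plain Ruby unless `#quoted` is called) shows how a schema-qualified reference is split apart:

```ruby
require 'active_record/connection_adapters/redshift/utils'

# Hypothetical input; surrounding quotes are stripped by Name#unquote.
name = ActiveRecord::ConnectionAdapters::Redshift::Utils
         .extract_schema_qualified_name('"analytics"."daily.events"')

name.schema      # => "analytics"
name.identifier  # => "daily.events"
name.to_s        # => "analytics.daily.events"
# name.quoted would re-quote both parts via PGconn.quote_ident (requires the pg gem).
```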
data/lib/active_record/connection_adapters/redshift_adapter.rb
@@ -0,0 +1,640 @@
require 'active_record/connection_adapters/abstract_adapter'
require 'active_record/connection_adapters/statement_pool'

require 'active_record/connection_adapters/redshift/utils'
require 'active_record/connection_adapters/redshift/column'
require 'active_record/connection_adapters/redshift/oid'
require 'active_record/connection_adapters/redshift/quoting'
require 'active_record/connection_adapters/redshift/referential_integrity'
require 'active_record/connection_adapters/redshift/schema_definitions'
require 'active_record/connection_adapters/redshift/schema_dumper'
require 'active_record/connection_adapters/redshift/schema_statements'
require 'active_record/connection_adapters/redshift/type_metadata'
require 'active_record/connection_adapters/redshift/database_statements'

require 'pg'

require 'ipaddr'

module ActiveRecord
  module ConnectionHandling # :nodoc:
    RS_VALID_CONN_PARAMS = [:host, :hostaddr, :port, :dbname, :user, :password, :connect_timeout,
                            :client_encoding, :options, :application_name, :fallback_application_name,
                            :keepalives, :keepalives_idle, :keepalives_interval, :keepalives_count,
                            :tty, :sslmode, :requiressl, :sslcompression, :sslcert, :sslkey,
                            :sslrootcert, :sslcrl, :requirepeer, :krbsrvname, :gsslib, :service]

    # Establishes a connection to the database that's used by all Active Record objects
    def redshift_connection(config)
      conn_params = config.symbolize_keys

      conn_params.delete_if { |_, v| v.nil? }

      # Map ActiveRecords param names to PGs.
      conn_params[:user] = conn_params.delete(:username) if conn_params[:username]
      conn_params[:dbname] = conn_params.delete(:database) if conn_params[:database]

      # Forward only valid config params to PGconn.connect.
      conn_params.keep_if { |k, _| RS_VALID_CONN_PARAMS.include?(k) }

      # The postgres drivers don't allow the creation of an unconnected PGconn object,
      # so just pass a nil connection object for the time being.
      ConnectionAdapters::RedshiftAdapter.new(nil, logger, conn_params, config)
    end
  end

  module ConnectionAdapters
    # The PostgreSQL adapter works with the native C (https://bitbucket.org/ged/ruby-pg) driver.
    #
    # Options:
    #
    # * <tt>:host</tt> - Defaults to a Unix-domain socket in /tmp. On machines without Unix-domain sockets,
    #   the default is to connect to localhost.
    # * <tt>:port</tt> - Defaults to 5432.
    # * <tt>:username</tt> - Defaults to be the same as the operating system name of the user running the application.
    # * <tt>:password</tt> - Password to be used if the server demands password authentication.
    # * <tt>:database</tt> - Defaults to be the same as the user name.
    # * <tt>:schema_search_path</tt> - An optional schema search path for the connection given
    #   as a string of comma-separated schema names. This is backward-compatible with the <tt>:schema_order</tt> option.
    # * <tt>:encoding</tt> - An optional client encoding that is used in a <tt>SET client_encoding TO
    #   <encoding></tt> call on the connection.
    # * <tt>:min_messages</tt> - An optional client min messages that is used in a
    #   <tt>SET client_min_messages TO <min_messages></tt> call on the connection.
    # * <tt>:variables</tt> - An optional hash of additional parameters that
    #   will be used in <tt>SET SESSION key = val</tt> calls on the connection.
    # * <tt>:insert_returning</tt> - Does nothing for Redshift.
    #
    # Any further options are used as connection parameters to libpq. See
    # http://www.postgresql.org/docs/9.1/static/libpq-connect.html for the
    # list of parameters.
    #
    # In addition, default connection parameters of libpq can be set per environment variables.
    # See http://www.postgresql.org/docs/9.1/static/libpq-envars.html .
    class RedshiftAdapter < AbstractAdapter
      ADAPTER_NAME = 'Redshift'.freeze

      NATIVE_DATABASE_TYPES = {
        primary_key: "integer identity primary key",
        string: { name: "varchar" },
        text: { name: "varchar" },
        integer: { name: "integer" },
        float: { name: "float" },
        decimal: { name: "decimal" },
        datetime: { name: "timestamp" },
        time: { name: "time" },
        date: { name: "date" },
        bigint: { name: "bigint" },
        boolean: { name: "boolean" },
      }

      OID = Redshift::OID #:nodoc:

      include Redshift::Quoting
      include Redshift::ReferentialIntegrity
      include Redshift::SchemaStatements
      include Redshift::DatabaseStatements

      def schema_creation # :nodoc:
        Redshift::SchemaCreation.new self
      end

      # Returns +true+, since this connection adapter supports prepared statement
      # caching.
      def supports_statement_cache?
        true
      end

      def supports_index_sort_order?
        false
      end

      def supports_partial_index?
        false
      end

      def supports_transaction_isolation?
        false
      end

      def supports_foreign_keys?
        true
      end

      def supports_views?
        true
      end

      def index_algorithms
        { concurrently: 'CONCURRENTLY' }
      end

      class StatementPool < ConnectionAdapters::StatementPool
        def initialize(connection, max)
          super(max)
          @connection = connection
          @counter = 0
          @cache = Hash.new { |h,pid| h[pid] = {} }
        end

        def each(&block); cache.each(&block); end
        def key?(key); cache.key?(key); end
        def [](key); cache[key]; end
        def length; cache.length; end

        def next_key
          "a#{@counter + 1}"
        end

        def []=(sql, key)
          while @max <= cache.size
            dealloc(cache.shift.last)
          end
          @counter += 1
          cache[sql] = key
        end

        def clear
          cache.each_value do |stmt_key|
            dealloc stmt_key
          end
          cache.clear
        end

        def delete(sql_key)
          dealloc cache[sql_key]
          cache.delete sql_key
        end

        private

          def cache
            @cache[Process.pid]
          end

          def dealloc(key)
            @connection.query "DEALLOCATE #{key}" if connection_active?
          end

          def connection_active?
            @connection.status == PGconn::CONNECTION_OK
          rescue PGError
            false
          end
      end

      # Initializes and connects a PostgreSQL adapter.
      def initialize(connection, logger, connection_parameters, config)
        super(connection, logger, config)

        @visitor = Arel::Visitors::PostgreSQL.new self
        @prepared_statements = false

        @connection_parameters = connection_parameters

        # @local_tz is initialized as nil to avoid warnings when connect tries to use it
        @local_tz = nil
        @table_alias_length = nil

        connect
        @statements = StatementPool.new @connection,
                                        self.class.type_cast_config_to_integer(config[:statement_limit])

        @type_map = Type::HashLookupTypeMap.new
        initialize_type_map(type_map)
        @local_tz = execute('SHOW TIME ZONE', 'SCHEMA').first["TimeZone"]
        @use_insert_returning = @config.key?(:insert_returning) ? self.class.type_cast_config_to_boolean(@config[:insert_returning]) : false
      end

      # Clears the prepared statements cache.
      def clear_cache!
        @statements.clear
      end

      def truncate(table_name, name = nil)
        exec_query "TRUNCATE TABLE #{quote_table_name(table_name)}", name, []
      end

      # Is this connection alive and ready for queries?
      def active?
        @connection.query 'SELECT 1'
        true
      rescue PGError
        false
      end

      # Close then reopen the connection.
      def reconnect!
        super
        @connection.reset
        configure_connection
      end

      def reset!
        clear_cache!
        reset_transaction
        unless @connection.transaction_status == ::PG::PQTRANS_IDLE
          @connection.query 'ROLLBACK'
        end
        @connection.query 'DISCARD ALL'
        configure_connection
      end

      # Disconnects from the database if already connected. Otherwise, this
      # method does nothing.
      def disconnect!
        super
        @connection.close rescue nil
      end

      def native_database_types #:nodoc:
        NATIVE_DATABASE_TYPES
      end

      # Returns true, since this connection adapter supports migrations.
      def supports_migrations?
        true
      end

      # Does PostgreSQL support finding primary key on non-Active Record tables?
      def supports_primary_key? #:nodoc:
        true
      end

      # Enable standard-conforming strings if available.
      def set_standard_conforming_strings
        old, self.client_min_messages = client_min_messages, 'panic'
        execute('SET standard_conforming_strings = on', 'SCHEMA') rescue nil
      ensure
        self.client_min_messages = old
      end

      def supports_ddl_transactions?
        true
      end

      def supports_explain?
        true
      end

      def supports_extensions?
        false
      end

      def supports_ranges?
        false
      end

      def supports_materialized_views?
        false
      end

      def supports_import?
        true
      end

      def enable_extension(name)
      end

      def disable_extension(name)
      end

      def extension_enabled?(name)
        false
      end

      # Returns the configured supported identifier length supported by PostgreSQL
      def table_alias_length
        @table_alias_length ||= query('SHOW max_identifier_length', 'SCHEMA')[0][0].to_i
      end

      # Set the authorized user for this session
      def session_auth=(user)
        clear_cache!
        exec_query "SET SESSION AUTHORIZATION #{user}"
      end

      def use_insert_returning?
        false
      end

      def valid_type?(type)
        !native_database_types[type].nil?
      end

      def update_table_definition(table_name, base) #:nodoc:
        Redshift::Table.new(table_name, base)
      end

      def lookup_cast_type(sql_type) # :nodoc:
        oid = execute("SELECT #{quote(sql_type)}::regtype::oid", "SCHEMA").first['oid'].to_i
        super(oid)
      end

      def column_name_for_operation(operation, node) # :nodoc:
        OPERATION_ALIASES.fetch(operation) { operation.downcase }
      end

      OPERATION_ALIASES = { # :nodoc:
        "maximum" => "max",
        "minimum" => "min",
        "average" => "avg",
      }

      protected

        # Returns the version of the connected PostgreSQL server.
        def redshift_version
          @connection.server_version
        end

        def translate_exception(exception, message)
          return exception unless exception.respond_to?(:result)

          case exception.message
          when /duplicate key value violates unique constraint/
            RecordNotUnique.new(message, exception)
          when /violates foreign key constraint/
            InvalidForeignKey.new(message, exception)
          else
            super
          end
        end

      private

        def get_oid_type(oid, fmod, column_name, sql_type = '') # :nodoc:
          if !type_map.key?(oid)
            load_additional_types(type_map, [oid])
          end

          type_map.fetch(oid, fmod, sql_type) {
            warn "unknown OID #{oid}: failed to recognize type of '#{column_name}'. It will be treated as String."
            Type::Value.new.tap do |cast_type|
              type_map.register_type(oid, cast_type)
            end
          }
        end

        def initialize_type_map(m) # :nodoc:
          register_class_with_limit m, 'int2', Type::Integer
          register_class_with_limit m, 'int4', Type::Integer
          register_class_with_limit m, 'int8', Type::Integer
          m.alias_type 'oid', 'int2'
          m.register_type 'float4', Type::Float.new
          m.alias_type 'float8', 'float4'
          m.register_type 'text', Type::Text.new
          register_class_with_limit m, 'varchar', Type::String
          m.alias_type 'char', 'varchar'
          m.alias_type 'name', 'varchar'
          m.alias_type 'bpchar', 'varchar'
          m.register_type 'bool', Type::Boolean.new
          m.alias_type 'timestamptz', 'timestamp'
          m.register_type 'date', Type::Date.new
          m.register_type 'time', Type::Time.new

          m.register_type 'timestamp' do |_, _, sql_type|
            precision = extract_precision(sql_type)
            OID::DateTime.new(precision: precision)
          end

          m.register_type 'numeric' do |_, fmod, sql_type|
            precision = extract_precision(sql_type)
            scale = extract_scale(sql_type)

            # The type for the numeric depends on the width of the field,
            # so we'll do something special here.
            #
            # When dealing with decimal columns:
            #
            # places after decimal  = fmod - 4 & 0xffff
            # places before decimal = (fmod - 4) >> 16 & 0xffff
            if fmod && (fmod - 4 & 0xffff).zero?
              # FIXME: Remove this class, and the second argument to
              # lookups on PG
              Type::DecimalWithoutScale.new(precision: precision)
            else
              OID::Decimal.new(precision: precision, scale: scale)
            end
          end

          load_additional_types(m)
        end

        def extract_limit(sql_type) # :nodoc:
          case sql_type
          when /^bigint/i, /^int8/i
            8
          when /^smallint/i
            2
          else
            super
          end
        end

        # Extracts the value from a PostgreSQL column default definition.
        def extract_value_from_default(default) # :nodoc:
          case default
            # Quoted types
          when /\A[\(B]?'(.*)'::/m
            $1.gsub(/''/, "'")
            # Boolean types
          when 'true', 'false'
            default
            # Numeric types
          when /\A\(?(-?\d+(\.\d*)?)\)?\z/
            $1
            # Object identifier types
          when /\A-?\d+\z/
            $1
          else
            # Anything else is blank, some user type, or some function
            # and we can't know the value of that, so return nil.
            nil
          end
        end

        def extract_default_function(default_value, default) # :nodoc:
          default if has_default_function?(default_value, default)
        end

        def has_default_function?(default_value, default) # :nodoc:
          !default_value && (%r{\w+\(.*\)} === default)
        end

        def load_additional_types(type_map, oids = nil) # :nodoc:
          initializer = OID::TypeMapInitializer.new(type_map)

          if supports_ranges?
            query = <<-SQL
              SELECT t.oid, t.typname, t.typelem, t.typdelim, t.typinput, r.rngsubtype, t.typtype, t.typbasetype
              FROM pg_type as t
              LEFT JOIN pg_range as r ON oid = rngtypid
            SQL
          else
            query = <<-SQL
              SELECT t.oid, t.typname, t.typelem, t.typdelim, t.typinput, t.typtype, t.typbasetype
              FROM pg_type as t
            SQL
          end

          if oids
            query += "WHERE t.oid::integer IN (%s)" % oids.join(", ")
          end

          execute_and_clear(query, 'SCHEMA', []) do |records|
            initializer.run(records)
          end
        end

        FEATURE_NOT_SUPPORTED = "0A000" #:nodoc:

        def execute_and_clear(sql, name, binds, prepare: false)
          if without_prepared_statement?(binds)
            result = exec_no_cache(sql, name, [])
          elsif !prepare
            result = exec_no_cache(sql, name, binds)
          else
            result = exec_cache(sql, name, binds)
          end
          ret = yield result
          result.clear
          ret
        end

        def exec_no_cache(sql, name, binds)
          log(sql, name, binds) { @connection.async_exec(sql, []) }
        end

        def exec_cache(sql, name, binds)
          stmt_key = prepare_statement(sql)
          type_casted_binds = binds.map { |col, val|
            [col, type_cast(val, col)]
          }

          log(sql, name, type_casted_binds, stmt_key) do
            @connection.exec_prepared(stmt_key, type_casted_binds.map { |_, val| val })
          end
        rescue ActiveRecord::StatementInvalid => e
          pgerror = e.original_exception

          # Get the PG code for the failure. Annoyingly, the code for
          # prepared statements whose return value may have changed is
          # FEATURE_NOT_SUPPORTED. Check here for more details:
          # http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/backend/utils/cache/plancache.c#l573
          begin
            code = pgerror.result.result_error_field(PGresult::PG_DIAG_SQLSTATE)
          rescue
            raise e
          end
          if FEATURE_NOT_SUPPORTED == code
            @statements.delete sql_key(sql)
            retry
          else
            raise e
          end
        end

        # Returns the statement identifier for the client side cache
        # of statements
        def sql_key(sql)
          "#{schema_search_path}-#{sql}"
        end

        # Prepare the statement if it hasn't been prepared, return
        # the statement key.
        def prepare_statement(sql)
          sql_key = sql_key(sql)
          unless @statements.key? sql_key
            nextkey = @statements.next_key
            begin
              @connection.prepare nextkey, sql
            rescue => e
              raise translate_exception_class(e, sql)
            end
            # Clear the queue
            @connection.get_last_result
            @statements[sql_key] = nextkey
          end
          @statements[sql_key]
        end

        # Connects to a PostgreSQL server and sets up the adapter depending on the
        # connected server's characteristics.
        def connect
          @connection = PGconn.connect(@connection_parameters)

          configure_connection
        rescue ::PG::Error => error
          if error.message.include?("does not exist")
            raise ActiveRecord::NoDatabaseError.new(error.message, error)
          else
            raise
          end
        end

        # Configures the encoding, verbosity, schema search path, and time zone of the connection.
        # This is called by #connect and should not be called manually.
        def configure_connection
          if @config[:encoding]
            @connection.set_client_encoding(@config[:encoding])
          end
          self.schema_search_path = @config[:schema_search_path] || @config[:schema_order]

          # SET statements from :variables config hash
          # http://www.postgresql.org/docs/8.3/static/sql-set.html
          variables = @config[:variables] || {}
          variables.map do |k, v|
            if v == ':default' || v == :default
              # Sets the value to the global or compile default
              execute("SET SESSION #{k} TO DEFAULT", 'SCHEMA')
            elsif !v.nil?
              execute("SET SESSION #{k} TO #{quote(v)}", 'SCHEMA')
            end
          end
        end

        def last_insert_id_result(sequence_name) #:nodoc:
          exec_query("SELECT currval('#{sequence_name}')", 'SQL')
        end

        # Returns the list of a table's column names, data types, and default values.
        #
        # The underlying query is roughly:
        #   SELECT column.name, column.type, default.value
        #   FROM column LEFT JOIN default
        #   ON column.table_id = default.table_id
        #   AND column.num = default.column_num
        #   WHERE column.table_id = get_table_id('table_name')
        #   AND column.num > 0
        #   AND NOT column.is_dropped
        #   ORDER BY column.num
        #
        # If the table name is not prefixed with a schema, the database will
        # take the first match from the schema search path.
        #
        # Query implementation notes:
        #  - format_type includes the column size constraint, e.g. varchar(50)
        #  - ::regclass is a function that gives the id for a table name
        def column_definitions(table_name) # :nodoc:
          query(<<-end_sql, 'SCHEMA')
              SELECT a.attname, format_type(a.atttypid, a.atttypmod),
                     pg_get_expr(d.adbin, d.adrelid), a.attnotnull, a.atttypid, a.atttypmod
                FROM pg_attribute a LEFT JOIN pg_attrdef d
                  ON a.attrelid = d.adrelid AND a.attnum = d.adnum
               WHERE a.attrelid = '#{quote_table_name(table_name)}'::regclass
                 AND a.attnum > 0 AND NOT a.attisdropped
               ORDER BY a.attnum
          end_sql
        end

        def extract_table_ref_from_insert_sql(sql) # :nodoc:
          sql[/into\s("[A-Za-z0-9_."\[\]\s]+"|[A-Za-z0-9_."\[\]]+)\s*/im]
          $1.strip if $1
        end

        def create_table_definition(*args) # :nodoc:
          Redshift::TableDefinition.new(*args)
        end
    end
  end
end
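For context, the options documented in the adapter's comment block map onto an ordinary Active Record configuration: the gem registers a `redshift_connection` entry point, so Rails resolves `adapter: 'redshift'` to it. The sketch below is an illustration only, not part of the package; the host, credentials, schemas and the session variable are placeholder values, and it assumes the gem is in the application's bundle alongside Active Record 5 and the pg driver.

```ruby
require 'active_record'

# Keys not in RS_VALID_CONN_PARAMS (after :username/:database are remapped
# to :user/:dbname) are dropped before being handed to the pg driver;
# :schema_search_path and :variables are applied in configure_connection.
ActiveRecord::Base.establish_connection(
  adapter:            'redshift',
  host:               'example-cluster.example.com',   # placeholder endpoint
  port:               5439,                            # Redshift clusters typically listen on 5439
  database:           'analytics',
  username:           'app_user',
  password:           ENV['REDSHIFT_PASSWORD'],
  schema_search_path: 'public,analytics',
  variables:          { 'statement_timeout' => '60000' } # issued as SET SESSION statement_timeout TO '60000'
)

ActiveRecord::Base.connection.execute('SELECT 1')
```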