activerecord5-redshift-adapter 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,13 @@
+module ActiveRecord
+  module ConnectionAdapters
+    module Redshift
+      module OID # :nodoc:
+        class Decimal < Type::Decimal # :nodoc:
+          def infinity(options = {})
+            BigDecimal.new("Infinity") * (options[:negative] ? -1 : 1)
+          end
+        end
+      end
+    end
+  end
+end
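
The Decimal type above produces a signed BigDecimal infinity for unbounded numeric values. A minimal sketch of what it evaluates to, in plain Ruby outside the adapter; note that BigDecimal.new (as used in the diff, which targets Rails 5-era Rubies) is deprecated on newer Rubies, so the Kernel#BigDecimal form below is the portable spelling:

    require "bigdecimal"

    # What OID::Decimal#infinity returns for each sign.
    positive = BigDecimal("Infinity")
    negative = BigDecimal("Infinity") * -1   # the options[:negative] branch

    positive.infinite?  # => 1
    negative.infinite?  # => -1
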
@@ -0,0 +1,35 @@
+module ActiveRecord
+  module ConnectionAdapters
+    module Redshift
+      module OID # :nodoc:
+        class Json < Type::Value # :nodoc:
+          include Type::Helpers::Mutable
+
+          def type
+            :json
+          end
+
+          def type_cast_from_database(value)
+            if value.is_a?(::String)
+              ::ActiveSupport::JSON.decode(value) rescue nil
+            else
+              super
+            end
+          end
+
+          def type_cast_for_database(value)
+            if value.is_a?(::Array) || value.is_a?(::Hash)
+              ::ActiveSupport::JSON.encode(value)
+            else
+              super
+            end
+          end
+
+          def accessor
+            ActiveRecord::Store::StringKeyedHashAccessor
+          end
+        end
+      end
+    end
+  end
+end
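
The Json type above decodes JSON strings coming from the database (returning nil when the string cannot be parsed) and encodes Ruby hashes and arrays going in. A rough round-trip illustration, assuming the gem is loaded so the class is defined; the calls use the Rails 4.2-style type-cast method names that the class itself defines:

    require "active_support/json"

    json = ActiveRecord::ConnectionAdapters::Redshift::OID::Json.new

    json.type_cast_for_database({ "a" => 1, "b" => [2, 3] })  # => '{"a":1,"b":[2,3]}'
    json.type_cast_from_database('{"a":1,"b":[2,3]}')         # => { "a" => 1, "b" => [2, 3] }
    json.type_cast_from_database("not json")                  # => nil (decode error is rescued)
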
@@ -0,0 +1,23 @@
+module ActiveRecord
+  module ConnectionAdapters
+    module Redshift
+      module OID # :nodoc:
+        class Jsonb < Json # :nodoc:
+          def type
+            :jsonb
+          end
+
+          def changed_in_place?(raw_old_value, new_value)
+            # Postgres does not preserve insignificant whitespaces when
+            # roundtripping jsonb columns. This causes some false positives for
+            # the comparison here. Therefore, we need to parse and re-dump the
+            # raw value here to ensure the insignificant whitespaces are
+            # consistent with our encoder's output.
+            raw_old_value = type_cast_for_database(type_cast_from_database(raw_old_value))
+            super(raw_old_value, new_value)
+          end
+        end
+      end
+    end
+  end
+end
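
The comment inside changed_in_place? is the whole story: the server may hand back jsonb with different insignificant whitespace than ActiveSupport's encoder produces, so comparing raw strings directly would flag phantom changes. A small illustration of the mismatch the re-dump avoids:

    require "active_support/json"

    raw_from_db = '{"a": 1}'     # formatting as returned by the server
    re_encoded  = ActiveSupport::JSON.encode(ActiveSupport::JSON.decode(raw_from_db))

    re_encoded                 # => '{"a":1}'
    raw_from_db == re_encoded  # => false, although the JSON value is identical
    # Re-dumping the old raw value before comparing makes changed_in_place?
    # ignore this whitespace-only difference.
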
@@ -0,0 +1,64 @@
+module ActiveRecord
+  module ConnectionAdapters
+    module Redshift
+      module OID # :nodoc:
+        # This class uses the data from PostgreSQL pg_type table to build
+        # the OID -> Type mapping.
+        #   - OID is an integer representing the type.
+        #   - Type is an OID::Type object.
+        # This class has side effects on the +store+ passed during initialization.
+        class TypeMapInitializer # :nodoc:
+          def initialize(store, run_complex_types = true)
+            @store = store
+            @run_complex_types = run_complex_types
+          end
+
+          def run(records)
+            nodes = records.reject { |row| @store.key? row['oid'].to_i }
+            mapped, nodes = nodes.partition { |row| @store.key? row['typname'] }
+            ranges, nodes = nodes.partition { |row| row['typtype'] == 'r'.freeze }
+            enums, nodes = nodes.partition { |row| row['typtype'] == 'e'.freeze }
+            domains, nodes = nodes.partition { |row| row['typtype'] == 'd'.freeze }
+            arrays, nodes = nodes.partition { |row| row['typinput'] == 'array_in'.freeze }
+            composites, nodes = nodes.partition { |row| row['typelem'].to_i != 0 }
+
+            mapped.each { |row| register_mapped_type(row) }
+          end
+
+          private
+
+          def register_mapped_type(row)
+            alias_type row['oid'], row['typname']
+          end
+
+          def register(oid, oid_type = nil, &block)
+            oid = assert_valid_registration(oid, oid_type || block)
+            if block_given?
+              @store.register_type(oid, &block)
+            else
+              @store.register_type(oid, oid_type)
+            end
+          end
+
+          def alias_type(oid, target)
+            oid = assert_valid_registration(oid, target)
+            @store.alias_type(oid, target)
+          end
+
+          def register_with_subtype(oid, target_oid)
+            if @store.key?(target_oid)
+              register(oid) do |_, *args|
+                yield @store.lookup(target_oid, *args)
+              end
+            end
+          end
+
+          def assert_valid_registration(oid, oid_type)
+            raise ArgumentError, "can't register nil type for OID #{oid}" if oid_type.nil?
+            oid.to_i
+          end
+        end
+      end
+    end
+  end
+end
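
Compared with the PostgreSQL adapter, this initializer only handles the simple case: rows whose typname is already registered in the store get their OID aliased to that registration, while the partitioned-out ranges, enums, domains, arrays and composites are dropped. A hedged sketch of feeding it pg_type-style rows; the store class and the row values below are illustrative, chosen to match how ActiveRecord type maps are normally built:

    store = ActiveRecord::Type::HashLookupTypeMap.new
    store.register_type("bool", ActiveRecord::Type::Boolean.new)

    rows = [
      # typname already known => OID 16 is aliased to the "bool" registration
      { "oid" => "16",   "typname" => "bool",  "typtype" => "b", "typinput" => "boolin",   "typelem" => "0" },
      # typname unknown to the store => partitioned out and ignored
      { "oid" => "3802", "typname" => "jsonb", "typtype" => "b", "typinput" => "jsonb_in", "typelem" => "0" },
    ]

    ActiveRecord::ConnectionAdapters::Redshift::OID::TypeMapInitializer.new(store).run(rows)

    store.key?(16)    # => true
    store.key?(3802)  # => false
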
@@ -0,0 +1,99 @@
+module ActiveRecord
+  module ConnectionAdapters
+    module Redshift
+      module Quoting
+        # Escapes binary strings for bytea input to the database.
+        def escape_bytea(value)
+          @connection.escape_bytea(value) if value
+        end
+
+        # Unescapes bytea output from a database to the binary string it represents.
+        # NOTE: This is NOT an inverse of escape_bytea! This is only to be used
+        # on escaped binary output from the database driver.
+        def unescape_bytea(value)
+          @connection.unescape_bytea(value) if value
+        end
+
+        # Quotes strings for use in SQL input.
+        def quote_string(s) #:nodoc:
+          @connection.escape(s)
+        end
+
+        # Checks the following cases:
+        #
+        # - table_name
+        # - "table.name"
+        # - schema_name.table_name
+        # - schema_name."table.name"
+        # - "schema.name".table_name
+        # - "schema.name"."table.name"
+        def quote_table_name(name)
+          Utils.extract_schema_qualified_name(name.to_s).quoted
+        end
+
+        def quote_table_name_for_assignment(table, attr)
+          quote_column_name(attr)
+        end
+
+        # Quotes column names for use in SQL queries.
+        def quote_column_name(name) #:nodoc:
+          PGconn.quote_ident(name.to_s)
+        end
+
+        # Quotes schema names for use in SQL queries.
+        def quote_schema_name(name)
+          PGconn.quote_ident(name)
+        end
+
+        # Quote date/time values for use in SQL input.
+        def quoted_date(value) #:nodoc:
+          result = super
+
+          if value.year <= 0
+            bce_year = format("%04d", -value.year + 1)
+            result = result.sub(/^-?\d+/, bce_year) + " BC"
+          end
+          result
+        end
+
+        # Does not quote function default values for UUID columns
+        def quote_default_value(value, column) #:nodoc:
+          if column.type == :uuid && value =~ /\(\)/
+            value
+          else
+            quote(value, column)
+          end
+        end
+
+        private
+
+        def _quote(value)
+          case value
+          when Type::Binary::Data
+            "'#{escape_bytea(value.to_s)}'"
+          when Float
+            if value.infinite? || value.nan?
+              "'#{value}'"
+            else
+              super
+            end
+          else
+            super
+          end
+        end
+
+        def _type_cast(value)
+          case value
+          when Type::Binary::Data
+            # Return a bind param hash with format as binary.
+            # See http://deveiate.org/code/pg/PGconn.html#method-i-exec_prepared-doc
+            # for more information
+            { value: value.to_s, format: 1 }
+          else
+            super
+          end
+        end
+      end
+    end
+  end
+end
1
+ module ActiveRecord
2
+ module ConnectionAdapters
3
+ module Redshift
4
+ module ReferentialIntegrity # :nodoc:
5
+ def supports_disable_referential_integrity? # :nodoc:
6
+ true
7
+ end
8
+
9
+ def disable_referential_integrity # :nodoc:
10
+ yield
11
+ end
12
+ end
13
+ end
14
+ end
15
+ end
@@ -0,0 +1,67 @@
+module ActiveRecord
+  module ConnectionAdapters
+    module Redshift
+      module ColumnMethods
+        # Defines the primary key field.
+        # Use of the native PostgreSQL UUID type is supported, and can be used
+        # by defining your tables as such:
+        #
+        #   create_table :stuffs, id: :uuid do |t|
+        #     t.string :content
+        #     t.timestamps
+        #   end
+        #
+        # By default, this will use the +uuid_generate_v4()+ function from the
+        # +uuid-ossp+ extension, which MUST be enabled on your database. To enable
+        # the +uuid-ossp+ extension, you can use the +enable_extension+ method in your
+        # migrations. To use a UUID primary key without +uuid-ossp+ enabled, you can
+        # set the +:default+ option to +nil+:
+        #
+        #   create_table :stuffs, id: false do |t|
+        #     t.primary_key :id, :uuid, default: nil
+        #     t.uuid :foo_id
+        #     t.timestamps
+        #   end
+        #
+        # You may also pass a different UUID generation function from +uuid-ossp+
+        # or another library.
+        #
+        # Note that setting the UUID primary key default value to +nil+ will
+        # require you to assure that you always provide a UUID value before saving
+        # a record (as primary keys cannot be +nil+). This might be done via the
+        # +SecureRandom.uuid+ method and a +before_save+ callback, for instance.
+        def primary_key(name, type = :primary_key, options = {})
+          return super unless type == :uuid
+          options[:default] = options.fetch(:default, 'uuid_generate_v4()')
+          options[:primary_key] = true
+          column name, type, options
+        end
+
+        def json(name, options = {})
+          column(name, :json, options)
+        end
+
+        def jsonb(name, options = {})
+          column(name, :jsonb, options)
+        end
+      end
+
+      class ColumnDefinition < ActiveRecord::ConnectionAdapters::ColumnDefinition
+      end
+
+      class TableDefinition < ActiveRecord::ConnectionAdapters::TableDefinition
+        include ColumnMethods
+
+        private
+
+        def create_column_definition(name, type)
+          Redshift::ColumnDefinition.new name, type
+        end
+      end
+
+      class Table < ActiveRecord::ConnectionAdapters::Table
+        include ColumnMethods
+      end
+    end
+  end
+end
1
+ module ActiveRecord
2
+ module ConnectionAdapters
3
+ module Redshift
4
+ module ColumnDumper
5
+ # Adds +:array+ option to the default set provided by the
6
+ # AbstractAdapter
7
+ def prepare_column_options(column) # :nodoc:
8
+ spec = super
9
+ spec[:default] = "\"#{column.default_function}\"" if column.default_function
10
+ spec
11
+ end
12
+ end
13
+ end
14
+ end
15
+ end
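
The effect shows up in db/schema.rb: when a column's default is a function rather than a literal, the dumper above emits the expression as a quoted string so it survives the dump. Roughly what a dumped column with a function default would look like under this rule (table and column names illustrative):

    # db/schema.rb excerpt (illustrative)
    create_table "events", force: :cascade do |t|
      t.datetime "ingested_at", default: "now()"   # function default kept as a string
      t.json     "payload"
    end
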
@@ -0,0 +1,419 @@
+module ActiveRecord
+  module ConnectionAdapters
+    module Redshift
+      class SchemaCreation < AbstractAdapter::SchemaCreation
+        private
+
+        def visit_ColumnDefinition(o)
+          o.sql_type = type_to_sql(o.type, o.limit, o.precision, o.scale)
+          super
+        end
+
+        def add_column_options!(sql, options)
+          column = options.fetch(:column) { return super }
+          if column.type == :uuid && options[:default] =~ /\(\)/
+            sql << " DEFAULT #{options[:default]}"
+          else
+            super
+          end
+        end
+      end
+
+      module SchemaStatements
+        # Drops the database specified on the +name+ attribute
+        # and creates it again using the provided +options+.
+        def recreate_database(name, options = {}) #:nodoc:
+          drop_database(name)
+          create_database(name, options)
+        end
+
+        # Create a new Redshift database. Only the <tt>:owner</tt> option
+        # affects the generated statement; the other PostgreSQL-style options
+        # (<tt>:encoding</tt>, <tt>:collation</tt>, <tt>:ctype</tt>,
+        # <tt>:tablespace</tt>, <tt>:connection_limit</tt>) are accepted but ignored.
+        #
+        # Example:
+        #   create_database config[:database], config
+        #   create_database 'foo_development', owner: 'foo'
+        def create_database(name, options = {})
+          options = { encoding: 'utf8' }.merge!(options.symbolize_keys)
+
+          option_string = options.inject("") do |memo, (key, value)|
+            memo += case key
+            when :owner
+              " OWNER = \"#{value}\""
+            else
+              ""
+            end
+          end
+
+          execute "CREATE DATABASE #{quote_table_name(name)}#{option_string}"
+        end
+
+        # Drops a Redshift database.
+        #
+        # Example:
+        #   drop_database 'matt_development'
+        def drop_database(name) #:nodoc:
+          execute "DROP DATABASE #{quote_table_name(name)}"
+        end
+
+        # Returns the list of all tables in the schema search path.
+        def tables(name = nil)
+          if name
+            ActiveSupport::Deprecation.warn(<<-MSG.squish)
+              Passing arguments to #tables is deprecated without replacement.
+            MSG
+          end
+
+          select_values("SELECT tablename FROM pg_tables WHERE schemaname = ANY(current_schemas(false))", 'SCHEMA')
+        end
+
+        def data_sources # :nodoc:
+          select_values(<<-SQL, 'SCHEMA')
+            SELECT c.relname
+            FROM pg_class c
+            LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
+            WHERE c.relkind IN ('r', 'v','m') -- (r)elation/table, (v)iew, (m)aterialized view
+            AND n.nspname = ANY (current_schemas(false))
+          SQL
+        end
+
+        # Returns true if table exists.
+        # If the schema is not specified as part of +name+ then it will only find tables within
+        # the current schema search path (regardless of permissions to access tables in other schemas)
+        def table_exists?(name)
+          ActiveSupport::Deprecation.warn(<<-MSG.squish)
+            #table_exists? currently checks both tables and views.
+            This behavior is deprecated and will be changed with Rails 5.1 to only check tables.
+            Use #data_source_exists? instead.
+          MSG
+
+          data_source_exists?(name)
+        end
+
+        def data_source_exists?(name)
+          name = Utils.extract_schema_qualified_name(name.to_s)
+          return false unless name.identifier
+
+          select_value(<<-SQL, 'SCHEMA').to_i > 0
+            SELECT COUNT(*)
+            FROM pg_class c
+            LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
+            WHERE c.relkind IN ('r','v','m') -- (r)elation/table, (v)iew, (m)aterialized view
+            AND c.relname = '#{name.identifier}'
+            AND n.nspname = #{name.schema ? "'#{name.schema}'" : 'ANY (current_schemas(false))'}
+          SQL
+        end
+
+        def views # :nodoc:
+          select_values(<<-SQL, 'SCHEMA')
+            SELECT c.relname
+            FROM pg_class c
+            LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
+            WHERE c.relkind IN ('v','m') -- (v)iew, (m)aterialized view
+            AND n.nspname = ANY (current_schemas(false))
+          SQL
+        end
+
+        def view_exists?(view_name) # :nodoc:
+          name = Utils.extract_schema_qualified_name(view_name.to_s)
+          return false unless name.identifier
+
+          select_values(<<-SQL, 'SCHEMA').any?
+            SELECT c.relname
+            FROM pg_class c
+            LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
+            WHERE c.relkind IN ('v','m') -- (v)iew, (m)aterialized view
+            AND c.relname = '#{name.identifier}'
+            AND n.nspname = #{name.schema ? "'#{name.schema}'" : 'ANY (current_schemas(false))'}
+          SQL
+        end
+
+        def drop_table(table_name, options = {})
+          execute "DROP TABLE #{quote_table_name(table_name)}#{' CASCADE' if options[:force] == :cascade}"
+        end
+
+        # Returns true if schema exists.
+        def schema_exists?(name)
+          select_value("SELECT COUNT(*) FROM pg_namespace WHERE nspname = '#{name}'", 'SCHEMA').to_i > 0
+        end
+
+        def index_name_exists?(table_name, index_name, default)
+          false
+        end
+
+        # Returns an array of indexes for the given table.
+        def indexes(table_name, name = nil)
+          []
+        end
+
+        # Returns the list of all column definitions for a table.
+        def columns(table_name)
+          column_definitions(table_name.to_s).map do |column_name, type, default, notnull, oid, fmod|
+            default_value = extract_value_from_default(default)
+            type_metadata = fetch_type_metadata(column_name, type, oid, fmod)
+            default_function = extract_default_function(default_value, default)
+            new_column(column_name, default_value, type_metadata, notnull == 'f', table_name, default_function)
+          end
+        end
+
+        def new_column(name, default, sql_type_metadata = nil, null = true, table_name = nil, default_function = nil) # :nodoc:
+          RedshiftColumn.new(name, default, sql_type_metadata, null, table_name, default_function)
+        end
+
+        # Returns the current database name.
+        def current_database
+          select_value('select current_database()', 'SCHEMA')
+        end
+
+        # Returns the current schema name.
+        def current_schema
+          select_value('SELECT current_schema', 'SCHEMA')
+        end
+
+        # Returns the current database encoding format.
+        def encoding
+          select_value("SELECT pg_encoding_to_char(encoding) FROM pg_database WHERE datname LIKE '#{current_database}'", 'SCHEMA')
+        end
+
+        def collation
+        end
+
+        def ctype
+        end
+
+        # Returns an array of schema names.
+        def schema_names
+          select_value(<<-SQL, 'SCHEMA')
+            SELECT nspname
+            FROM pg_namespace
+            WHERE nspname !~ '^pg_.*'
+            AND nspname NOT IN ('information_schema')
+            ORDER by nspname;
+          SQL
+        end
+
+        # Creates a schema for the given schema name.
+        def create_schema schema_name
+          execute "CREATE SCHEMA #{quote_schema_name(schema_name)}"
+        end
+
+        # Drops the schema for the given schema name.
+        def drop_schema(schema_name, options = {})
+          execute "DROP SCHEMA#{' IF EXISTS' if options[:if_exists]} #{quote_schema_name(schema_name)} CASCADE"
+        end
+
+        # Sets the schema search path to a string of comma-separated schema names.
+        # Names beginning with $ have to be quoted (e.g. $user => '$user').
+        # See: http://www.postgresql.org/docs/current/static/ddl-schemas.html
+        #
+        # This should not be called manually but set in database.yml.
+        def schema_search_path=(schema_csv)
+          if schema_csv
+            execute("SET search_path TO #{schema_csv}", 'SCHEMA')
+            @schema_search_path = schema_csv
+          end
+        end
+
+        # Returns the active schema search path.
+        def schema_search_path
+          @schema_search_path ||= select_value('SHOW search_path', 'SCHEMA')
+        end
+
+        # Returns the sequence name for a table's primary key or some other specified key.
+        def default_sequence_name(table_name, pk = nil) #:nodoc:
+          result = serial_sequence(table_name, pk || 'id')
+          return nil unless result
+          Utils.extract_schema_qualified_name(result).to_s
+        rescue ActiveRecord::StatementInvalid
+          Redshift::Name.new(nil, "#{table_name}_#{pk || 'id'}_seq").to_s
+        end
+
+        def serial_sequence(table, column)
+          select_value("SELECT pg_get_serial_sequence('#{table}', '#{column}')", 'SCHEMA')
+        end
+
+        def set_pk_sequence!(table, value) #:nodoc:
+        end
+
+        def reset_pk_sequence!(table, pk = nil, sequence = nil) #:nodoc:
+        end
+
+        def pk_and_sequence_for(table) #:nodoc:
+          [nil, nil]
+        end
+
+        # Returns just a table's primary key
+        def primary_keys(table)
+          pks = query(<<-end_sql, 'SCHEMA')
+            SELECT DISTINCT attr.attname
+            FROM pg_attribute attr
+            INNER JOIN pg_depend dep ON attr.attrelid = dep.refobjid AND attr.attnum = dep.refobjsubid
+            INNER JOIN pg_constraint cons ON attr.attrelid = cons.conrelid AND attr.attnum = any(cons.conkey)
+            WHERE cons.contype = 'p'
+              AND dep.refobjid = '#{quote_table_name(table)}'::regclass
+          end_sql
+          pks.present? ? pks[0] : pks
+        end
+
+        # Renames a table.
+        # Note that, unlike the PostgreSQL adapter, this does not also rename
+        # the table's primary key sequence.
+        #
+        # Example:
+        #   rename_table('octopuses', 'octopi')
+        def rename_table(table_name, new_name)
+          clear_cache!
+          execute "ALTER TABLE #{quote_table_name(table_name)} RENAME TO #{quote_table_name(new_name)}"
+        end
+
+        def add_column(table_name, column_name, type, options = {}) #:nodoc:
+          clear_cache!
+          super
+        end
+
+        # Changes the column of a table.
+        def change_column(table_name, column_name, type, options = {})
+          clear_cache!
+          quoted_table_name = quote_table_name(table_name)
+          sql_type = type_to_sql(type, options[:limit], options[:precision], options[:scale])
+          sql = "ALTER TABLE #{quoted_table_name} ALTER COLUMN #{quote_column_name(column_name)} TYPE #{sql_type}"
+          sql << " USING #{options[:using]}" if options[:using]
+          if options[:cast_as]
+            sql << " USING CAST(#{quote_column_name(column_name)} AS #{type_to_sql(options[:cast_as], options[:limit], options[:precision], options[:scale])})"
+          end
+          execute sql
+
+          change_column_default(table_name, column_name, options[:default]) if options_include_default?(options)
+          change_column_null(table_name, column_name, options[:null], options[:default]) if options.key?(:null)
+        end
+
+        # Changes the default value of a table column.
+        def change_column_default(table_name, column_name, default_or_changes)
+          clear_cache!
+          column = column_for(table_name, column_name)
+          return unless column
+
+          default = extract_new_default_value(default_or_changes)
+          alter_column_query = "ALTER TABLE #{quote_table_name(table_name)} ALTER COLUMN #{quote_column_name(column_name)} %s"
+          if default.nil?
+            # <tt>DEFAULT NULL</tt> results in the same behavior as <tt>DROP DEFAULT</tt>. However, PostgreSQL will
+            # cast the default to the column's type, which leaves us with a default like "default NULL::character varying".
+            execute alter_column_query % "DROP DEFAULT"
+          else
+            execute alter_column_query % "SET DEFAULT #{quote_default_value(default, column)}"
+          end
+        end
+
+        def change_column_null(table_name, column_name, null, default = nil)
+          clear_cache!
+          unless null || default.nil?
+            column = column_for(table_name, column_name)
+            execute("UPDATE #{quote_table_name(table_name)} SET #{quote_column_name(column_name)}=#{quote_default_value(default, column)} WHERE #{quote_column_name(column_name)} IS NULL") if column
+          end
+          execute("ALTER TABLE #{quote_table_name(table_name)} ALTER #{quote_column_name(column_name)} #{null ? 'DROP' : 'SET'} NOT NULL")
+        end
+
+        # Renames a column in a table.
+        def rename_column(table_name, column_name, new_column_name) #:nodoc:
+          clear_cache!
+          execute "ALTER TABLE #{quote_table_name(table_name)} RENAME COLUMN #{quote_column_name(column_name)} TO #{quote_column_name(new_column_name)}"
+        end
+
+        def add_index(table_name, column_name, options = {}) #:nodoc:
+        end
+
+        def remove_index!(table_name, index_name) #:nodoc:
+        end
+
+        def rename_index(table_name, old_name, new_name)
+        end
+
+        def foreign_keys(table_name)
+          fk_info = select_all(<<-SQL.strip_heredoc, 'SCHEMA')
+            SELECT t2.relname AS to_table, a1.attname AS column, a2.attname AS primary_key, c.conname AS name, c.confupdtype AS on_update, c.confdeltype AS on_delete
+            FROM pg_constraint c
+            JOIN pg_class t1 ON c.conrelid = t1.oid
+            JOIN pg_class t2 ON c.confrelid = t2.oid
+            JOIN pg_attribute a1 ON a1.attnum = c.conkey[1] AND a1.attrelid = t1.oid
+            JOIN pg_attribute a2 ON a2.attnum = c.confkey[1] AND a2.attrelid = t2.oid
+            JOIN pg_namespace t3 ON c.connamespace = t3.oid
+            WHERE c.contype = 'f'
+              AND t1.relname = #{quote(table_name)}
+              AND t3.nspname = ANY (current_schemas(false))
+            ORDER BY c.conname
+          SQL
+
+          fk_info.map do |row|
+            options = {
+              column: row['column'],
+              name: row['name'],
+              primary_key: row['primary_key']
+            }
+
+            options[:on_delete] = extract_foreign_key_action(row['on_delete'])
+            options[:on_update] = extract_foreign_key_action(row['on_update'])
+
+            ForeignKeyDefinition.new(table_name, row['to_table'], options)
+          end
+        end
+
+        def extract_foreign_key_action(specifier) # :nodoc:
+          case specifier
+          when 'c'; :cascade
+          when 'n'; :nullify
+          when 'r'; :restrict
+          end
+        end
+
+        def index_name_length
+          63
+        end
+
+        # Maps logical Rails types to PostgreSQL-specific data types.
+        def type_to_sql(type, limit = nil, precision = nil, scale = nil)
+          case type.to_s
+          when 'integer'
+            return 'integer' unless limit
+
+            case limit
+            when 1, 2; 'smallint'
+            when nil, 3, 4; 'integer'
+            when 5..8; 'bigint'
+            else raise(ActiveRecordError, "No integer type has byte size #{limit}. Use a numeric with precision 0 instead.")
+            end
+          else
+            super
+          end
+        end
+
+        # PostgreSQL requires the ORDER BY columns in the select list for distinct queries, and
+        # requires that the ORDER BY include the distinct column.
+        def columns_for_distinct(columns, orders) #:nodoc:
+          order_columns = orders.reject(&:blank?).map{ |s|
+            # Convert Arel node to string
+            s = s.to_sql unless s.is_a?(String)
+            # Remove any ASC/DESC modifiers
+            s.gsub(/\s+(?:ASC|DESC)\b/i, '')
+             .gsub(/\s+NULLS\s+(?:FIRST|LAST)\b/i, '')
+          }.reject(&:blank?).map.with_index { |column, i| "#{column} AS alias_#{i}" }
+
+          [super, *order_columns].join(', ')
+        end
+
+        def fetch_type_metadata(column_name, sql_type, oid, fmod)
+          cast_type = get_oid_type(oid.to_i, fmod.to_i, column_name, sql_type)
+          simple_type = SqlTypeMetadata.new(
+            sql_type: sql_type,
+            type: cast_type.type,
+            limit: cast_type.limit,
+            precision: cast_type.precision,
+            scale: cast_type.scale,
+          )
+          TypeMetadata.new(simple_type, oid: oid, fmod: fmod)
+        end
+      end
+    end
+  end
+end
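
Most of the statements above are thin wrappers over SQL strings; the one Redshift-specific extension is the :cast_as option to change_column, which wraps the column in an explicit CAST inside the USING clause. Illustrative calls against a connected adapter (table, column and schema names are made up, and the emitted SQL is approximate):

    conn = ActiveRecord::Base.connection

    conn.create_schema "reporting"
    conn.schema_exists?("reporting")   # => true

    # Emits roughly:
    #   ALTER TABLE "events" ALTER COLUMN "payload" TYPE <string type>
    #     USING CAST("payload" AS <string type>)
    conn.change_column :events, :payload, :string, cast_as: :string

    conn.tables                  # tables visible on the current search_path
    conn.primary_keys("events")  # => ["id"] (first row of the pg_constraint query)
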