activerecord-redshift-adapter-ng 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. checksums.yaml +7 -0
  2. data/LICENSE +54 -0
  3. data/README.md +39 -0
  4. data/lib/active_record/connection_adapters/redshift/array_parser.rb +93 -0
  5. data/lib/active_record/connection_adapters/redshift/column.rb +10 -0
  6. data/lib/active_record/connection_adapters/redshift/database_statements.rb +232 -0
  7. data/lib/active_record/connection_adapters/redshift/oid.rb +21 -0
  8. data/lib/active_record/connection_adapters/redshift/oid/date.rb +11 -0
  9. data/lib/active_record/connection_adapters/redshift/oid/date_time.rb +36 -0
  10. data/lib/active_record/connection_adapters/redshift/oid/decimal.rb +13 -0
  11. data/lib/active_record/connection_adapters/redshift/oid/float.rb +21 -0
  12. data/lib/active_record/connection_adapters/redshift/oid/infinity.rb +13 -0
  13. data/lib/active_record/connection_adapters/redshift/oid/integer.rb +11 -0
  14. data/lib/active_record/connection_adapters/redshift/oid/json.rb +35 -0
  15. data/lib/active_record/connection_adapters/redshift/oid/jsonb.rb +23 -0
  16. data/lib/active_record/connection_adapters/redshift/oid/time.rb +11 -0
  17. data/lib/active_record/connection_adapters/redshift/oid/type_map_initializer.rb +63 -0
  18. data/lib/active_record/connection_adapters/redshift/quoting.rb +98 -0
  19. data/lib/active_record/connection_adapters/redshift/referential_integrity.rb +15 -0
  20. data/lib/active_record/connection_adapters/redshift/schema_definitions.rb +67 -0
  21. data/lib/active_record/connection_adapters/redshift/schema_statements.rb +393 -0
  22. data/lib/active_record/connection_adapters/redshift/utils.rb +77 -0
  23. data/lib/active_record/connection_adapters/redshift_adapter.rb +653 -0
  24. metadata +127 -0
@@ -0,0 +1,11 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      module OID # :nodoc:
        # Date column type. Mixing in Infinity lets the type represent the
        # +infinity+/-infinity+ sentinel values used by Postgres-style servers.
        class Date < Type::Date # :nodoc:
          include Infinity
        end
      end
    end
  end
end
@@ -0,0 +1,36 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      module OID # :nodoc:
        # Timestamp column type with support for infinite values and BC dates.
        class DateTime < Type::DateTime # :nodoc:
          include Infinity

          # Serializes +value+ for the database. Years <= 0 are rendered in
          # the "<year> BC" form (astronomical year 0 is 1 BC, -1 is 2 BC, ...).
          def type_cast_for_database(value)
            return super unless has_precision? && value.acts_like?(:time) && value.year <= 0

            bce_year = format("%04d", -value.year + 1)
            super.sub(/^-?\d+/, bce_year) + " BC"
          end

          # Casts a database value to a Ruby time. String input handles the
          # 'infinity'/'-infinity' sentinels and "... BC" timestamps.
          # NOTE(review): non-String values are returned unchanged instead of
          # being cast via +super+ -- presumably intentional, confirm.
          def cast_value(value)
            return value unless value.is_a?(::String)

            case value
            when 'infinity'  then ::Float::INFINITY
            when '-infinity' then -::Float::INFINITY
            when / BC$/
              astronomical_year = format("%04d", -value[/^\d+/].to_i + 1)
              super(value.sub(/ BC$/, "").sub(/^\d+/, astronomical_year))
            else
              super
            end
          end
        end
      end
    end
  end
end
@@ -0,0 +1,13 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      module OID # :nodoc:
        # Arbitrary-precision numeric column type.
        class Decimal < Type::Decimal # :nodoc:
          # Returns an infinite BigDecimal; pass <tt>negative: true</tt> for
          # negative infinity. Uses the Kernel#BigDecimal conversion method:
          # BigDecimal.new was deprecated in Ruby 2.6 and removed in 2.7.
          def infinity(options = {})
            BigDecimal("Infinity") * (options[:negative] ? -1 : 1)
          end
        end
      end
    end
  end
end
@@ -0,0 +1,21 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      module OID # :nodoc:
        # Floating-point column type with infinity/NaN support.
        class Float < Type::Float # :nodoc:
          include Infinity

          # Casts a database value to a Ruby Float. Floats pass through
          # unchanged; the special string sentinels map to their IEEE-754
          # counterparts; anything else is coerced with #to_f.
          def cast_value(value)
            return value if ::Float === value

            case value
            when 'Infinity'  then ::Float::INFINITY
            when '-Infinity' then -::Float::INFINITY
            when 'NaN'       then ::Float::NAN
            else value.to_f
            end
          end
        end
      end
    end
  end
end
@@ -0,0 +1,13 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      module OID # :nodoc:
        # Shared helper for types whose columns can hold infinite values.
        module Infinity # :nodoc:
          # Returns +Float::INFINITY+, or its negation when the
          # <tt>negative: true</tt> option is given.
          def infinity(options = {})
            sign = options[:negative] ? -1 : 1
            sign * ::Float::INFINITY
          end
        end
      end
    end
  end
end
@@ -0,0 +1,11 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      module OID # :nodoc:
        # Integer column type. Infinity is mixed in so range bounds and
        # similar callers can request an infinite sentinel value.
        class Integer < Type::Integer # :nodoc:
          include Infinity
        end
      end
    end
  end
end
@@ -0,0 +1,35 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      module OID # :nodoc:
        # JSON column type: serializes Ruby arrays/hashes to JSON text and
        # parses JSON text coming back from the database.
        class Json < Type::Value # :nodoc:
          include Type::Mutable

          def type
            :json
          end

          # Database -> Ruby: parse JSON text. Malformed JSON yields nil
          # rather than raising (matches the adapter's lenient behavior).
          def type_cast_from_database(value)
            return super unless value.is_a?(::String)

            begin
              ::ActiveSupport::JSON.decode(value)
            rescue
              nil
            end
          end

          # Ruby -> database: encode arrays and hashes as JSON text; defer
          # everything else to the superclass.
          def type_cast_for_database(value)
            case value
            when ::Array, ::Hash
              ::ActiveSupport::JSON.encode(value)
            else
              super
            end
          end

          # Store accessor used by ActiveRecord::Store for this type.
          def accessor
            ActiveRecord::Store::StringKeyedHashAccessor
          end
        end
      end
    end
  end
end
@@ -0,0 +1,23 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      module OID # :nodoc:
        # Binary JSON column type; behaves like Json but normalizes stored
        # values before in-place change detection.
        class Jsonb < Json # :nodoc:
          def type
            :jsonb
          end

          # Postgres does not preserve insignificant whitespace when
          # round-tripping jsonb columns, which causes false positives in a
          # raw string comparison. Parse and re-dump the raw value so its
          # whitespace matches our encoder's output before comparing.
          def changed_in_place?(raw_old_value, new_value)
            normalized_old_value = type_cast_for_database(type_cast_from_database(raw_old_value))
            super(normalized_old_value, new_value)
          end
        end
      end
    end
  end
end
@@ -0,0 +1,11 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      module OID # :nodoc:
        # Time-of-day column type with the Infinity sentinel helper mixed in.
        class Time < Type::Time # :nodoc:
          include Infinity
        end
      end
    end
  end
end
@@ -0,0 +1,63 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      module OID # :nodoc:
        # Builds the OID -> Type mapping from rows of PostgreSQL's pg_type
        # catalog table.
        # - OID is an integer representing the type.
        # - Type is an OID::Type object.
        # This class has side effects on the +store+ passed during
        # initialization.
        class TypeMapInitializer # :nodoc:
          def initialize(store)
            @store = store
          end

          # Registers every record whose OID is not yet known and whose type
          # name the store already understands, aliasing the OID to that
          # name. The original implementation also partitioned the rows into
          # range/enum/domain/array/composite buckets but never used them;
          # that dead code has been removed.
          def run(records)
            records
              .reject { |row| @store.key? row['oid'].to_i }
              .select { |row| @store.key? row['typname'] }
              .each   { |row| register_mapped_type(row) }
          end

          private

          # Aliases a catalog row's OID to its (already registered) typname.
          def register_mapped_type(row)
            alias_type row['oid'], row['typname']
          end

          # Registers +oid+ with either an explicit type object or a block
          # that builds the type on demand.
          def register(oid, oid_type = nil, &block)
            oid = assert_valid_registration(oid, oid_type || block)
            if block_given?
              @store.register_type(oid, &block)
            else
              @store.register_type(oid, oid_type)
            end
          end

          def alias_type(oid, target)
            oid = assert_valid_registration(oid, target)
            @store.alias_type(oid, target)
          end

          # Registers +oid+ as a wrapper around an already registered
          # +target_oid+, yielding the looked-up target type to the caller's
          # block. No-op when the target OID is unknown.
          def register_with_subtype(oid, target_oid)
            return unless @store.key?(target_oid)

            register(oid) do |_, *args|
              yield @store.lookup(target_oid, *args)
            end
          end

          # Ensures a type (or block) was supplied and normalizes the OID to
          # an Integer.
          def assert_valid_registration(oid, oid_type)
            raise ArgumentError, "can't register nil type for OID #{oid}" if oid_type.nil?
            oid.to_i
          end
        end
      end
    end
  end
end
@@ -0,0 +1,98 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      # SQL quoting helpers used by the Redshift adapter. Relies on the
      # adapter's underlying pg connection (@connection).
      module Quoting
        # Escapes a binary string for bytea input to the database.
        # Returns nil when +value+ is nil.
        def escape_bytea(value)
          @connection.escape_bytea(value) if value
        end

        # Unescapes bytea output from the database back into the binary
        # string it represents. NOTE: this is NOT an inverse of
        # #escape_bytea -- it is only valid for escaped binary output
        # coming from the database driver.
        def unescape_bytea(value)
          @connection.unescape_bytea(value) if value
        end

        # Escapes a string literal for interpolation into SQL.
        def quote_string(s) #:nodoc:
          @connection.escape(s)
        end

        # Quotes a possibly schema-qualified table name. Handles:
        #
        # - table_name
        # - "table.name"
        # - schema_name.table_name
        # - schema_name."table.name"
        # - "schema.name".table_name
        # - "schema.name"."table.name"
        def quote_table_name(name)
          Utils.extract_schema_qualified_name(name.to_s).quoted
        end

        def quote_table_name_for_assignment(table, attr)
          quote_column_name(attr)
        end

        # Quotes column names for use in SQL queries.
        def quote_column_name(name) #:nodoc:
          PGconn.quote_ident(name.to_s)
        end

        # Formats date/time values for SQL input: appends microseconds for
        # Time-like values and renders years <= 0 in "<year> BC" form.
        def quoted_date(value) #:nodoc:
          result = super
          if value.acts_like?(:time) && value.respond_to?(:usec)
            result = "#{result}.#{format('%06d', value.usec)}"
          end

          if value.year <= 0
            bce_year = format("%04d", -value.year + 1)
            result = result.sub(/^-?\d+/, bce_year) + " BC"
          end
          result
        end

        # Leaves function defaults (e.g. uuid_generate_v4()) for UUID
        # columns unquoted; quotes everything else normally.
        def quote_default_value(value, column) #:nodoc:
          return value if column.type == :uuid && value =~ /\(\)/
          quote(value, column)
        end

        private

        def _quote(value)
          if Type::Binary::Data === value
            "'#{escape_bytea(value.to_s)}'"
          elsif value.is_a?(Float) && (value.infinite? || value.nan?)
            # Infinity/NaN must be sent as quoted literals.
            "'#{value}'"
          else
            super
          end
        end

        def _type_cast(value)
          return super unless Type::Binary::Data === value

          # Bind-param hash with binary format. See
          # http://deveiate.org/code/pg/PGconn.html#method-i-exec_prepared-doc
          # for more information.
          { value: value.to_s, format: 1 }
        end
      end
    end
  end
end
@@ -0,0 +1,15 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      # Redshift does not enforce foreign-key constraints, so "disabling"
      # referential integrity is simply a pass-through to the given block.
      module ReferentialIntegrity # :nodoc:
        def supports_disable_referential_integrity? # :nodoc:
          true
        end

        # Runs the block with no additional setup or teardown.
        def disable_referential_integrity # :nodoc:
          yield
        end
      end
    end
  end
end
@@ -0,0 +1,67 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      # DSL helpers adding JSON column types to table/alter definitions.
      module ColumnMethods
        def json(name, options = {})
          column(name, :json, options)
        end

        def jsonb(name, options = {})
          column(name, :jsonb, options)
        end
      end

      class ColumnDefinition < ActiveRecord::ConnectionAdapters::ColumnDefinition
      end

      class TableDefinition < ActiveRecord::ConnectionAdapters::TableDefinition
        include ColumnMethods

        # Defines the primary key field.
        # Use of the native PostgreSQL UUID type is supported:
        #
        #   create_table :stuffs, id: :uuid do |t|
        #     t.string :content
        #     t.timestamps
        #   end
        #
        # By default this uses the +uuid_generate_v4()+ function from the
        # +uuid-ossp+ extension, which MUST be enabled on your database
        # (see +enable_extension+). To use a UUID primary key without
        # +uuid-ossp+, set the +:default+ option to +nil+ and supply the
        # value yourself (e.g. +SecureRandom.uuid+ in a +before_save+):
        #
        #   create_table :stuffs, id: false do |t|
        #     t.primary_key :id, :uuid, default: nil
        #     t.uuid :foo_id
        #     t.timestamps
        #   end
        def primary_key(name, type = :primary_key, options = {})
          return super unless type == :uuid

          # Build a fresh hash instead of mutating the caller's +options+
          # (the original wrote :default/:primary_key back into it). An
          # explicitly passed default -- including nil -- is preserved.
          options = { default: 'uuid_generate_v4()' }.merge(options).merge(primary_key: true)
          column name, type, options
        end

        private

        # Builds Redshift-specific column definitions.
        def create_column_definition(name, type)
          Redshift::ColumnDefinition.new name, type
        end
      end

      class Table < ActiveRecord::ConnectionAdapters::Table
        include ColumnMethods
      end
    end
  end
end
@@ -0,0 +1,393 @@
1
module ActiveRecord
  module ConnectionAdapters
    module Redshift
      # Generates the SQL fragments used when creating tables and columns.
      class SchemaCreation < AbstractAdapter::SchemaCreation
        private

        # Appends an inline PRIMARY KEY clause for columns flagged as
        # primary keys that do not already use the native :primary_key type.
        def visit_ColumnDefinition(o)
          sql = super
          if o.primary_key? && o.type != :primary_key
            sql << " PRIMARY KEY "
            add_column_options!(sql, column_options(o))
          end
          sql
        end

        # Emits function defaults (e.g. uuid_generate_v4()) for uuid
        # columns without quoting them as string literals.
        def add_column_options!(sql, options)
          column = options.fetch(:column) { return super }
          return super unless column.type == :uuid && options[:default] =~ /\(\)/

          sql << " DEFAULT #{options[:default]}"
        end

        # Resolves array columns to their bracketed cast type.
        def type_for_column(column)
          return super unless column.array

          @conn.lookup_cast_type("#{column.sql_type}[]")
        end
      end
    end
  end
end
+
34
+ module SchemaStatements
35
+ # Drops the database specified on the +name+ attribute
36
+ # and creates it again using the provided +options+.
37
+ def recreate_database(name, options = {}) #:nodoc:
38
+ drop_database(name)
39
+ create_database(name, options)
40
+ end
41
+
42
+ # Create a new PostgreSQL database. Options include <tt>:owner</tt>, <tt>:template</tt>,
43
+ # <tt>:encoding</tt> (defaults to utf8), <tt>:collation</tt>, <tt>:ctype</tt>,
44
+ # <tt>:tablespace</tt>, and <tt>:connection_limit</tt> (note that MySQL uses
45
+ # <tt>:charset</tt> while PostgreSQL uses <tt>:encoding</tt>).
46
+ #
47
+ # Example:
48
+ # create_database config[:database], config
49
+ # create_database 'foo_development', encoding: 'unicode'
50
+ def create_database(name, options = {})
51
+ options = { encoding: 'utf8' }.merge!(options.symbolize_keys)
52
+
53
+ option_string = options.inject("") do |memo, (key, value)|
54
+ memo += case key
55
+ when :owner
56
+ " OWNER = \"#{value}\""
57
+ else
58
+ ""
59
+ end
60
+ end
61
+
62
+ execute "CREATE DATABASE #{quote_table_name(name)}#{option_string}"
63
+ end
64
+
65
+ # Drops a PostgreSQL database.
66
+ #
67
+ # Example:
68
+ # drop_database 'matt_development'
69
+ def drop_database(name) #:nodoc:
70
+ execute "DROP DATABASE #{quote_table_name(name)}"
71
+ end
72
+
73
+ # Returns the list of all tables in the schema search path or a specified schema.
74
+ def tables(name = nil)
75
+ query(<<-SQL, 'SCHEMA').map { |row| row[0] }
76
+ SELECT tablename
77
+ FROM pg_tables
78
+ WHERE schemaname = ANY (current_schemas(false))
79
+ SQL
80
+ end
81
+
82
+ # Returns true if table exists.
83
+ # If the schema is not specified as part of +name+ then it will only find tables within
84
+ # the current schema search path (regardless of permissions to access tables in other schemas)
85
+ def table_exists?(name)
86
+ name = Utils.extract_schema_qualified_name(name.to_s)
87
+ return false unless name.identifier
88
+
89
+ exec_query(<<-SQL, 'SCHEMA').rows.first[0].to_i > 0
90
+ SELECT COUNT(*)
91
+ FROM pg_class c
92
+ LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
93
+ WHERE c.relkind IN ('r','v','m') -- (r)elation/table, (v)iew, (m)aterialized view
94
+ AND c.relname = '#{name.identifier}'
95
+ AND n.nspname = #{name.schema ? "'#{name.schema}'" : 'ANY (current_schemas(false))'}
96
+ SQL
97
+ end
98
+
99
+ def drop_table(table_name, options = {})
100
+ execute "DROP TABLE #{quote_table_name(table_name)}#{' CASCADE' if options[:force] == :cascade}"
101
+ end
102
+
103
+ # Returns true if schema exists.
104
+ def schema_exists?(name)
105
+ exec_query(<<-SQL, 'SCHEMA').rows.first[0].to_i > 0
106
+ SELECT COUNT(*)
107
+ FROM pg_namespace
108
+ WHERE nspname = '#{name}'
109
+ SQL
110
+ end
111
+
112
+ def index_name_exists?(table_name, index_name, default)
113
+ false
114
+ end
115
+
116
+ # Returns an array of indexes for the given table.
117
+ def indexes(table_name, name = nil)
118
+ []
119
+ end
120
+
121
+ # Returns the list of all column definitions for a table.
122
+ def columns(table_name)
123
+ # Limit, precision, and scale are all handled by the superclass.
124
+ column_definitions(table_name).map do |column_name, type, default, notnull, oid, fmod|
125
+ oid = get_oid_type(oid.to_i, fmod.to_i, column_name, type)
126
+ default_value = extract_value_from_default(oid, default)
127
+ default_function = extract_default_function(default_value, default)
128
+ new_column(column_name, default_value, oid, type, notnull == 'f', default_function)
129
+ end
130
+ end
131
+
132
+ def new_column(name, default, cast_type, sql_type = nil, null = true, default_function = nil) # :nodoc:
133
+ RedshiftColumn.new(name, default, cast_type, sql_type, null, default_function)
134
+ end
135
+
136
+ # Returns the current database name.
137
+ def current_database
138
+ query('select current_database()', 'SCHEMA')[0][0]
139
+ end
140
+
141
+ # Returns the current schema name.
142
+ def current_schema
143
+ query('SELECT current_schema', 'SCHEMA')[0][0]
144
+ end
145
+
146
+ # Returns the current database encoding format.
147
+ def encoding
148
+ query(<<-end_sql, 'SCHEMA')[0][0]
149
+ SELECT pg_encoding_to_char(pg_database.encoding) FROM pg_database
150
+ WHERE pg_database.datname LIKE '#{current_database}'
151
+ end_sql
152
+ end
153
+
154
+ def collation
155
+ end
156
+
157
+ def ctype
158
+ end
159
+
160
+ # Returns an array of schema names.
161
+ def schema_names
162
+ query(<<-SQL, 'SCHEMA').flatten
163
+ SELECT nspname
164
+ FROM pg_namespace
165
+ WHERE nspname !~ '^pg_.*'
166
+ AND nspname NOT IN ('information_schema')
167
+ ORDER by nspname;
168
+ SQL
169
+ end
170
+
171
+ # Creates a schema for the given schema name.
172
+ def create_schema schema_name
173
+ execute "CREATE SCHEMA #{schema_name}"
174
+ end
175
+
176
+ # Drops the schema for the given schema name.
177
+ def drop_schema schema_name
178
+ execute "DROP SCHEMA #{schema_name} CASCADE"
179
+ end
180
+
181
+ # Sets the schema search path to a string of comma-separated schema names.
182
+ # Names beginning with $ have to be quoted (e.g. $user => '$user').
183
+ # See: http://www.postgresql.org/docs/current/static/ddl-schemas.html
184
+ #
185
+ # This should be not be called manually but set in database.yml.
186
+ def schema_search_path=(schema_csv)
187
+ if schema_csv
188
+ execute("SET search_path TO #{schema_csv}", 'SCHEMA')
189
+ @schema_search_path = schema_csv
190
+ end
191
+ end
192
+
193
+ # Returns the active schema search path.
194
+ def schema_search_path
195
+ @schema_search_path ||= query('SHOW search_path', 'SCHEMA')[0][0]
196
+ end
197
+
198
+ # Returns the sequence name for a table's primary key or some other specified key.
199
+ def default_sequence_name(table_name, pk = nil) #:nodoc:
200
+ result = serial_sequence(table_name, pk || 'id')
201
+ return nil unless result
202
+ Utils.extract_schema_qualified_name(result).to_s
203
+ rescue ActiveRecord::StatementInvalid
204
+ Redshift::Name.new(nil, "#{table_name}_#{pk || 'id'}_seq").to_s
205
+ end
206
+
207
+ def serial_sequence(table, column)
208
+ result = exec_query(<<-eosql, 'SCHEMA')
209
+ SELECT pg_get_serial_sequence('#{table}', '#{column}')
210
+ eosql
211
+ result.rows.first.first
212
+ end
213
+
214
+ def set_pk_sequence!(table, value) #:nodoc:
215
+ end
216
+
217
+ def reset_pk_sequence!(table, pk = nil, sequence = nil) #:nodoc:
218
+ end
219
+
220
+ def pk_and_sequence_for(table) #:nodoc:
221
+ [nil, nil]
222
+ end
223
+
224
+ # Returns just a table's primary key
225
+ def primary_key(table)
226
+ pks = exec_query(<<-end_sql, 'SCHEMA').rows
227
+ SELECT DISTINCT attr.attname
228
+ FROM pg_attribute attr
229
+ INNER JOIN pg_depend dep ON attr.attrelid = dep.refobjid AND attr.attnum = dep.refobjsubid
230
+ INNER JOIN pg_constraint cons ON attr.attrelid = cons.conrelid AND attr.attnum = any(cons.conkey)
231
+ WHERE cons.contype = 'p'
232
+ AND dep.refobjid = '#{quote_table_name(table)}'::regclass
233
+ end_sql
234
+ return nil unless pks.count == 1
235
+ pks[0][0]
236
+ end
237
+
238
+ # Renames a table.
239
+ # Also renames a table's primary key sequence if the sequence name exists and
240
+ # matches the Active Record default.
241
+ #
242
+ # Example:
243
+ # rename_table('octopuses', 'octopi')
244
+ def rename_table(table_name, new_name)
245
+ clear_cache!
246
+ execute "ALTER TABLE #{quote_table_name(table_name)} RENAME TO #{quote_table_name(new_name)}"
247
+ end
248
+
249
+ def add_column(table_name, column_name, type, options = {}) #:nodoc:
250
+ clear_cache!
251
+ super
252
+ end
253
+
254
+ # Changes the column of a table.
255
+ def change_column(table_name, column_name, type, options = {})
256
+ clear_cache!
257
+ quoted_table_name = quote_table_name(table_name)
258
+ sql_type = type_to_sql(type, options[:limit], options[:precision], options[:scale])
259
+ sql_type << "[]" if options[:array]
260
+ sql = "ALTER TABLE #{quoted_table_name} ALTER COLUMN #{quote_column_name(column_name)} TYPE #{sql_type}"
261
+ sql << " USING #{options[:using]}" if options[:using]
262
+ if options[:cast_as]
263
+ sql << " USING CAST(#{quote_column_name(column_name)} AS #{type_to_sql(options[:cast_as], options[:limit], options[:precision], options[:scale])})"
264
+ end
265
+ execute sql
266
+
267
+ change_column_default(table_name, column_name, options[:default]) if options_include_default?(options)
268
+ change_column_null(table_name, column_name, options[:null], options[:default]) if options.key?(:null)
269
+ end
270
+
271
+ # Changes the default value of a table column.
272
+ def change_column_default(table_name, column_name, default)
273
+ clear_cache!
274
+ column = column_for(table_name, column_name)
275
+ return unless column
276
+
277
+ alter_column_query = "ALTER TABLE #{quote_table_name(table_name)} ALTER COLUMN #{quote_column_name(column_name)} %s"
278
+ if default.nil?
279
+ # <tt>DEFAULT NULL</tt> results in the same behavior as <tt>DROP DEFAULT</tt>. However, PostgreSQL will
280
+ # cast the default to the columns type, which leaves us with a default like "default NULL::character varying".
281
+ execute alter_column_query % "DROP DEFAULT"
282
+ else
283
+ execute alter_column_query % "SET DEFAULT #{quote_default_value(default, column)}"
284
+ end
285
+ end
286
+
287
+ def change_column_null(table_name, column_name, null, default = nil)
288
+ clear_cache!
289
+ unless null || default.nil?
290
+ column = column_for(table_name, column_name)
291
+ execute("UPDATE #{quote_table_name(table_name)} SET #{quote_column_name(column_name)}=#{quote_default_value(default, column)} WHERE #{quote_column_name(column_name)} IS NULL") if column
292
+ end
293
+ execute("ALTER TABLE #{quote_table_name(table_name)} ALTER #{quote_column_name(column_name)} #{null ? 'DROP' : 'SET'} NOT NULL")
294
+ end
295
+
296
+ # Renames a column in a table.
297
+ def rename_column(table_name, column_name, new_column_name) #:nodoc:
298
+ clear_cache!
299
+ execute "ALTER TABLE #{quote_table_name(table_name)} RENAME COLUMN #{quote_column_name(column_name)} TO #{quote_column_name(new_column_name)}"
300
+ rename_column_indexes(table_name, column_name, new_column_name)
301
+ end
302
+
303
+ def add_index(table_name, column_name, options = {}) #:nodoc:
304
+ end
305
+
306
+ def remove_index!(table_name, index_name) #:nodoc:
307
+ end
308
+
309
+ def rename_index(table_name, old_name, new_name)
310
+ end
311
+
312
+ def foreign_keys(table_name)
313
+ fk_info = select_all <<-SQL.strip_heredoc
314
+ SELECT t2.relname AS to_table, a1.attname AS column, a2.attname AS primary_key, c.conname AS name, c.confupdtype AS on_update, c.confdeltype AS on_delete
315
+ FROM pg_constraint c
316
+ JOIN pg_class t1 ON c.conrelid = t1.oid
317
+ JOIN pg_class t2 ON c.confrelid = t2.oid
318
+ JOIN pg_attribute a1 ON a1.attnum = c.conkey[1] AND a1.attrelid = t1.oid
319
+ JOIN pg_attribute a2 ON a2.attnum = c.confkey[1] AND a2.attrelid = t2.oid
320
+ JOIN pg_namespace t3 ON c.connamespace = t3.oid
321
+ WHERE c.contype = 'f'
322
+ AND t1.relname = #{quote(table_name)}
323
+ AND t3.nspname = ANY (current_schemas(false))
324
+ ORDER BY c.conname
325
+ SQL
326
+
327
+ fk_info.map do |row|
328
+ options = {
329
+ column: row['column'],
330
+ name: row['name'],
331
+ primary_key: row['primary_key']
332
+ }
333
+
334
+ options[:on_delete] = extract_foreign_key_action(row['on_delete'])
335
+ options[:on_update] = extract_foreign_key_action(row['on_update'])
336
+
337
+ ForeignKeyDefinition.new(table_name, row['to_table'], options)
338
+ end
339
+ end
340
+
341
+ def extract_foreign_key_action(specifier) # :nodoc:
342
+ case specifier
343
+ when 'c'; :cascade
344
+ when 'n'; :nullify
345
+ when 'r'; :restrict
346
+ end
347
+ end
348
+
349
+ def index_name_length
350
+ 63
351
+ end
352
+
353
+ # Maps logical Rails types to PostgreSQL-specific data types.
354
+ def type_to_sql(type, limit = nil, precision = nil, scale = nil)
355
+ case type.to_s
356
+ when 'integer'
357
+ return 'integer' unless limit
358
+
359
+ case limit
360
+ when 1, 2; 'smallint'
361
+ when 3, 4; 'integer'
362
+ when 5..8; 'bigint'
363
+ else raise(ActiveRecordError, "No integer type has byte size #{limit}. Use a numeric with precision 0 instead.")
364
+ end
365
+ when 'datetime'
366
+ return super unless precision
367
+
368
+ case precision
369
+ when 0..6; "timestamp(#{precision})"
370
+ else raise(ActiveRecordError, "No timestamp type has precision of #{precision}. The allowed range of precision is from 0 to 6")
371
+ end
372
+ else
373
+ super
374
+ end
375
+ end
376
+
377
+ # PostgreSQL requires the ORDER BY columns in the select list for distinct queries, and
378
+ # requires that the ORDER BY include the distinct column.
379
+ def columns_for_distinct(columns, orders) #:nodoc:
380
+ order_columns = orders.reject(&:blank?).map{ |s|
381
+ # Convert Arel node to string
382
+ s = s.to_sql unless s.is_a?(String)
383
+ # Remove any ASC/DESC modifiers
384
+ s.gsub(/\s+(?:ASC|DESC)\b/i, '')
385
+ .gsub(/\s+NULLS\s+(?:FIRST|LAST)\b/i, '')
386
+ }.reject(&:blank?).map.with_index { |column, i| "#{column} AS alias_#{i}" }
387
+
388
+ [super, *order_columns].join(', ')
389
+ end
390
+ end
391
+ end
392
+ end
393
+ end