sequel 5.7.1 → 5.8.0

Files changed (39)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG +53 -1
  3. data/doc/association_basics.rdoc +2 -2
  4. data/doc/migration.rdoc +11 -10
  5. data/doc/postgresql.rdoc +71 -0
  6. data/doc/release_notes/5.8.0.txt +170 -0
  7. data/lib/sequel/adapters/jdbc.rb +6 -1
  8. data/lib/sequel/adapters/jdbc/postgresql.rb +3 -3
  9. data/lib/sequel/adapters/mysql2.rb +2 -1
  10. data/lib/sequel/adapters/postgres.rb +32 -10
  11. data/lib/sequel/adapters/shared/mssql.rb +11 -11
  12. data/lib/sequel/adapters/shared/mysql.rb +51 -6
  13. data/lib/sequel/adapters/shared/oracle.rb +12 -2
  14. data/lib/sequel/adapters/shared/postgres.rb +97 -30
  15. data/lib/sequel/adapters/shared/sqlanywhere.rb +2 -2
  16. data/lib/sequel/adapters/shared/sqlite.rb +6 -1
  17. data/lib/sequel/dataset/features.rb +5 -0
  18. data/lib/sequel/dataset/query.rb +48 -19
  19. data/lib/sequel/exceptions.rb +7 -0
  20. data/lib/sequel/extensions/connection_expiration.rb +8 -3
  21. data/lib/sequel/extensions/pg_enum.rb +28 -5
  22. data/lib/sequel/plugins/association_proxies.rb +16 -4
  23. data/lib/sequel/plugins/error_splitter.rb +16 -11
  24. data/lib/sequel/plugins/pg_auto_constraint_validations.rb +260 -0
  25. data/lib/sequel/plugins/subclasses.rb +1 -1
  26. data/lib/sequel/plugins/tactical_eager_loading.rb +1 -1
  27. data/lib/sequel/version.rb +2 -2
  28. data/spec/adapters/mysql_spec.rb +0 -1
  29. data/spec/adapters/postgres_spec.rb +169 -4
  30. data/spec/adapters/sqlite_spec.rb +13 -0
  31. data/spec/core/dataset_spec.rb +21 -0
  32. data/spec/extensions/association_proxies_spec.rb +21 -7
  33. data/spec/extensions/connection_expiration_spec.rb +13 -1
  34. data/spec/extensions/pg_auto_constraint_validations_spec.rb +165 -0
  35. data/spec/extensions/pg_enum_spec.rb +26 -22
  36. data/spec/extensions/tactical_eager_loading_spec.rb +11 -0
  37. data/spec/integration/dataset_test.rb +30 -6
  38. data/spec/integration/plugin_test.rb +2 -2
  39. metadata +6 -2
@@ -232,7 +232,7 @@ module Sequel
  end
 
  module DatasetMethods
- Dataset.def_sql_method(self, :insert, %w'with insert into columns values')
+ Dataset.def_sql_method(self, :insert, %w'insert into columns values')
  Dataset.def_sql_method(self, :select, %w'with select distinct limit columns into from join where group having compounds order lock')
 
  # Whether to convert smallint to boolean arguments for this dataset.
@@ -247,7 +247,7 @@ module Sequel
  end
 
  def supports_cte?(type=:select)
- type == :select || type == :insert
+ type == :select
  end
 
  # SQLAnywhere supports GROUPING SETS
@@ -179,7 +179,12 @@ module Sequel
  fks = fetch("PRAGMA foreign_keys")
  run "PRAGMA foreign_keys = 0" if fks
  transaction do
- if ops.length > 1 && ops.all?{|op| op[:op] == :add_constraint}
+ if ops.length > 1 && ops.all?{|op| op[:op] == :add_constraint || op[:op] == :set_column_null}
+ null_ops, ops = ops.partition{|op| op[:op] == :set_column_null}
+
+ # Apply NULL/NOT NULL ops first, since those should be purely independent of the constraints.
+ null_ops.each{|op| alter_table_sql_list(table, [op]).flatten.each{|sql| execute_ddl(sql)}}
+
  # If you are just doing constraints, apply all of them at the same time,
  # as otherwise all but the last one get lost.
  alter_table_sql_list(table, [{:op=>:add_constraints, :ops=>ops}]).flatten.each{|sql| execute_ddl(sql)}
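For context, a rough sketch of the kind of migration this SQLite change targets: an alter_table that mixes NULL changes with constraint additions now applies the NULL ops first and then adds all constraints together, so none of the constraints are lost during the table rebuild. The table and column names below are made up for illustration.

    DB.alter_table(:items) do
      set_column_not_null :quantity
      add_constraint(:quantity_positive){quantity > 0}
      add_constraint(:quantity_max){quantity < 10000}
    end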
@@ -115,6 +115,11 @@ module Sequel
  true
  end
 
+ # Whether the dataset supports raising an error instead of waiting for locked rows when returning data, false by default.
+ def supports_nowait?
+ false
+ end
+
  # Whether modifying joined datasets is supported, false by default.
  def supports_modifying_joins?
  false
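supports_nowait? is a new feature-detection predicate; adapters override it to return true when the database implements NOWAIT row locking, and callers can check it before using Dataset#nowait (added later in this diff). Which adapters enable it is not shown in this hunk, so the sketch only feature-detects:

    ds = DB[:items].for_update
    ds = ds.nowait if ds.supports_nowait?
    ds.first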
@@ -88,7 +88,6 @@ module Sequel
  c.clear_columns_cache
  end
  c.freeze
- c
  end
  else
  # :nocov:
@@ -116,8 +115,12 @@ module Sequel
  # DB[:items].order(:id).distinct{func(:id)} # SQL: SELECT DISTINCT ON (func(id)) * FROM items ORDER BY id
  def distinct(*args, &block)
  virtual_row_columns(args, block)
- raise(InvalidOperation, "DISTINCT ON not supported") if !args.empty? && !supports_distinct_on?
- clone(:distinct => args.freeze)
+ if args.empty?
+ cached_dataset(:_distinct_ds){clone(:distinct => EMPTY_ARRAY)}
+ else
+ raise(InvalidOperation, "DISTINCT ON not supported") unless supports_distinct_on?
+ clone(:distinct => args.freeze)
+ end
  end
 
  # Adds an EXCEPT clause using a second dataset object.
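The distinct change is part of a series in this release that caches argument-free clones on frozen datasets (datasets are frozen by default in Sequel 5), so repeated calls reuse one object instead of allocating a new clone each time. A sketch of the observable effect, assuming a frozen dataset:

    ds = DB[:items]
    ds.distinct.equal?(ds.distinct)   # => true, the no-argument clone is cached
    ds.distinct(:name)                # DISTINCT ON: still allocates, and still requires supports_distinct_on?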
@@ -190,7 +193,6 @@ module Sequel
  c = _clone(:freeze=>false)
  c.send(:_extension!, a)
  c.freeze
- c
  end
  else
  # :nocov:
@@ -274,11 +276,15 @@ module Sequel
- def from_self(opts=OPTS)
+ def from_self(opts=(cache=true; OPTS))
  fs = {}
  @opts.keys.each{|k| fs[k] = nil unless non_sql_option?(k)}
- c = clone(fs).from(opts[:alias] ? as(opts[:alias], opts[:column_aliases]) : self)
- if cols = _columns
- c.send(:columns=, cols)
+ pr = proc do
+ c = clone(fs).from(opts[:alias] ? as(opts[:alias], opts[:column_aliases]) : self)
+ if cols = _columns
+ c.send(:columns=, cols)
+ end
+ c
  end
- c
+
+ cache ? cached_dataset(:_from_self_ds, &pr) : pr.call
  end
 
  # Match any of the columns to any of the patterns. The terms can be
@@ -616,7 +622,7 @@ module Sequel
  # DB.from(:a, DB[:b].where(Sequel[:a][:c]=>Sequel[:b][:d]).lateral)
  # # SELECT * FROM a, LATERAL (SELECT * FROM b WHERE (a.c = b.d))
  def lateral
- clone(:lateral=>true)
+ cached_dataset(:_lateral_ds){clone(:lateral=>true)}
  end
 
  # If given an integer, the dataset will contain only the first l results.
@@ -672,6 +678,18 @@ module Sequel
  cached_dataset(:_naked_ds){with_row_proc(nil)}
  end
 
+ # Returns a copy of the dataset that will raise a DatabaseLockTimeout instead
+ # of waiting for rows that are locked by another transaction.
+ #
+ # DB[:items].for_update.nowait
+ # # SELECT * FROM items FOR UPDATE NOWAIT
+ def nowait
+ cached_dataset(:_nowait_ds) do
+ raise(Error, 'This dataset does not support raising errors instead of waiting for locked rows') unless supports_nowait?
+ clone(:nowait=>true)
+ end
+ end
+
  # Returns a copy of the dataset with a specified order. Can be safely combined with limit.
  # If you call limit with an offset, it will override the offset if you've called
  # offset first.
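A short usage sketch for the new Dataset#nowait, following the example in the method documentation above; the SELECT shown assumes an adapter where supports_nowait? is true:

    ds = DB[:items].for_update.nowait
    ds.sql   # "SELECT * FROM items FOR UPDATE NOWAIT"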
@@ -755,15 +773,20 @@ module Sequel
  #
  # DB[:items].where(id: 1).qualify(:i)
  # # SELECT i.* FROM items WHERE (i.id = 1)
- def qualify(table=first_source)
+ def qualify(table=(cache=true; first_source))
  o = @opts
  return self if o[:sql]
- h = {}
- (o.keys & QUALIFY_KEYS).each do |k|
- h[k] = qualified_expression(o[k], table)
+
+ pr = proc do
+ h = {}
+ (o.keys & QUALIFY_KEYS).each do |k|
+ h[k] = qualified_expression(o[k], table)
+ end
+ h[:select] = [SQL::ColumnAll.new(table)].freeze if !o[:select] || o[:select].empty?
+ clone(h)
  end
- h[:select] = [SQL::ColumnAll.new(table)].freeze if !o[:select] || o[:select].empty?
- clone(h)
+
+ cache ? cached_dataset(:_qualify_ds, &pr) : pr.call
  end
 
  # Modify the RETURNING clause, only supported on a few databases. If returning
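qualify uses the (cache=true; first_source) default-argument idiom: when called with no argument the qualified clone is cached, while passing an explicit table skips the cache. Behavior is otherwise unchanged, as in the documentation example above:

    ds = DB[:items].where(id: 1)
    ds.qualify.sql                   # "SELECT items.* FROM items WHERE (items.id = 1)"
    ds.qualify.equal?(ds.qualify)    # => true, cached when no table is given
    ds.qualify(:i).sql               # "SELECT i.* FROM items WHERE (i.id = 1)", not cached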
@@ -785,8 +808,15 @@ module Sequel
  # # hash for each row deleted, with values for all columns
  # end
  def returning(*values)
- raise Error, "RETURNING is not supported on #{db.database_type}" unless supports_returning?(:insert)
- clone(:returning=>values.freeze)
+ if values.empty?
+ cached_dataset(:_returning_ds) do
+ raise Error, "RETURNING is not supported on #{db.database_type}" unless supports_returning?(:insert)
+ clone(:returning=>EMPTY_ARRAY)
+ end
+ else
+ raise Error, "RETURNING is not supported on #{db.database_type}" unless supports_returning?(:insert)
+ clone(:returning=>values.freeze)
+ end
  end
 
  # Returns a copy of the dataset with the order reversed. If no order is
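returning with no arguments (RETURNING *) now returns a cached clone; the behavior and the documented block form are unchanged. A PostgreSQL-flavored sketch of that block form:

    DB[:items].returning.insert(name: 'a') do |row|
      row        # hash with values for all columns of the inserted row
    end
    DB[:items].returning(:id).where(name: 'a').delete do |row|
      row[:id]   # only the requested column is returned per deleted row
    end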
@@ -831,7 +861,7 @@ module Sequel
  # DB[:items].select_all(:items, :foo) # SELECT items.*, foo.* FROM items
  def select_all(*tables)
  if tables.empty?
- clone(:select => nil)
+ cached_dataset(:_select_all_ds){clone(:select => nil)}
  else
  select(*tables.map{|t| i, a = split_alias(t); a || i}.map!{|t| SQL::ColumnAll.new(t)}.freeze)
  end
@@ -1067,7 +1097,6 @@ module Sequel
  c.extend(*mods) unless mods.empty?
  c.extend(DatasetModule.new(&block)) if block
  c.freeze
- c
  end
  else
  # :nocov:
@@ -73,6 +73,13 @@ module Sequel
  SerializationFailure = Class.new(DatabaseError)
  ).name
 
+ (
+ # Error raised when Sequel determines the database could not acquire a necessary lock
+ # before timing out. Use of Dataset#nowait can often cause this exception when
+ # retrieving rows.
+ DatabaseLockTimeout = Class.new(DatabaseError)
+ ).name
+
  (
  # Error raised on an invalid operation, such as trying to update or delete
  # a joined or grouped dataset when the database does not support that.
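A handling sketch for the new exception class together with Dataset#nowait; rescuing outside the transaction avoids continuing work in a transaction that the failed lock attempt may have aborted:

    begin
      DB.transaction do
        row = DB[:items].where(id: 1).for_update.nowait.first
        # work with the locked row
      end
    rescue Sequel::DatabaseLockTimeout
      # another transaction holds the row lock; give up instead of waiting
    end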
@@ -38,12 +38,17 @@ module Sequel
  # Defaults to 14400 seconds (4 hours).
  attr_accessor :connection_expiration_timeout
 
+ # The maximum number of seconds that will be added as a random delay to the expiration timeout.
+ # Defaults to 0 seconds (no random delay).
+ attr_accessor :connection_expiration_random_delay
+
  # Initialize the data structures used by this extension.
  def self.extended(pool)
  pool.instance_exec do
  sync do
  @connection_expiration_timestamps ||= {}
  @connection_expiration_timeout ||= 14400
+ @connection_expiration_random_delay ||= 0
  end
  end
  end
@@ -59,7 +64,7 @@ module Sequel
  # Record the time the connection was created.
  def make_new(*)
  conn = super
- @connection_expiration_timestamps[conn] = Sequel.start_timer
+ @connection_expiration_timestamps[conn] = [Sequel.start_timer, @connection_expiration_timeout + (rand * @connection_expiration_random_delay)].freeze
  conn
  end
 
@@ -69,8 +74,8 @@ module Sequel
  def acquire(*a)
  begin
  if (conn = super) &&
- (timer = sync{@connection_expiration_timestamps[conn]}) &&
- Sequel.elapsed_seconds_since(timer) > @connection_expiration_timeout
+ (cet = sync{@connection_expiration_timestamps[conn]}) &&
+ Sequel.elapsed_seconds_since(cet[0]) > cet[1]
 
  if pool_type == :sharded_threaded
  sync{allocated(a.last).delete(Thread.current)}
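Each connection now stores a per-connection deadline of the base timeout plus a random share of connection_expiration_random_delay, so connections created in a burst do not all expire (and reconnect) at the same instant. Configuration goes through the pool, mirroring the existing timeout accessor:

    DB.extension :connection_expiration
    DB.pool.connection_expiration_timeout = 3600       # base lifetime of about 1 hour
    DB.pool.connection_expiration_random_delay = 300   # plus up to 5 extra minutes per connection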
@@ -13,6 +13,10 @@
  #
  # DB.add_enum_value(:enum_type_name, 'value4')
  #
+ # If you want to rename an enum type, you can use rename_enum:
+ #
+ # DB.rename_enum(:enum_type_name, :enum_type_another_name)
+ #
  # If you want to drop an enum type, you can use drop_enum:
  #
  # DB.drop_enum(:enum_type_name)
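Besides the direct Database#rename_enum call documented here, the reverse action registered later in this diff makes the method usable inside a reversible migration change block. A sketch, assuming the pg_enum extension is loaded into the migrating Database:

    Sequel.migration do
      change do
        rename_enum(:enum_type_name, :enum_type_another_name)
      end
      # migrating down runs rename_enum(:enum_type_another_name, :enum_type_name)
    end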
@@ -63,7 +67,10 @@ module Sequel
  # Parse the available enum values when loading this extension into
  # your database.
  def self.extended(db)
- db.send(:parse_enum_labels)
+ db.instance_exec do
+ @enum_labels = {}
+ parse_enum_labels
+ end
  end
 
  # Run the SQL to add the given value to the existing enum type.
@@ -92,6 +99,15 @@ module Sequel
  nil
  end
 
+ # Run the SQL to rename the enum type with the given name
+ # to the given new name.
+ def rename_enum(enum, new_name)
+ sql = "ALTER TYPE #{quote_schema_table(enum)} RENAME TO #{quote_schema_table(new_name)}"
+ run sql
+ parse_enum_labels
+ nil
+ end
+
  # Run the SQL to drop the enum type with the given name.
  # Options:
  # :if_exists :: Do not raise an error if the enum type does not exist
@@ -109,15 +125,15 @@ module Sequel
  # the pg_type table to get names and array oids for
  # enums.
  def parse_enum_labels
- @enum_labels = metadata_dataset.from(:pg_enum).
+ enum_labels = metadata_dataset.from(:pg_enum).
  order(:enumtypid, :enumsortorder).
  select_hash_groups(Sequel.cast(:enumtypid, Integer).as(:v), :enumlabel).freeze
- @enum_labels.each_value(&:freeze)
+ enum_labels.each_value(&:freeze)
 
  if respond_to?(:register_array_type)
  array_types = metadata_dataset.
  from(:pg_type).
- where(:oid=>@enum_labels.keys).
+ where(:oid=>enum_labels.keys).
  exclude(:typarray=>0).
  select_map([:typname, Sequel.cast(:typarray, Integer).as(:v)])
 
@@ -127,13 +143,16 @@ module Sequel
  register_array_type(name, :oid=>oid)
  end
  end
+
+ Sequel.synchronize{@enum_labels.replace(enum_labels)}
  end
 
  # For schema entries that are enums, set the type to
  # :enum and add a :enum_values entry with the enum values.
  def schema_post_process(_)
  super.each do |_, s|
- if values = @enum_labels[s[:oid]]
+ oid = s[:oid]
+ if values = Sequel.synchronize{@enum_labels[oid]}
  s[:type] = :enum
  s[:enum_values] = values
  end
@@ -154,6 +173,10 @@ module Sequel
  def create_enum(name, _)
  @actions << [:drop_enum, name]
  end
+
+ def rename_enum(old_name, new_name)
+ @actions << [:rename_enum, new_name, old_name]
+ end
  end
  end
 
@@ -61,11 +61,23 @@ module Sequel
  # associated objects and call the method on the associated object array.
  # Calling any other method will call that method on the association's dataset.
  class AssociationProxy < BasicObject
- array = []
+ array = [].freeze
 
- # Default proc used to determine whether to sent the method to the dataset.
- # If the array would respond to it, sends it to the array instead of the dataset.
- DEFAULT_PROXY_TO_DATASET = proc{|opts| !array.respond_to?(opts[:method])}
+ if RUBY_VERSION < '2.6'
+ # Default proc used to determine whether to send the method to the dataset.
+ # If the array would respond to it, sends it to the array instead of the dataset.
+ DEFAULT_PROXY_TO_DATASET = proc do |opts|
+ array_method = array.respond_to?(opts[:method])
+ if !array_method && opts[:method] == :filter
+ Sequel::Deprecation.deprecate "The behavior of the #filter method for association proxies will change in Ruby 2.6. Switch from using #filter to using #where to conserve current behavior."
+ end
+ !array_method
+ end
+ else
+ # :nocov:
+ DEFAULT_PROXY_TO_DATASET = proc{|opts| !array.respond_to?(opts[:method])}
+ # :nocov:
+ end
 
  # Set the association reflection to use, and whether the association should be
  # reloaded if an array method is called.
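The proxy decides per call whether a method goes to the cached array of associated objects or to the association dataset. Ruby 2.6 adds Array#filter, so #filter on a proxy will start going to the array; on earlier Rubies the shim above warns and keeps sending it to the dataset. A sketch of the portable spelling (Artist, Album, and the association are illustrative):

    Artist.plugin :association_proxies
    Artist.one_to_many :albums

    artist = Artist.first
    artist.albums.filter(name: 'X')  # Ruby < 2.6: goes to the dataset, with a deprecation warning
    artist.albums.where(name: 'X')   # same meaning on every Ruby version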
@@ -32,25 +32,30 @@ module Sequel
  # Album.plugin :error_splitter
  module ErrorSplitter
  module InstanceMethods
- # If the model instance is not valid, go through all of the errors entries. For
- # any that apply to multiple columns, remove them and add separate error entries,
- # one per column.
+ private
+
+ # If the model instance is not valid, split the errors before returning.
  def _valid?(opts)
  v = super
  unless v
- errors.keys.select{|k| k.is_a?(Array)}.each do |ks|
- msgs = errors.delete(ks)
- ks.each do |k|
- msgs.each do |msg|
- errors.add(k, msg)
- end
+ split_validation_errors(errors)
+ end
+ v
+ end
+
+ # Go through all of the errors entries. For any that apply to multiple columns,
+ # remove them and add separate error entries, one per column.
+ def split_validation_errors(errors)
+ errors.keys.select{|k| k.is_a?(Array)}.each do |ks|
+ msgs = errors.delete(ks)
+ ks.each do |k|
+ msgs.each do |msg|
+ errors.add(k, msg)
  end
  end
  end
- v
  end
  end
  end
  end
  end
-
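Extracting split_validation_errors into its own private method keeps _valid? behavior the same while letting other code reuse the splitting step (the new pg_auto_constraint_validations plugin below checks for it). The effect of splitting, sketched with the validation_helpers plugin and a composite uniqueness validation:

    Album.plugin :validation_helpers
    Album.plugin :error_splitter

    class Album
      def validate
        super
        validates_unique([:artist_id, :name])
      end
    end

    album = Album.new(artist_id: 1, name: 'RF')
    album.valid?   # => false when a duplicate row exists
    album.errors   # {:artist_id=>['is already taken'], :name=>['is already taken']}
                   # rather than {[:artist_id, :name]=>['is already taken']}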
@@ -0,0 +1,260 @@
+ # frozen-string-literal: true
+
+ module Sequel
+ module Plugins
+ # The pg_auto_constraint_validations plugin automatically converts some constraint
+ # violation exceptions that are raised by INSERT/UPDATE queries into validation
+ # failures. This can allow for using the same error handling code for both
+ # regular validation errors (checked before attempting the INSERT/UPDATE), and
+ # constraint violations (raised during the INSERT/UPDATE).
+ #
+ # This handles the following constraint violations:
+ #
+ # * NOT NULL
+ # * CHECK
+ # * UNIQUE (except expression/functional indexes)
+ # * FOREIGN KEY (both referencing and referenced by)
+ #
+ # If the plugin cannot convert the constraint violation error to a validation
+ # error, it just reraises the initial exception, so this should not cause
+ # problems if the plugin doesn't know how to convert the exception.
+ #
+ # This plugin is not intended as a replacement for other validations,
+ # it is intended as a last resort. The purpose of validations is to provide nice
+ # error messages for the user, and the error messages generated by this plugin are
+ # fairly generic. The error messages can be customized using the :messages plugin
+ # option, but there is only a single message used per constraint type.
+ #
+ # This plugin only works on the postgres adapter when using the pg 0.16+ driver,
+ # PostgreSQL 9.3+ server, and PostgreSQL 9.3+ client library (libpq). In other cases
+ # it will be a no-op.
+ #
+ # Example:
+ #
+ # album = Album.new(:artist_id=>1) # Assume no such artist exists
+ # begin
+ # album.save
+ # rescue Sequel::ValidationFailed
+ # album.errors.on(:artist_id) # ['is invalid']
+ # end
+ #
+ # Usage:
+ #
+ # # Make all model subclasses automatically convert constraint violations
+ # # to validation failures (called before loading subclasses)
+ # Sequel::Model.plugin :pg_auto_constraint_validations
+ #
+ # # Make the Album class automatically convert constraint violations
+ # # to validation failures
+ # Album.plugin :pg_auto_constraint_validations
+ module PgAutoConstraintValidations
+ (
+ # The default error messages for each constraint violation type.
+ DEFAULT_ERROR_MESSAGES = {
+ :not_null=>"is not present",
+ :check=>"is invalid",
+ :unique=>'is already taken',
+ :foreign_key=>'is invalid',
+ :referenced_by=>'cannot be changed currently'
+ }.freeze).each_value(&:freeze)
+
+ # Setup the constraint violation metadata. Options:
+ # :messages :: Override the default error messages for each constraint
+ # violation type (:not_null, :check, :unique, :foreign_key, :referenced_by)
+ def self.configure(model, opts=OPTS)
+ model.instance_exec do
+ setup_pg_auto_constraint_validations
+ @pg_auto_constraint_validations_messages = (@pg_auto_constraint_validations_messages || DEFAULT_ERROR_MESSAGES).merge(opts[:messages] || {}).freeze
+ end
+ end
+
+ module ClassMethods
+ # Hash of metadata checked when an instance attempts to convert a constraint
+ # violation into a validation failure.
+ attr_reader :pg_auto_constraint_validations
+
+ # Hash of error messages keyed by constraint type symbol to use in the
+ # generated validation failures.
+ attr_reader :pg_auto_constraint_validations_messages
+
+ Plugins.inherited_instance_variables(self, :@pg_auto_constraint_validations=>nil, :@pg_auto_constraint_validations_messages=>nil)
+ Plugins.after_set_dataset(self, :setup_pg_auto_constraint_validations)
+
+ private
+
+ # Get the list of constraints, unique indexes, foreign keys in the current
+ # table, and keys in the current table referenced by foreign keys in other
+ # tables. Store this information so that if a constraint violation occurs,
+ # all necessary metadata is already available in the model, so a query is
+ # not required at runtime. This is both for performance and because in
+ # general after the constraint violation failure you will be inside a
+ # failed transaction and not able to execute queries.
+ def setup_pg_auto_constraint_validations
+ return unless @dataset
+
+ case @dataset.first_source_table
+ when Symbol, String, SQL::Identifier, SQL::QualifiedIdentifier
+ convert_errors = db.respond_to?(:error_info)
+ end
+
+ unless convert_errors
+ # Might be a table returning function or subquery, skip handling those.
+ # Might have db not support error_info, skip handling that.
+ @pg_auto_constraint_validations = nil
+ return
+ end
+
+ checks = {}
+ indexes = {}
+ foreign_keys = {}
+ referenced_by = {}
+
+ db.check_constraints(table_name).each do |k, v|
+ checks[k] = v[:columns].dup.freeze
+ end
+ db.indexes(table_name, :include_partial=>true).each do |k, v|
+ if v[:unique]
+ indexes[k] = v[:columns].dup.freeze
+ end
+ end
+ db.foreign_key_list(table_name, :schema=>false).each do |fk|
+ foreign_keys[fk[:name]] = fk[:columns].dup.freeze
+ end
+ db.foreign_key_list(table_name, :reverse=>true, :schema=>false).each do |fk|
+ referenced_by[[fk[:schema], fk[:table], fk[:name]].freeze] = fk[:key].dup.freeze
+ end
+
+ schema, table = db[:pg_class].
+ join(:pg_namespace, :oid=>:relnamespace, db.send(:regclass_oid, table_name)=>:oid).
+ get([:nspname, :relname])
+
+ (@pg_auto_constraint_validations = {
+ :schema=>schema,
+ :table=>table,
+ :check=>checks,
+ :unique=>indexes,
+ :foreign_key=>foreign_keys,
+ :referenced_by=>referenced_by
+ }.freeze).each_value(&:freeze)
+ end
+ end
+
+ module InstanceMethods
+ private
+
+ # Yield to the given block, and if a Sequel::ConstraintViolation is raised, try
+ # to convert it to a Sequel::ValidationFailed error using the PostgreSQL error
+ # metadata.
+ def check_pg_constraint_error(ds)
+ yield
+ rescue Sequel::ConstraintViolation => e
+ begin
+ unless cv_info = model.pg_auto_constraint_validations
+ # Necessary metadata does not exist, just reraise the exception.
+ raise e
+ end
+
+ info = ds.db.error_info(e)
+ m = ds.method(:output_identifier)
+ schema = info[:schema]
+ table = info[:table]
+ if constraint = info[:constraint]
+ constraint = m.call(constraint)
+ end
+ messages = model.pg_auto_constraint_validations_messages
+
+ case e
+ when Sequel::NotNullConstraintViolation
+ if column = info[:column]
+ add_pg_constraint_validation_error([m.call(column)], messages[:not_null])
+ end
+ when Sequel::CheckConstraintViolation
+ if columns = cv_info[:check][constraint]
+ add_pg_constraint_validation_error(columns, messages[:check])
+ end
+ when Sequel::UniqueConstraintViolation
+ if columns = cv_info[:unique][constraint]
+ add_pg_constraint_validation_error(columns, messages[:unique])
+ end
+ when Sequel::ForeignKeyConstraintViolation
+ message_primary = info[:message_primary]
+ if message_primary.start_with?('update')
+ # This constraint violation is different from the others, because the constraint
+ # referenced is a constraint for a different table, not for this table. This
+ # happens when another table references the current table, and the referenced
+ # column in the current update is modified such that referential integrity
+ # would be broken. Use the reverse foreign key information to figure out
+ # which column is affected in that case.
+ skip_schema_table_check = true
+ if columns = cv_info[:referenced_by][[m.call(schema), m.call(table), constraint]]
+ add_pg_constraint_validation_error(columns, messages[:referenced_by])
+ end
+ elsif message_primary.start_with?('insert')
+ if columns = cv_info[:foreign_key][constraint]
+ add_pg_constraint_validation_error(columns, messages[:foreign_key])
+ end
+ end
+ end
+ rescue => e2
+ # If there is an error trying to convert the constraint violation
+ # into a validation failure, it's best to just raise the constraint
+ # violation. This can make debugging the above block of code more
+ # difficult.
+ raise e
+ else
+ unless skip_schema_table_check
+ # The constraint violation could be caused by a trigger modifying
+ # a different table. Check that the error schema and table
+ # match the model's schema and table, or clear the validation error
+ # that was set above.
+ if schema != cv_info[:schema] || table != cv_info[:table]
+ errors.clear
+ end
+ end
+
+ if errors.empty?
+ # If we weren't able to parse the constraint violation metadata and
+ # convert it to an appropriate validation failure, or the schema/table
+ # didn't match, then raise the constraint violation.
+ raise e
+ end
+
+ # Integrate with error_splitter plugin to split any multi-column errors
+ # and add them as separate single column errors
+ if respond_to?(:split_validation_errors, true)
+ split_validation_errors(errors)
+ end
+
+ vf = ValidationFailed.new(self)
+ vf.set_backtrace(e.backtrace)
+ vf.wrapped_exception = e
+ raise vf
+ end
+ end
+
+ # If there is a single column instead of an array of columns, add the error
+ # for the column, otherwise add the error for the array of columns.
+ def add_pg_constraint_validation_error(column, message)
+ column = column.first if column.length == 1
+ errors.add(column, message)
+ end
+
+ # Convert PostgreSQL constraint errors when inserting.
+ def _insert_raw(ds)
+ check_pg_constraint_error(ds){super}
+ end
+
+ # Convert PostgreSQL constraint errors when inserting.
+ def _insert_select_raw(ds)
+ check_pg_constraint_error(ds){super}
+ end
+
+ # Convert PostgreSQL constraint errors when updating.
+ def _update_without_checking(_)
+ check_pg_constraint_error(_update_dataset){super}
+ end
+ end
+ end
+ end
+ end
+
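A combined-usage sketch, mirroring the example in the plugin documentation above and assuming the postgres adapter with error_info support; loading error_splitter as well makes the plugin split any multi-column constraint errors into per-column entries:

    Album.plugin :error_splitter
    Album.plugin :pg_auto_constraint_validations

    album = Album.new(artist_id: 1)   # assume no such artist exists
    begin
      album.save
    rescue Sequel::ValidationFailed
      album.errors.on(:artist_id)     # ['is invalid'], converted from the FOREIGN KEY violation
    end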