sequel 5.60.1 → 5.62.0

Files changed (72):
  1. checksums.yaml +4 -4
  2. data/CHANGELOG +44 -0
  3. data/README.rdoc +20 -19
  4. data/doc/advanced_associations.rdoc +13 -13
  5. data/doc/association_basics.rdoc +21 -15
  6. data/doc/cheat_sheet.rdoc +3 -3
  7. data/doc/model_hooks.rdoc +1 -1
  8. data/doc/object_model.rdoc +8 -8
  9. data/doc/opening_databases.rdoc +4 -4
  10. data/doc/postgresql.rdoc +8 -8
  11. data/doc/querying.rdoc +1 -1
  12. data/doc/release_notes/5.61.0.txt +43 -0
  13. data/doc/release_notes/5.62.0.txt +132 -0
  14. data/doc/schema_modification.rdoc +1 -1
  15. data/doc/security.rdoc +9 -9
  16. data/doc/sql.rdoc +13 -13
  17. data/doc/testing.rdoc +13 -11
  18. data/doc/transactions.rdoc +6 -6
  19. data/doc/virtual_rows.rdoc +1 -1
  20. data/lib/sequel/adapters/postgres.rb +4 -0
  21. data/lib/sequel/adapters/shared/access.rb +9 -1
  22. data/lib/sequel/adapters/shared/mssql.rb +9 -5
  23. data/lib/sequel/adapters/shared/mysql.rb +7 -0
  24. data/lib/sequel/adapters/shared/oracle.rb +7 -0
  25. data/lib/sequel/adapters/shared/postgres.rb +275 -152
  26. data/lib/sequel/adapters/shared/sqlanywhere.rb +7 -0
  27. data/lib/sequel/adapters/shared/sqlite.rb +5 -0
  28. data/lib/sequel/connection_pool.rb +42 -28
  29. data/lib/sequel/database/connecting.rb +24 -0
  30. data/lib/sequel/database/misc.rb +62 -12
  31. data/lib/sequel/database/query.rb +37 -0
  32. data/lib/sequel/dataset/actions.rb +31 -11
  33. data/lib/sequel/dataset/features.rb +5 -0
  34. data/lib/sequel/dataset/misc.rb +1 -1
  35. data/lib/sequel/dataset/query.rb +9 -9
  36. data/lib/sequel/dataset/sql.rb +5 -1
  37. data/lib/sequel/extensions/_model_pg_row.rb +0 -12
  38. data/lib/sequel/extensions/_pretty_table.rb +1 -1
  39. data/lib/sequel/extensions/async_thread_pool.rb +11 -11
  40. data/lib/sequel/extensions/auto_literal_strings.rb +1 -1
  41. data/lib/sequel/extensions/constraint_validations.rb +1 -1
  42. data/lib/sequel/extensions/date_arithmetic.rb +1 -1
  43. data/lib/sequel/extensions/looser_typecasting.rb +3 -0
  44. data/lib/sequel/extensions/migration.rb +1 -1
  45. data/lib/sequel/extensions/named_timezones.rb +17 -5
  46. data/lib/sequel/extensions/pg_array.rb +22 -3
  47. data/lib/sequel/extensions/pg_auto_parameterize.rb +478 -0
  48. data/lib/sequel/extensions/pg_extended_date_support.rb +27 -24
  49. data/lib/sequel/extensions/pg_extended_integer_support.rb +116 -0
  50. data/lib/sequel/extensions/pg_hstore.rb +5 -0
  51. data/lib/sequel/extensions/pg_inet.rb +10 -11
  52. data/lib/sequel/extensions/pg_interval.rb +10 -11
  53. data/lib/sequel/extensions/pg_json.rb +10 -10
  54. data/lib/sequel/extensions/pg_json_ops.rb +0 -52
  55. data/lib/sequel/extensions/pg_multirange.rb +5 -10
  56. data/lib/sequel/extensions/pg_range.rb +6 -11
  57. data/lib/sequel/extensions/pg_row.rb +18 -13
  58. data/lib/sequel/model/associations.rb +7 -2
  59. data/lib/sequel/model/base.rb +6 -5
  60. data/lib/sequel/plugins/auto_validations.rb +53 -15
  61. data/lib/sequel/plugins/class_table_inheritance.rb +2 -2
  62. data/lib/sequel/plugins/composition.rb +2 -2
  63. data/lib/sequel/plugins/concurrent_eager_loading.rb +4 -4
  64. data/lib/sequel/plugins/dirty.rb +1 -1
  65. data/lib/sequel/plugins/finder.rb +3 -1
  66. data/lib/sequel/plugins/nested_attributes.rb +4 -4
  67. data/lib/sequel/plugins/pg_auto_constraint_validations.rb +1 -1
  68. data/lib/sequel/plugins/primary_key_lookup_check_values.rb +154 -0
  69. data/lib/sequel/plugins/sql_comments.rb +1 -1
  70. data/lib/sequel/plugins/validation_helpers.rb +20 -0
  71. data/lib/sequel/version.rb +2 -2
  72. metadata +12 -5
@@ -241,6 +241,30 @@ module Sequel
     pool.servers
   end
 
+  # Connect to the given server/shard. Handles database-generic post-connection
+  # setup not handled by #connect, using the :after_connect and :connect_sqls
+  # options.
+  def new_connection(server)
+    conn = connect(server)
+    opts = server_opts(server)
+
+    if ac = opts[:after_connect]
+      if ac.arity == 2
+        ac.call(conn, server)
+      else
+        ac.call(conn)
+      end
+    end
+
+    if cs = opts[:connect_sqls]
+      cs.each do |sql|
+        log_connection_execute(conn, sql)
+      end
+    end
+
+    conn
+  end
+
   # Returns true if the database is using a single-threaded connection pool.
   def single_threaded?
     @single_threaded
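
The new Database#new_connection hook moves :after_connect and :connect_sqls handling from the connection pool to the Database object itself. A minimal sketch of how the two options might be used together (the connection URL and the SET statement are hypothetical):

  DB = Sequel.connect('postgres://localhost/app_db',
    # Called with the raw connection; a callable with arity 2 also
    # receives the shard/server symbol.
    after_connect: lambda{|conn, server| puts "connected to #{server}"},
    # Each SQL string runs on every new connection, after :after_connect.
    connect_sqls: ["SET statement_timeout = 10000"])
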
@@ -91,13 +91,23 @@ module Sequel
   # The specific default size of string columns for this Sequel::Database, usually 255 by default.
   attr_accessor :default_string_column_size
 
+  # Whether to check the bytesize of strings before typecasting (to avoid typecasting strings that
+  # would be too long for the given type), true by default. Strings that are too long will raise
+  # a typecasting error.
+  attr_accessor :check_string_typecast_bytesize
+
   # Constructs a new instance of a database connection with the specified
   # options hash.
   #
   # Accepts the following options:
+  # :after_connect :: A callable object called after each new connection is made, with the
+  #                   connection object (and server argument if the callable accepts 2 arguments),
+  #                   useful for customizations that you want to apply to all connections.
   # :before_preconnect :: Callable that runs after extensions from :preconnect_extensions are loaded,
   #                       but before any connections are created.
   # :cache_schema :: Whether schema should be cached for this Database instance
+  # :check_string_typecast_bytesize :: Whether to check the bytesize of strings before typecasting.
+  # :connect_sqls :: An array of sql strings to execute on each new connection, after :after_connect runs.
   # :default_string_column_size :: The default size of string columns, 255 by default.
   # :extensions :: Extensions to load into this Database instance. Can be a symbol, array of symbols,
   #                or string with extensions separated by columns. These extensions are loaded after
@@ -107,7 +117,7 @@ module Sequel
   # :loggers :: An array of loggers to use.
   # :log_connection_info :: Whether connection information should be logged when logging queries.
   # :log_warn_duration :: The number of elapsed seconds after which queries should be logged at warn level.
-  # :name :: A name to use for the Database object, displayed in PoolTimeout .
+  # :name :: A name to use for the Database object, displayed in PoolTimeout.
   # :preconnect :: Automatically create the maximum number of connections, so that they don't
   #                need to be created as needed. This is useful when connecting takes a long time
   #                and you want to avoid possible latency during runtime.
@@ -116,13 +126,15 @@ module Sequel
   # :preconnect_extensions :: Similar to the :extensions option, but loads the extensions before the
   #                           connections are made by the :preconnect option.
   # :quote_identifiers :: Whether to quote identifiers.
-  # :servers :: A hash specifying a server/shard specific options, keyed by shard symbol .
+  # :servers :: A hash specifying a server/shard specific options, keyed by shard symbol.
   # :single_threaded :: Whether to use a single-threaded connection pool.
   # :sql_log_level :: Method to use to log SQL to a logger, :info by default.
   #
+  # For sharded connection pools, :after_connect and :connect_sqls can be specified per-shard.
+  #
   # All options given are also passed to the connection pool. Additional options respected by
-  # the connection pool are :after_connect, :connect_sqls, :max_connections, :pool_timeout,
-  # :servers, and :servers_hash. See the connection pool documentation for details.
+  # the connection pool are :max_connections, :pool_timeout, :servers, and :servers_hash. See the
+  # connection pool documentation for details.
   def initialize(opts = OPTS)
     @opts ||= opts
     @opts = connection_pool_default_options.merge(@opts)
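
Because new_connection looks options up via server_opts, :after_connect and :connect_sqls can now differ per shard. A sketch of a sharded setup (shard name, replica host, and SQL are hypothetical):

  DB = Sequel.connect('postgres://localhost/app_db',
    # Applies to connections for shards that don't override it.
    connect_sqls: ["SET application_name = 'app'"],
    servers: {
      read_only: {
        host: 'replica.example.com',
        # Overrides the global :connect_sqls for this shard only.
        connect_sqls: ["SET default_transaction_read_only = on"]
      }
    })
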
@@ -132,6 +144,7 @@ module Sequel
   @opts[:adapter_class] = self.class
   @opts[:single_threaded] = @single_threaded = typecast_value_boolean(@opts.fetch(:single_threaded, Sequel.single_threaded))
   @default_string_column_size = @opts[:default_string_column_size] || DEFAULT_STRING_COLUMN_SIZE
+  @check_string_typecast_bytesize = typecast_value_boolean(@opts.fetch(:check_string_typecast_bytesize, true))
 
   @schemas = {}
   @prepared_statements = {}
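
The bytesize check is enabled by default; it can be turned off at connect time or toggled later via the accessor added above (connection URL hypothetical):

  DB = Sequel.connect('postgres://localhost/app_db',
    check_string_typecast_bytesize: false)

  # Or on an existing Database object:
  DB.check_string_typecast_bytesize = false
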
@@ -465,6 +478,21 @@ module Sequel
     # Don't rescue other exceptions, they will be raised normally.
   end
 
+  # Check the bytesize of a string before conversion. There is no point
+  # trying to typecast strings that would be way too long.
+  def typecast_check_string_length(string, max_size)
+    if @check_string_typecast_bytesize && string.bytesize > max_size
+      raise InvalidValue, "string too long to typecast (bytesize: #{string.bytesize}, max: #{max_size})"
+    end
+    string
+  end
+
+  # Check the bytesize of the string value, if value is a string.
+  def typecast_check_length(value, max_size)
+    typecast_check_string_length(value, max_size) if String === value
+    value
+  end
+
   # Typecast the value to an SQL::Blob
   def typecast_value_blob(value)
     value.is_a?(Sequel::SQL::Blob) ? value : Sequel::SQL::Blob.new(value)
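
These helpers are called from the typecast methods below, each with a per-type byte limit, so absurdly long strings now fail fast. A sketch of the observable behavior, assuming a Database object named DB:

  DB.typecast_value(:integer, "123")     # => 123
  # Exceeds the 100-byte limit used for integer typecasting, so this
  # raises Sequel::InvalidValue instead of being passed to Integer():
  DB.typecast_value(:integer, "1" * 200)
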
@@ -488,9 +516,9 @@ module Sequel
     when Date
       value
     when String
-      Sequel.string_to_date(value)
+      Sequel.string_to_date(typecast_check_string_length(value, 100))
     when Hash
-      Date.new(*[:year, :month, :day].map{|x| (value[x] || value[x.to_s]).to_i})
+      Date.new(*[:year, :month, :day].map{|x| typecast_check_length(value[x] || value[x.to_s], 100).to_i})
     else
       raise InvalidValue, "invalid value for Date: #{value.inspect}"
     end
@@ -498,7 +526,17 @@ module Sequel
 
   # Typecast the value to a DateTime or Time depending on Sequel.datetime_class
   def typecast_value_datetime(value)
-    Sequel.typecast_to_application_timestamp(value)
+    case value
+    when String
+      Sequel.typecast_to_application_timestamp(typecast_check_string_length(value, 100))
+    when Hash
+      [:year, :month, :day, :hour, :minute, :second, :nanos, :offset].each do |x|
+        typecast_check_length(value[x] || value[x.to_s], 100)
+      end
+      Sequel.typecast_to_application_timestamp(value)
+    else
+      Sequel.typecast_to_application_timestamp(value)
+    end
   end
 
   if RUBY_VERSION >= '2.4'
@@ -531,18 +569,30 @@ module Sequel
     when Numeric
       BigDecimal(value.to_s)
     when String
-      _typecast_value_string_to_decimal(value)
+      _typecast_value_string_to_decimal(typecast_check_string_length(value, 1000))
     else
       raise InvalidValue, "invalid value for BigDecimal: #{value.inspect}"
     end
   end
 
   # Typecast the value to a Float
-  alias typecast_value_float Float
+  def typecast_value_float(value)
+    Float(typecast_check_length(value, 1000))
+  end
 
   # Typecast the value to an Integer
   def typecast_value_integer(value)
-    (value.is_a?(String) && value =~ /\A0+(\d)/) ? Integer(value, 10) : Integer(value)
+    case value
+    when String
+      typecast_check_string_length(value, 100)
+      if value =~ /\A-?0+(\d)/
+        Integer(value, 10)
+      else
+        Integer(value)
+      end
+    else
+      Integer(value)
+    end
   end
 
   # Typecast the value to a String
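
A behavior fix is folded into the integer branch: strings with leading zeros are always parsed as base 10, now including negative values. A small sketch, assuming a Database object named DB:

  DB.typecast_value(:integer, "012")   # => 12
  # Previously this fell through to Integer("-012"), which treats the
  # leading zero as an octal prefix (=> -10); now it is parsed as -12.
  DB.typecast_value(:integer, "-012")  # => -12
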
@@ -565,9 +615,9 @@ module Sequel
       SQLTime.create(value.hour, value.min, value.sec, value.nsec/1000.0)
     end
     when String
-      Sequel.string_to_time(value)
+      Sequel.string_to_time(typecast_check_string_length(value, 100))
     when Hash
-      SQLTime.create(*[:hour, :minute, :second].map{|x| (value[x] || value[x.to_s]).to_i})
+      SQLTime.create(*[:hour, :minute, :second].map{|x| typecast_check_length(value[x] || value[x.to_s], 100).to_i})
     else
       raise Sequel::InvalidValue, "invalid value for Time: #{value.inspect}"
     end
@@ -175,6 +175,9 @@ module Sequel
     if !c[:max_length] && c[:type] == :string && (max_length = column_schema_max_length(c[:db_type]))
       c[:max_length] = max_length
     end
+    if !c[:max_value] && !c[:min_value] && c[:type] == :integer && (min_max = column_schema_integer_min_max_values(c[:db_type]))
+      c[:min_value], c[:max_value] = min_max
+    end
   end
   schema_post_process(cols)
 
@@ -272,6 +275,40 @@ module Sequel
     column_schema_default_to_ruby_value(default, type) rescue nil
   end
 
+  INTEGER1_MIN_MAX = [-128, 127].freeze
+  INTEGER2_MIN_MAX = [-32768, 32767].freeze
+  INTEGER3_MIN_MAX = [-8388608, 8388607].freeze
+  INTEGER4_MIN_MAX = [-2147483648, 2147483647].freeze
+  INTEGER8_MIN_MAX = [-9223372036854775808, 9223372036854775807].freeze
+  UNSIGNED_INTEGER1_MIN_MAX = [0, 255].freeze
+  UNSIGNED_INTEGER2_MIN_MAX = [0, 65535].freeze
+  UNSIGNED_INTEGER3_MIN_MAX = [0, 16777215].freeze
+  UNSIGNED_INTEGER4_MIN_MAX = [0, 4294967295].freeze
+  UNSIGNED_INTEGER8_MIN_MAX = [0, 18446744073709551615].freeze
+
+  # Look at the db_type and guess the minimum and maximum integer values for
+  # the column.
+  def column_schema_integer_min_max_values(db_type)
+    unsigned = /unsigned/i =~ db_type
+    case db_type
+    when /big|int8/i
+      unsigned ? UNSIGNED_INTEGER8_MIN_MAX : INTEGER8_MIN_MAX
+    when /medium/i
+      unsigned ? UNSIGNED_INTEGER3_MIN_MAX : INTEGER3_MIN_MAX
+    when /small|int2/i
+      unsigned ? UNSIGNED_INTEGER2_MIN_MAX : INTEGER2_MIN_MAX
+    when /tiny/i
+      (unsigned || column_schema_tinyint_type_is_unsigned?) ? UNSIGNED_INTEGER1_MIN_MAX : INTEGER1_MIN_MAX
+    else
+      unsigned ? UNSIGNED_INTEGER4_MIN_MAX : INTEGER4_MIN_MAX
+    end
+  end
+
+  # Whether the tinyint type (if supported by the database) is unsigned by default.
+  def column_schema_tinyint_type_is_unsigned?
+    false
+  end
+
   # Look at the db_type and guess the maximum length of the column.
   # This assumes types such as varchar(255).
   def column_schema_max_length(db_type)
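
With these guesses filled in, Database#schema entries for integer columns now carry the column's range, which the new plugin code in this release (auto_validations, primary_key_lookup_check_values) appears to build on. A sketch of what the schema hash might contain for a plain 4-byte integer column (table name hypothetical, exact output varies by adapter):

  column, info = DB.schema(:items).first
  info[:type]       # => :integer
  info[:min_value]  # => -2147483648
  info[:max_value]  # => 2147483647
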
@@ -313,14 +313,18 @@ module Sequel
 
   # Inserts multiple records into the associated table. This method can be
   # used to efficiently insert a large number of records into a table in a
-  # single query if the database supports it. Inserts
-  # are automatically wrapped in a transaction.
+  # single query if the database supports it. Inserts are automatically
+  # wrapped in a transaction if necessary.
   #
   # This method is called with a columns array and an array of value arrays:
   #
   #   DB[:table].import([:x, :y], [[1, 2], [3, 4]])
   #   # INSERT INTO table (x, y) VALUES (1, 2)
-  #   # INSERT INTO table (x, y) VALUES (3, 4)
+  #   # INSERT INTO table (x, y) VALUES (3, 4)
+  #
+  # or, if the database supports it:
+  #
+  #   # INSERT INTO table (x, y) VALUES (1, 2), (3, 4)
   #
   # This method also accepts a dataset instead of an array of value arrays:
   #
@@ -328,9 +332,13 @@ module Sequel
   #   # INSERT INTO table (x, y) SELECT a, b FROM table2
   #
   # Options:
-  # :commit_every :: Open a new transaction for every given number of records.
-  #                  For example, if you provide a value of 50, will commit
-  #                  after every 50 records.
+  # :commit_every :: Open a new transaction for every given number of
+  #                  records. For example, if you provide a value of 50,
+  #                  will commit after every 50 records. When a
+  #                  transaction is not required, this option controls
+  #                  the maximum number of values to insert with a single
+  #                  statement; it does not force the use of a
+  #                  transaction.
   # :return :: When this is set to :primary_key, returns an array of
   #            autoincremented primary key values for the rows inserted.
   #            This does not have an effect if +values+ is a Dataset.
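
A sketch of how :commit_every interacts with the new transaction behavior (table and data are hypothetical):

  rows = (1..200).map{|i| [i, i * 2]}
  # Inserts in slices of 50 records; per the change further below, a slice
  # that compiles to a single INSERT statement is not wrapped in a transaction.
  DB[:table].import([:x, :y], rows, commit_every: 50)
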
@@ -576,7 +584,7 @@ module Sequel
   #   # SELECT * FROM table ORDER BY id LIMIT 1000 OFFSET 1000
   #   # ...
   #
-  #   DB[:table].order(:id).paged_each(:rows_per_fetch=>100){|row| }
+  #   DB[:table].order(:id).paged_each(rows_per_fetch: 100){|row| }
   #   # SELECT * FROM table ORDER BY id LIMIT 100
   #   # SELECT * FROM table ORDER BY id LIMIT 100 OFFSET 100
   #   # ...
@@ -1023,18 +1031,19 @@ module Sequel
 
   # Internals of #import. If primary key values are requested, use
   # separate insert commands for each row. Otherwise, call #multi_insert_sql
-  # and execute each statement it gives separately.
+  # and execute each statement it gives separately. A transaction is only used
+  # if there are multiple statements to execute.
   def _import(columns, values, opts)
     trans_opts = Hash[opts]
     trans_opts[:server] = @opts[:server]
     if opts[:return] == :primary_key
-      @db.transaction(trans_opts){values.map{|v| insert(columns, v)}}
+      _import_transaction(values, trans_opts){values.map{|v| insert(columns, v)}}
    else
       stmts = multi_insert_sql(columns, values)
-      @db.transaction(trans_opts){stmts.each{|st| execute_dui(st)}}
+      _import_transaction(stmts, trans_opts){stmts.each{|st| execute_dui(st)}}
     end
   end
-
+
   # Return an array of arrays of values given by the symbols in ret_cols.
   def _select_map_multiple(ret_cols)
     map{|r| r.values_at(*ret_cols)}
@@ -1073,6 +1082,17 @@ module Sequel
     end
   end
 
+  # Use a transaction when yielding to the block if multiple values/statements
+  # are provided. When only a single value or statement is provided, then yield
+  # without using a transaction.
+  def _import_transaction(values, trans_opts, &block)
+    if values.length > 1
+      @db.transaction(trans_opts, &block)
+    else
+      yield
+    end
+  end
+
   # Internals of +select_hash+ and +select_hash_groups+
   def _select_hash(meth, key_column, value_column, opts=OPTS)
     select(*(key_column.is_a?(Array) ? key_column : [key_column]) + (value_column.is_a?(Array) ? value_column : [value_column])).
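
The practical effect of _import_transaction: an import that produces only one INSERT statement no longer emits BEGIN/COMMIT around it. A sketch (table name hypothetical):

  DB[:table].import([:x, :y], [[1, 2]])
  # INSERT INTO table (x, y) VALUES (1, 2)   -- no surrounding transaction
  # Imports that need multiple statements are still wrapped in a transaction.
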
@@ -152,6 +152,11 @@ module Sequel
     supports_distinct_on?
   end
 
+  # Whether placeholder literalizers are supported, true by default.
+  def supports_placeholder_literalizer?
+    true
+  end
+
   # Whether the dataset supports pattern matching by regular expressions, false by default.
   def supports_regexp?
     false
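
The feature method defaults to true; a dataset extension that cannot use placeholder literalizers could override it, and (as the next hunk shows) Sequel will then skip caching placeholder loaders for such datasets. A hypothetical sketch:

  # Hypothetical module for an adapter/extension that rewrites literal
  # values into bound parameters and so cannot reuse cached literalizers.
  module NoPlaceholderLiteralizers
    def supports_placeholder_literalizer?
      false
    end
  end

  DB.extend_datasets(NoPlaceholderLiteralizers)
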
@@ -302,7 +302,7 @@ module Sequel
       cache_set(key, loader + 1)
       loader = nil
     end
-  elsif cache_sql?
+  elsif cache_sql? && supports_placeholder_literalizer?
     cache_set(key, 1)
   end
 
@@ -65,7 +65,7 @@ module Sequel
     Sequel.synchronize{EXTENSIONS[ext] = block}
   end
 
-  # On Ruby 2.4+, use clone(:freeze=>false) to create clones, because
+  # On Ruby 2.4+, use clone(freeze: false) to create clones, because
   # we use true freezing in that case, and we need to modify the opts
   # in the frozen copy.
   #
@@ -116,7 +116,7 @@ module Sequel
   #   DB[:items].order(:id).distinct(:id) # SQL: SELECT DISTINCT ON (id) * FROM items ORDER BY id
   #   DB[:items].order(:id).distinct{func(:id)} # SQL: SELECT DISTINCT ON (func(id)) * FROM items ORDER BY id
   #
-  # There is support for emualting the DISTINCT ON support in MySQL, but it
+  # There is support for emulating the DISTINCT ON support in MySQL, but it
   # does not support the ORDER of the dataset, and also doesn't work in many
   # cases if the ONLY_FULL_GROUP_BY sql_mode is used, which is the default on
   # MySQL 5.7.5+.
@@ -787,7 +787,7 @@ module Sequel
   #   DB[:items].order(Sequel.lit('a + b')) # SELECT * FROM items ORDER BY a + b
   #   DB[:items].order(Sequel[:a] + :b) # SELECT * FROM items ORDER BY (a + b)
   #   DB[:items].order(Sequel.desc(:name)) # SELECT * FROM items ORDER BY name DESC
-  #   DB[:items].order(Sequel.asc(:name, :nulls=>:last)) # SELECT * FROM items ORDER BY name ASC NULLS LAST
+  #   DB[:items].order(Sequel.asc(:name, nulls: :last)) # SELECT * FROM items ORDER BY name ASC NULLS LAST
   #   DB[:items].order{sum(name).desc} # SELECT * FROM items ORDER BY sum(name) DESC
   #   DB[:items].order(nil) # SELECT * FROM items
   def order(*columns, &block)
@@ -857,13 +857,13 @@ module Sequel
   #   DB[:items].returning(nil) # RETURNING NULL
   #   DB[:items].returning(:id, :name) # RETURNING id, name
   #
-  #   DB[:items].returning.insert(:a=>1) do |hash|
+  #   DB[:items].returning.insert(a: 1) do |hash|
   #     # hash for each row inserted, with values for all columns
   #   end
-  #   DB[:items].returning.update(:a=>1) do |hash|
+  #   DB[:items].returning.update(a: 1) do |hash|
   #     # hash for each row updated, with values for all columns
   #   end
-  #   DB[:items].returning.delete(:a=>1) do |hash|
+  #   DB[:items].returning.delete(a: 1) do |hash|
   #     # hash for each row deleted, with values for all columns
   #   end
   def returning(*values)
@@ -1102,7 +1102,7 @@ module Sequel
   # referenced in window functions. See Sequel::SQL::Window for a list of
   # options that can be passed in. Example:
   #
-  #   DB[:items].window(:w, :partition=>:c1, :order=>:c2)
+  #   DB[:items].window(:w, partition: :c1, order: :c2)
   #   # SELECT * FROM items WINDOW w AS (PARTITION BY c1 ORDER BY c2)
   def window(name, opts)
     clone(:window=>((@opts[:window]||EMPTY_ARRAY) + [[name, SQL::Window.new(opts)].freeze]).freeze)
@@ -1163,7 +1163,7 @@ module Sequel
   #   DB[:t].with_recursive(:t,
   #     DB[:i1].select(:id, :parent_id).where(parent_id: nil),
   #     DB[:i1].join(:t, id: :parent_id).select(Sequel[:i1][:id], Sequel[:i1][:parent_id]),
-  #     :args=>[:id, :parent_id])
+  #     args: [:id, :parent_id])
   #
   #   # WITH RECURSIVE t(id, parent_id) AS (
   #   #   SELECT id, parent_id FROM i1 WHERE (parent_id IS NULL)
@@ -1241,7 +1241,7 @@ module Sequel
   #
   # You can also provide a method name and arguments to call to get the SQL:
   #
-  #   DB[:items].with_sql(:insert_sql, :b=>1) # INSERT INTO items (b) VALUES (1)
+  #   DB[:items].with_sql(:insert_sql, b: 1) # INSERT INTO items (b) VALUES (1)
   #
   # Note that datasets that specify custom SQL using this method will generally
   # ignore future dataset methods that modify the SQL used, as specifying custom SQL
@@ -1725,7 +1725,7 @@ module Sequel
   # Append literalization of the subselect to SQL string.
   def subselect_sql_append(sql, ds)
     sds = subselect_sql_dataset(sql, ds)
-    sds.sql
+    subselect_sql_append_sql(sql, sds)
     unless sds.send(:cache_sql?)
       # If subquery dataset does not allow caching SQL,
       # then this dataset should not allow caching SQL.
@@ -1737,6 +1737,10 @@ module Sequel
     ds.clone(:append_sql=>sql)
   end
 
+  def subselect_sql_append_sql(sql, ds)
+    ds.sql
+  end
+
   # The number of decimal digits of precision to use in timestamps.
   def timestamp_precision
     supports_timestamp_usecs? ? 6 : 0
@@ -23,18 +23,6 @@ module Sequel
       super
     end
   end
-
-  private
-
-  # Handle Sequel::Model instances in bound variable arrays.
-  def bound_variable_array(arg)
-    case arg
-    when Sequel::Model
-      "\"(#{arg.values.values_at(*arg.columns).map{|v| bound_variable_array(v)}.join(',').gsub(/("|\\)/, '\\\\\1')})\""
-    else
-      super
-    end
-  end
 end
 end
 end
@@ -39,7 +39,7 @@ module Sequel
 
   # Hash of the maximum size of the value for each column
   def self.column_sizes(records, columns) # :nodoc:
-    sizes = Hash.new {0}
+    sizes = Hash.new(0)
     columns.each do |c|
       sizes[c] = c.to_s.size
     end
@@ -5,9 +5,9 @@
 # code
 #
 #   DB.extension :async_thread_pool
-#   foos = DB[:foos].async.where{:name=>'A'..'M'}.all
+#   foos = DB[:foos].async.where{name: 'A'..'M'}.all
 #   bar_names = DB[:bar].async.select_order_map(:name)
-#   baz_1 = DB[:bazes].async.first(:id=>1)
+#   baz_1 = DB[:bazes].async.first(id: 1)
 #
 # All 3 queries will be run in separate threads. +foos+, +bar_names+
 # and +baz_1+ will be proxy objects. Calling a method on the proxy
@@ -15,9 +15,9 @@
 # of calling that method on the result of the query method. For example,
 # if you run:
 #
-#   foos = DB[:foos].async.where{:name=>'A'..'M'}.all
+#   foos = DB[:foos].async.where{name: 'A'..'M'}.all
 #   bar_names = DB[:bars].async.select_order_map(:name)
-#   baz_1 = DB[:bazes].async.first(:id=>1)
+#   baz_1 = DB[:bazes].async.first(id: 1)
 #   sleep(1)
 #   foos.size
 #   bar_names.first
@@ -26,9 +26,9 @@
 # These three queries will generally be run concurrently in separate
 # threads. If you instead run:
 #
-#   DB[:foos].async.where{:name=>'A'..'M'}.all.size
+#   DB[:foos].async.where{name: 'A'..'M'}.all.size
 #   DB[:bars].async.select_order_map(:name).first
-#   DB[:bazes].async.first(:id=>1).name
+#   DB[:bazes].async.first(id: 1).name
 #
 # Then will run each query sequentially, since you need the result of
 # one query before running the next query. The queries will still be
@@ -37,11 +37,11 @@
 # What is run in the separate thread is the entire method call that
 # returns results. So with the original example:
 #
-#   foos = DB[:foos].async.where{:name=>'A'..'M'}.all
+#   foos = DB[:foos].async.where{name: 'A'..'M'}.all
 #   bar_names = DB[:bars].async.select_order_map(:name)
-#   baz_1 = DB[:bazes].async.first(:id=>1)
+#   baz_1 = DB[:bazes].async.first(id: 1)
 #
-# The +all+, <tt>select_order_map(:name)</tt>, and <tt>first(:id=>1)</tt>
+# The +all+, <tt>select_order_map(:name)</tt>, and <tt>first(id: 1)</tt>
 # calls are run in separate threads. If a block is passed to a method
 # such as +all+ or +each+, the block is also run in that thread. If you
 # have code such as:
@@ -156,10 +156,10 @@
 # so that the query will run in the current thread instead of waiting
 # for an async thread to become available. With the following code:
 #
-#   foos = DB[:foos].async.where{:name=>'A'..'M'}.all
+#   foos = DB[:foos].async.where{name: 'A'..'M'}.all
 #   bar_names = DB[:bar].async.select_order_map(:name)
 #   if foos.length > 4
-#     baz_1 = DB[:bazes].async.first(:id=>1)
+#     baz_1 = DB[:bazes].async.first(id: 1)
 #   end
 #
 # Whether you need the +baz_1+ variable depends on the value of foos.
@@ -22,7 +22,7 @@
 #
 # Named placeholders can also be used with a hash:
 #
-#   ds.where("name > :a", :a=>"A")
+#   ds.where("name > :a", a: "A")
 #   # SELECT * FROM table WHERE (name > 'A')
 #
 # This extension also allows the use of a plain string passed to Dataset#update:
@@ -126,7 +126,7 @@
 # be emulated by dropping the table and recreating it with the constraints.
 # If you want to use this plugin on SQLite with an alter_table block,
 # you should drop all constraint validation metadata using
-# <tt>drop_constraint_validations_for(:table=>'table')</tt>, and then
+# <tt>drop_constraint_validations_for(table: 'table')</tt>, and then
 # readd all constraints you want to use inside the alter table block,
 # making no other changes inside the alter_table block.
 #
@@ -25,7 +25,7 @@
 # By default, values are casted to the generic timestamp type for the
 # database. You can override the cast type using the :cast option:
 #
-#   add = Sequel.date_add(:date_column, {years: 1, months: 2, days: 3}, :cast=>:timestamptz)
+#   add = Sequel.date_add(:date_column, {years: 1, months: 2, days: 3}, cast: :timestamptz)
 #
 # These expressions can be used in your datasets, or anywhere else that
 # Sequel expressions are allowed:
@@ -8,6 +8,9 @@
 # :decimal :: use 0.0 for unsupported strings
 # :string :: silently allow hash and array conversion to string
 #
+# This also removes bytesize checks for string inputs for float, integer
+# and decimal conversions.
+#
 # To load the extension into the database:
 #
 #   DB.extension :looser_typecasting
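
With the extension loaded, the bytesize limits described earlier no longer apply to numeric typecasts. A sketch:

  DB.extension :looser_typecasting
  # Without the extension this would raise Sequel::InvalidValue, since the
  # string exceeds the 100-byte limit for integer typecasting.
  DB.typecast_value(:integer, "1" * 200)
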
@@ -377,7 +377,7 @@ module Sequel
   # Raise a NotCurrentError unless the migrator is current, takes the same
   # arguments as #run.
   def self.check_current(*args)
-    raise(NotCurrentError, 'migrator is not current') unless is_current?(*args)
+    raise(NotCurrentError, 'current migration version does not match latest available version') unless is_current?(*args)
   end
 
   # Return whether the migrator is current (i.e. it does not need to make
@@ -76,33 +76,45 @@ module Sequel
     raise unless disamb = tzinfo_disambiguator_for(v)
     period = input_timezone.period_for_local(v, &disamb)
     offset = period.utc_total_offset
-    Time.at(v.to_i - offset, :in => input_timezone)
+    # :nocov:
+    if defined?(JRUBY_VERSION)
+      Time.at(v.to_i - offset, :in => input_timezone) + v.nsec/1000000000.0
+    # :nocov:
+    else
+      Time.at(v.to_i - offset, v.nsec, :nsec, :in => input_timezone)
+    end
   end
 
   # Convert the given input Time to the given output timezone,
   # which should be a TZInfo::Timezone instance.
   def convert_output_time_other(v, output_timezone)
-    Time.at(v.to_i, :in => output_timezone)
+    # :nocov:
+    if defined?(JRUBY_VERSION)
+      Time.at(v.to_i, :in => output_timezone) + v.nsec/1000000000.0
+    # :nocov:
+    else
+      Time.at(v.to_i, v.nsec, :nsec, :in => output_timezone)
+    end
   end
   # :nodoc:
   # :nocov:
   else
   def convert_input_time_other(v, input_timezone)
     local_offset = input_timezone.period_for_local(v, &tzinfo_disambiguator_for(v)).utc_total_offset
-    Time.new(1970, 1, 1, 0, 0, 0, local_offset) + v.to_i
+    Time.new(1970, 1, 1, 0, 0, 0, local_offset) + v.to_i + v.nsec/1000000000.0
   end
 
   if defined?(TZInfo::VERSION) && TZInfo::VERSION > '2'
   def convert_output_time_other(v, output_timezone)
     v = output_timezone.utc_to_local(v.getutc)
     local_offset = output_timezone.period_for_local(v, &tzinfo_disambiguator_for(v)).utc_total_offset
-    Time.new(1970, 1, 1, 0, 0, 0, local_offset) + v.to_i + local_offset
+    Time.new(1970, 1, 1, 0, 0, 0, local_offset) + v.to_i + v.nsec/1000000000.0 + local_offset
   end
   else
   def convert_output_time_other(v, output_timezone)
     v = output_timezone.utc_to_local(v.getutc)
     local_offset = output_timezone.period_for_local(v, &tzinfo_disambiguator_for(v)).utc_total_offset
-    Time.new(1970, 1, 1, 0, 0, 0, local_offset) + v.to_i
+    Time.new(1970, 1, 1, 0, 0, 0, local_offset) + v.to_i + v.nsec/1000000000.0
   end
   end
   # :nodoc: