sequel 4.24.0 → 4.25.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG +40 -0
  3. data/doc/association_basics.rdoc +2 -5
  4. data/doc/dataset_basics.rdoc +1 -1
  5. data/doc/postgresql.rdoc +47 -0
  6. data/doc/querying.rdoc +5 -0
  7. data/doc/release_notes/4.25.0.txt +181 -0
  8. data/lib/sequel/adapters/ibmdb.rb +0 -28
  9. data/lib/sequel/adapters/shared/db2.rb +31 -2
  10. data/lib/sequel/adapters/shared/mssql.rb +12 -12
  11. data/lib/sequel/adapters/shared/postgres.rb +102 -3
  12. data/lib/sequel/adapters/shared/sqlite.rb +1 -0
  13. data/lib/sequel/adapters/swift/sqlite.rb +12 -0
  14. data/lib/sequel/database/schema_generator.rb +4 -0
  15. data/lib/sequel/database/schema_methods.rb +3 -1
  16. data/lib/sequel/dataset/actions.rb +1 -1
  17. data/lib/sequel/dataset/prepared_statements.rb +15 -7
  18. data/lib/sequel/dataset/query.rb +16 -2
  19. data/lib/sequel/dataset/sql.rb +19 -16
  20. data/lib/sequel/extensions/empty_array_consider_nulls.rb +35 -0
  21. data/lib/sequel/extensions/empty_array_ignore_nulls.rb +3 -34
  22. data/lib/sequel/extensions/pg_json_ops.rb +9 -1
  23. data/lib/sequel/extensions/query_literals.rb +1 -1
  24. data/lib/sequel/model/base.rb +7 -11
  25. data/lib/sequel/model/dataset_module.rb +1 -1
  26. data/lib/sequel/plugins/association_pks.rb +6 -0
  27. data/lib/sequel/plugins/dirty.rb +6 -1
  28. data/lib/sequel/plugins/inverted_subsets.rb +48 -0
  29. data/lib/sequel/plugins/serialization.rb +2 -0
  30. data/lib/sequel/plugins/singular_table_names.rb +31 -0
  31. data/lib/sequel/plugins/static_cache.rb +17 -0
  32. data/lib/sequel/sql.rb +1 -0
  33. data/lib/sequel/version.rb +1 -1
  34. data/spec/adapters/db2_spec.rb +12 -0
  35. data/spec/adapters/mysql_spec.rb +1 -0
  36. data/spec/adapters/postgres_spec.rb +41 -1
  37. data/spec/core/database_spec.rb +1 -0
  38. data/spec/core/dataset_spec.rb +55 -7
  39. data/spec/core/expression_filters_spec.rb +18 -0
  40. data/spec/core/schema_spec.rb +10 -2
  41. data/spec/extensions/association_pks_spec.rb +12 -0
  42. data/spec/extensions/{empty_array_ignore_nulls_spec.rb → empty_array_consider_nulls_spec.rb} +7 -7
  43. data/spec/extensions/inverted_subsets_spec.rb +33 -0
  44. data/spec/extensions/query_literals_spec.rb +16 -0
  45. data/spec/extensions/serialization_spec.rb +21 -0
  46. data/spec/extensions/singular_table_names_spec.rb +22 -0
  47. data/spec/integration/dataset_test.rb +2 -1
  48. data/spec/integration/prepared_statement_test.rb +35 -1
  49. data/spec/model/associations_spec.rb +2 -2
  50. data/spec/model/base_spec.rb +13 -8
  51. data/spec/model/class_dataset_methods_spec.rb +1 -0
  52. metadata +10 -5
  53. data/lib/sequel/adapters/firebird.rb +0 -105
  54. data/lib/sequel/adapters/informix.rb +0 -68
@@ -625,6 +625,15 @@ module Sequel
  end
  end
 
+ # Literalize non-String collate options. This is because unquoted collations
+ # are folded to lowercase, and PostgreSQL uses mixed case or capitalized collations.
+ def column_definition_collate_sql(sql, column)
+   if collate = column[:collate]
+     collate = literal(collate) unless collate.is_a?(String)
+     sql << " COLLATE #{collate}"
+   end
+ end
+
  # Handle PostgreSQL specific default format.
  def column_schema_normalize_default(default, type)
    if m = POSTGRES_DEFAULT_RE.match(default)
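A minimal sketch of what the new literalization allows (hypothetical table and collation names, PostgreSQL connection assumed): symbols are now quoted as identifiers, while strings still pass through verbatim.

  DB.create_table(:notes) do
    String :body, :collate => :"en_US"   # body text COLLATE "en_US"  (symbol is literalized, so the case is preserved)
    String :slug, :collate => '"C"'      # slug text COLLATE "C"      (string is used verbatim)
  end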
@@ -1200,7 +1209,7 @@ module Sequel
  LOCK_MODES = ['ACCESS SHARE', 'ROW SHARE', 'ROW EXCLUSIVE', 'SHARE UPDATE EXCLUSIVE', 'SHARE', 'SHARE ROW EXCLUSIVE', 'EXCLUSIVE', 'ACCESS EXCLUSIVE'].each(&:freeze)
 
  Dataset.def_sql_method(self, :delete, [['if server_version >= 90100', %w'with delete from using where returning'], ['else', %w'delete from using where returning']])
- Dataset.def_sql_method(self, :insert, [['if server_version >= 90100', %w'with insert into columns values returning'], ['else', %w'insert into columns values returning']])
+ Dataset.def_sql_method(self, :insert, [['if server_version >= 90500', %w'with insert into columns values conflict returning'], ['elsif server_version >= 90100', %w'with insert into columns values returning'], ['else', %w'insert into columns values returning']])
  Dataset.def_sql_method(self, :select, [['if opts[:values]', %w'values order limit'], ['elsif server_version >= 80400', %w'with select distinct columns from join where group having window compounds order limit lock'], ['else', %w'select distinct columns from join where group having compounds order limit lock']])
  Dataset.def_sql_method(self, :update, [['if server_version >= 90100', %w'with update table set from where returning'], ['else', %w'update table set from where returning']])
 
@@ -1208,7 +1217,7 @@ module Sequel
  module PreparedStatementMethods
    # Override insert action to use RETURNING if the server supports it.
    def run
-     if @prepared_type == :insert
+     if @prepared_type == :insert && (opts[:returning_pk] || !opts[:returning])
        fetch_rows(prepared_sql){|r| return r.values.first}
      else
        super
@@ -1217,7 +1226,10 @@ module Sequel
 
    def prepared_sql
      return @prepared_sql if @prepared_sql
-     @opts[:returning] = insert_pk if @prepared_type == :insert
+     if @prepared_type == :insert && !opts[:returning]
+       @opts[:returning] = insert_pk
+       @opts[:returning_pk] = true
+     end
      super
      @prepared_sql
    end
@@ -1342,6 +1354,50 @@ module Sequel
  end
  end
 
+ # Handle uniqueness violations when inserting, by updating the conflicting row, using
+ # ON CONFLICT. With no options, uses ON CONFLICT DO NOTHING. Options:
+ # :constraint :: An explicit constraint name, has precedence over :target.
+ # :target :: The column name or expression to handle uniqueness violations on.
+ # :update :: A hash of columns and values to set. Uses ON CONFLICT DO UPDATE.
+ # :update_where :: A WHERE condition to use for the update.
+ #
+ # Examples:
+ #
+ #   DB[:table].insert_conflict.insert(:a=>1, :b=>2)
+ #   # INSERT INTO TABLE (a, b) VALUES (1, 2)
+ #   # ON CONFLICT DO NOTHING
+ #
+ #   DB[:table].insert_conflict(:constraint=>:table_a_uidx).insert(:a=>1, :b=>2)
+ #   # INSERT INTO TABLE (a, b) VALUES (1, 2)
+ #   # ON CONFLICT ON CONSTRAINT table_a_uidx DO NOTHING
+ #
+ #   DB[:table].insert_conflict(:target=>:a).insert(:a=>1, :b=>2)
+ #   # INSERT INTO TABLE (a, b) VALUES (1, 2)
+ #   # ON CONFLICT (a) DO NOTHING
+ #
+ #   DB[:table].insert_conflict(:target=>:a, :update=>{:b=>:excluded__b}).insert(:a=>1, :b=>2)
+ #   # INSERT INTO TABLE (a, b) VALUES (1, 2)
+ #   # ON CONFLICT (a) DO UPDATE SET b = excluded.b
+ #
+ #   DB[:table].insert_conflict(:constraint=>:table_a_uidx,
+ #     :update=>{:b=>:excluded__b}, :update_where=>{:table__status_id=>1}).insert(:a=>1, :b=>2)
+ #   # INSERT INTO TABLE (a, b) VALUES (1, 2)
+ #   # ON CONFLICT ON CONSTRAINT table_a_uidx
+ #   # DO UPDATE SET b = excluded.b WHERE (table.status_id = 1)
+ def insert_conflict(opts=OPTS)
+   clone(:insert_conflict => opts)
+ end
+
+ # Ignore uniqueness/exclusion violations when inserting, using ON CONFLICT DO NOTHING.
+ # Exists mostly for compatibility with MySQL's insert_ignore. Example:
+ #
+ #   DB[:table].insert_ignore.insert(:a=>1, :b=>2)
+ #   # INSERT INTO TABLE (a, b) VALUES (1, 2)
+ #   # ON CONFLICT DO NOTHING
+ def insert_ignore
+   insert_conflict
+ end
+
  # Insert a record returning the record inserted. Always returns nil without
  # inserting a query if disable_insert_returning is used.
  def insert_select(*values)
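For illustration, a minimal upsert sketch built on the new methods (hypothetical tags table and columns, PostgreSQL 9.5+ assumed):

  DB[:tags].
    insert_conflict(:target=>:name, :update=>{:updated_at=>Sequel::CURRENT_TIMESTAMP}).
    insert(:name=>'ruby', :updated_at=>Sequel::CURRENT_TIMESTAMP)
  # INSERT INTO tags (name, updated_at) VALUES ('ruby', CURRENT_TIMESTAMP)
  # ON CONFLICT (name) DO UPDATE SET updated_at = CURRENT_TIMESTAMP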
@@ -1396,11 +1452,26 @@ module Sequel
    true
  end
 
+ # PostgreSQL 9.5+ supports GROUP CUBE
+ def supports_group_cube?
+   server_version >= 90500
+ end
+
+ # PostgreSQL 9.5+ supports GROUP ROLLUP
+ def supports_group_rollup?
+   server_version >= 90500
+ end
+
  # True unless insert returning has been disabled for this dataset.
  def supports_insert_select?
    !@opts[:disable_insert_returning]
  end
 
+ # PostgreSQL 9.5+ supports the ON CONFLICT clause to INSERT.
+ def supports_insert_conflict?
+   server_version >= 90500
+ end
+
  # PostgreSQL 9.3rc1+ supports lateral subqueries
  def supports_lateral_subqueries?
    server_version >= 90300
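A short sketch of the newly supported grouping, under the assumption of a hypothetical sales table and a PostgreSQL 9.5+ server (older servers still raise Sequel::Error from group_rollup/group_cube):

  DB[:sales].
    group(:region, :product).
    group_rollup.
    select{[region, product, sum(:amount).as(:total)]}.
    all
  # roughly: SELECT region, product, sum(amount) AS total FROM sales
  #          GROUP BY ROLLUP (region, product)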
@@ -1503,6 +1574,34 @@ module Sequel
    join_from_sql(:USING, sql)
  end
 
+ # Add ON CONFLICT clause if it should be used
+ def insert_conflict_sql(sql)
+   if opts = @opts[:insert_conflict]
+     sql << " ON CONFLICT"
+
+     if target = opts[:constraint]
+       sql << " ON CONSTRAINT "
+       identifier_append(sql, target)
+     elsif target = opts[:target]
+       sql << ' ('
+       identifier_append(sql, target)
+       sql << ')'
+     end
+
+     if values = opts[:update]
+       sql << " DO UPDATE SET "
+       update_sql_values_hash(sql, values)
+       if where = opts[:update_where]
+         sql << " WHERE "
+         literal_append(sql, where)
+       end
+     else
+       sql << " DO NOTHING"
+     end
+
+   end
+ end
+
  # Return the primary key to use for RETURNING in an INSERT statement
  def insert_pk
    if (f = opts[:from]) && !f.empty?
@@ -335,6 +335,7 @@ module Sequel
  /\ACHECK constraint failed/ => CheckConstraintViolation,
  /\A(SQLITE ERROR 19 \(CONSTRAINT\) : )?constraint failed\z/ => ConstraintViolation,
  /may not be NULL\z|NOT NULL constraint failed: .+\z/ => NotNullConstraintViolation,
+ /\ASQLITE ERROR \d+ \(\) : CHECK constraint failed: / => CheckConstraintViolation
  }.freeze
  def database_error_regexps
    DATABASE_ERROR_REGEXPS
@@ -11,6 +11,18 @@ module Sequel
  extend Sequel::Database::ResetIdentifierMangling
  include Sequel::SQLite::DatabaseMethods
 
+ DATABASE_ERROR_REGEXPS = {
+   /\AUNIQUE constraint failed: / => UniqueConstraintViolation,
+   /\AFOREIGN KEY constraint failed/ => ForeignKeyConstraintViolation,
+   /\ACHECK constraint failed/ => CheckConstraintViolation,
+   /\A(SQLITE ERROR 19 \(CONSTRAINT\) : )?constraint failed/ => ConstraintViolation,
+   /may not be NULL\z|NOT NULL constraint failed: .+/ => NotNullConstraintViolation,
+   /\ASQLITE ERROR \d+ \(\) : CHECK constraint failed: / => CheckConstraintViolation
+ }.freeze
+ def database_error_regexps
+   DATABASE_ERROR_REGEXPS
+ end
+
  # Set the correct pragmas on the connection.
  def connect(opts)
    c = super
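In application code the effect of the extra regexp is that SQLite CHECK failures surface as the specific exception class; a sketch, assuming a hypothetical items table with a CHECK constraint on price:

  begin
    DB[:items].insert(:name=>'widget', :price=>-1)
  rescue Sequel::CheckConstraintViolation
    # now raised for SQLite CHECK failures instead of a less specific database error
  end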
@@ -81,6 +81,10 @@ module Sequel
  #
  # The following options are supported:
  #
+ # :collate :: The collation to use for the column. For backwards compatibility,
+ #             only symbols and string values are supported, and they are used verbatim.
+ #             However, on PostgreSQL, symbols are literalized as regular identifiers,
+ #             since unquoted collations are unlikely to be valid.
  # :default :: The default value for the column.
  # :deferrable :: For foreign key columns, this ensures referential integrity will work even if
  #                referencing table uses a foreign key value that does not
@@ -524,7 +524,9 @@ module Sequel
 
  # Add collate SQL fragment to column creation SQL.
  def column_definition_collate_sql(sql, column)
-   sql << " COLLATE #{column[:collate]}" if column[:collate]
+   if collate = column[:collate]
+     sql << " COLLATE #{collate}"
+   end
  end
 
  # Add default SQL fragment to column creation SQL.
@@ -915,7 +915,7 @@ module Sequel
  db = @db
  if db.sharded?
    opts = Hash[opts]
-   opts[:server] = @opts[:server] || :read_only
+   opts[:server] = @opts[:server] || (@opts[:lock] ? :default : :read_only)
    opts
  end
  db.execute(sql, opts, &block)
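The practical effect, sketched for a sharded setup (assuming a Database configured with a :read_only replica alongside the default server):

  DB[:accounts].where(:id=>1).for_update.all
  # The locking SELECT now runs against the default (writable) server because
  # @opts[:lock] is set; plain, unlocked reads still go to the :read_only shard.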
@@ -90,13 +90,19 @@ module Sequel
  # The argument to supply to insert and update, which may use
  # placeholders specified by prepared_args
  attr_accessor :prepared_modify_values
-
+
  # Sets the prepared_args to the given hash and runs the
  # prepared statement.
  def call(bind_vars={}, &block)
    bind(bind_vars).run(&block)
  end
 
+ # Raise an error if attempting to call prepare on an already
+ # prepared statement.
+ def prepare(*)
+   raise Error, "cannot prepare an already prepared statement"
+ end
+
  # Send the columns to the original dataset, as calling it
  # on the prepared statement can cause problems.
  def columns
@@ -164,12 +170,14 @@ module Sequel
    with_sql(prepared_sql).first
  when :first
    first
- when :insert
-   insert(*@prepared_modify_values)
- when :update
-   update(*@prepared_modify_values)
- when :delete
-   delete
+ when :insert, :update, :delete
+   if opts[:returning] && supports_returning?(@prepared_type)
+     returning_fetch_rows(prepared_sql)
+   elsif @prepared_type == :delete
+     delete
+   else
+     send(@prepared_type, *@prepared_modify_values)
+   end
  when Array
    case @prepared_type.at(0)
    when :map, :to_hash, :to_hash_groups
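A sketch of what this enables on PostgreSQL (hypothetical table and statement names; the assumption here is that the rows from the RETURNING clause are what the call produces, rather than just the primary key):

  ps = DB[:items].returning(:id, :created_at).prepare(:insert, :insert_item, :name=>:$name)
  ps.call(:name=>'foo')
  # INSERT INTO items (name) VALUES ('foo') RETURNING id, created_at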
@@ -34,7 +34,7 @@ module Sequel
  # Methods that return modified datasets
  QUERY_METHODS = (<<-METHS).split.map(&:to_sym) + JOIN_METHODS
    add_graph_aliases and distinct except exclude exclude_having exclude_where
-   filter for_update from from_self graph grep group group_and_count group_by having intersect invert
+   filter for_update from from_self graph grep group group_and_count group_append group_by having intersect invert
    limit lock_style naked offset or order order_append order_by order_more order_prepend qualify
    reverse reverse_order select select_all select_append select_group select_more server
    set_graph_aliases unfiltered ungraphed ungrouped union
@@ -314,6 +314,17 @@ module Sequel
    select_group(*columns, &block).select_more(COUNT_OF_ALL_AS_COUNT)
  end
 
+ # Returns a copy of the dataset with the given columns added to the list of
+ # existing columns to group on. If no existing columns are present this
+ # method simply sets the columns as the initial ones to group on.
+ #
+ #   DB[:items].group_append(:b)           # SELECT * FROM items GROUP BY b
+ #   DB[:items].group(:a).group_append(:b) # SELECT * FROM items GROUP BY a, b
+ def group_append(*columns, &block)
+   columns = @opts[:group] + columns if @opts[:group]
+   group(*columns, &block)
+ end
+
  # Adds the appropriate CUBE syntax to GROUP BY.
  def group_cube
    raise Error, "GROUP BY CUBE not supported on #{db.database_type}" unless supports_group_cube?
@@ -576,7 +587,10 @@ module Sequel
  # A symbol may be used for database independent locking behavior, but
  # all supported symbols have separate methods (e.g. for_update).
  #
- #   DB[:items].lock_style('FOR SHARE NOWAIT') # SELECT * FROM items FOR SHARE NOWAIT
+ #   DB[:items].lock_style('FOR SHARE NOWAIT')
+ #   # SELECT * FROM items FOR SHARE NOWAIT
+ #   DB[:items].lock_style('FOR UPDATE OF table1 SKIP LOCKED')
+ #   # SELECT * FROM items FOR UPDATE OF table1 SKIP LOCKED
  def lock_style(style)
    clone(:lock => style)
  end
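Since arbitrary strings pass through unchanged, the PostgreSQL 9.5 SKIP LOCKED clause can be used for job-queue style polling; a sketch with a hypothetical jobs table (normally run inside DB.transaction so the row lock is held while the job is processed):

  job = DB[:jobs].
    where(:state=>'pending').
    order(:id).
    lock_style('FOR UPDATE SKIP LOCKED').
    first
  # SELECT * FROM jobs WHERE (state = 'pending') ORDER BY id LIMIT 1
  # FOR UPDATE SKIP LOCKED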
@@ -1010,8 +1010,7 @@ module Sequel
 
  # An expression for how to handle an empty array lookup.
  def empty_array_value(op, cols)
-   c = Array(cols)
-   SQL::BooleanExpression.from_value_pairs(c.zip(c), :AND, op == :IN)
+   {1 => ((op == :IN) ? 0 : 1)}
  end
 
  # Format the timestamp based on the default_timestamp_format, with a couple
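With this change the default literalization matches what the old empty_array_ignore_nulls extension produced:

  DB[:test].where(:name=>[]).sql    # SELECT * FROM test WHERE (1 = 0)
  DB[:test].exclude(:name=>[]).sql  # SELECT * FROM test WHERE (1 = 1)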
@@ -1527,27 +1526,31 @@ module Sequel
  end
 
  def update_set_sql(sql)
-   values = opts[:values]
    sql << SET
+   values = @opts[:values]
    if values.is_a?(Hash)
-     c = false
-     eq = EQUAL
-     values.each do |k, v|
-       sql << COMMA if c
-       if k.is_a?(String) && !k.is_a?(LiteralString)
-         quote_identifier_append(sql, k)
-       else
-         literal_append(sql, k)
-       end
-       sql << eq
-       literal_append(sql, v)
-       c ||= true
-     end
+     update_sql_values_hash(sql, values)
    else
      sql << values
    end
  end
 
+ def update_sql_values_hash(sql, values)
+   c = false
+   eq = EQUAL
+   values.each do |k, v|
+     sql << COMMA if c
+     if k.is_a?(String) && !k.is_a?(LiteralString)
+       quote_identifier_append(sql, k)
+     else
+       literal_append(sql, k)
+     end
+     sql << eq
+     literal_append(sql, v)
+     c ||= true
+   end
+ end
+
  def update_update_sql(sql)
    sql << UPDATE
  end
@@ -0,0 +1,35 @@
+ # This changes Sequel's literalization of IN/NOT IN with an empty
+ # array value to consider NULL values if one of the referenced
+ # columns is NULL:
+ #
+ #   DB[:test].where(:name=>[])
+ #   # SELECT * FROM test WHERE (name != name)
+ #   DB[:test].exclude(:name=>[])
+ #   # SELECT * FROM test WHERE (name = name)
+ #
+ # The default Sequel behavior is to ignore NULLs, as the above
+ # query is not generally optimized well by databases.
+ #
+ # You can load this extension into specific datasets:
+ #
+ #   ds = DB[:table]
+ #   ds = ds.extension(:empty_array_consider_nulls)
+ #
+ # Or you can load it into all of a database's datasets, which
+ # is probably the desired behavior if you are using this extension:
+ #
+ #   DB.extension(:empty_array_consider_nulls)
+
+ #
+ module Sequel
+   module EmptyArrayConsiderNulls
+     # Use an expression that can evaluate to NULL if one of the
+     # referenced columns is NULL.
+     def empty_array_value(op, cols)
+       c = Array(cols)
+       SQL::BooleanExpression.from_value_pairs(c.zip(c), :AND, op == :IN)
+     end
+
+   end
+
+   Dataset.register_extension(:empty_array_consider_nulls, EmptyArrayConsiderNulls)
+ end
@@ -1,34 +1,3 @@
- # This changes Sequel's literalization of IN/NOT IN with an empty
- # array value to not return NULL even if one of the referenced
- # columns is NULL:
- #
- #   DB[:test].where(:name=>[])
- #   # SELECT * FROM test WHERE (1 = 0)
- #   DB[:test].exclude(:name=>[])
- #   # SELECT * FROM test WHERE (1 = 1)
- #
- # The default Sequel behavior is to respect NULLs, so that when
- # name is NULL, the expression returns NULL.
- #
- # You can load this extension into specific datasets:
- #
- #   ds = DB[:table]
- #   ds = ds.extension(:empty_array_ignore_nulls)
- #
- # Or you can load it into all of a database's datasets, which
- # is probably the desired behavior if you are using this extension:
- #
- #   DB.extension(:empty_array_ignore_nulls)
-
- #
- module Sequel
-   module EmptyArrayIgnoreNulls
-     # Use a simple expression that is always true or false, never NULL.
-     def empty_array_value(op, cols)
-       {1 => ((op == :IN) ? 0 : 1)}
-     end
-
-   end
-
-   Dataset.register_extension(:empty_array_ignore_nulls, EmptyArrayIgnoreNulls)
- end
+ # This only exists for backwards compatibility, as the behavior
+ # added by this extension is now the default Sequel behavior.
+ Sequel::Dataset.register_extension(:empty_array_ignore_nulls){}
@@ -34,7 +34,7 @@
  #   jb = :jsonb_column.pg_jsonb
  #
  # This creates a Sequel::Postgres::JSONOp or Sequel::Postgres::JSONBOp object that can be used
- # for easier querying:
+ # for easier querying. The following methods are available for both JSONOp and JSONBOp instances:
  #
  #   j[1]          # (json_column -> 1)
  #   j[%w'a b']    # (json_column #> ARRAY['a','b'])
@@ -56,6 +56,14 @@
  #   j.to_record      # json_to_record(json_column)
  #   j.to_recordset   # json_to_recordset(json_column)
  #
+ # There are additional methods that are only supported on JSONBOp instances:
+ #
+ #   j.contain_all(:a)    # (jsonb_column ?& a)
+ #   j.contain_any(:a)    # (jsonb_column ?| a)
+ #   j.contains(:h)       # (jsonb_column @> h)
+ #   j.contained_by(:h)   # (jsonb_column <@ h)
+ #   j.has_key?('a')      # (jsonb_column ? 'a')
+ #
  # If you are also using the pg_json extension, you should load it before
  # loading this extension. Doing so will allow you to use the #op method on
  # JSONHash, JSONArray, JSONBHash, and JSONBArray, allowing you to perform json/jsonb operations
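A usage sketch tying the new jsonb operator methods together, assuming a hypothetical posts table with a jsonb meta column, pg_json loaded into the Database, and pg_json_ops loaded globally:

  DB.extension :pg_json
  Sequel.extension :pg_json_ops

  meta = Sequel.pg_jsonb_op(:meta)
  DB[:posts].where(meta.contains(Sequel.pg_jsonb('status'=>'active'))).all
  # SELECT * FROM posts WHERE (meta @> '{"status":"active"}'::jsonb)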