sequel 5.80.0 → 5.92.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (205) hide show
  1. checksums.yaml +4 -4
  2. data/bin/sequel +9 -4
  3. data/lib/sequel/adapters/ado.rb +1 -1
  4. data/lib/sequel/adapters/ibmdb.rb +1 -0
  5. data/lib/sequel/adapters/jdbc/db2.rb +2 -2
  6. data/lib/sequel/adapters/jdbc/derby.rb +3 -3
  7. data/lib/sequel/adapters/jdbc/h2.rb +2 -2
  8. data/lib/sequel/adapters/jdbc/hsqldb.rb +2 -2
  9. data/lib/sequel/adapters/jdbc/jtds.rb +2 -2
  10. data/lib/sequel/adapters/jdbc/mysql.rb +1 -1
  11. data/lib/sequel/adapters/jdbc/oracle.rb +5 -5
  12. data/lib/sequel/adapters/jdbc/postgresql.rb +5 -5
  13. data/lib/sequel/adapters/jdbc/sqlanywhere.rb +6 -6
  14. data/lib/sequel/adapters/jdbc/sqlite.rb +2 -2
  15. data/lib/sequel/adapters/jdbc/sqlserver.rb +2 -2
  16. data/lib/sequel/adapters/jdbc.rb +8 -8
  17. data/lib/sequel/adapters/mysql2.rb +8 -1
  18. data/lib/sequel/adapters/shared/access.rb +1 -0
  19. data/lib/sequel/adapters/shared/db2.rb +1 -1
  20. data/lib/sequel/adapters/shared/mssql.rb +18 -5
  21. data/lib/sequel/adapters/shared/mysql.rb +8 -4
  22. data/lib/sequel/adapters/shared/oracle.rb +1 -0
  23. data/lib/sequel/adapters/shared/postgres.rb +106 -13
  24. data/lib/sequel/adapters/shared/sqlite.rb +4 -2
  25. data/lib/sequel/adapters/sqlite.rb +4 -0
  26. data/lib/sequel/adapters/trilogy.rb +1 -2
  27. data/lib/sequel/connection_pool/sharded_threaded.rb +26 -10
  28. data/lib/sequel/connection_pool/threaded.rb +26 -10
  29. data/lib/sequel/connection_pool.rb +2 -2
  30. data/lib/sequel/core.rb +15 -0
  31. data/lib/sequel/database/connecting.rb +20 -26
  32. data/lib/sequel/database/dataset_defaults.rb +3 -3
  33. data/lib/sequel/database/misc.rb +46 -10
  34. data/lib/sequel/database/query.rb +11 -11
  35. data/lib/sequel/database/schema_generator.rb +8 -0
  36. data/lib/sequel/database/schema_methods.rb +17 -1
  37. data/lib/sequel/dataset/actions.rb +9 -1
  38. data/lib/sequel/dataset/deprecated_singleton_class_methods.rb +1 -1
  39. data/lib/sequel/dataset/prepared_statements.rb +2 -1
  40. data/lib/sequel/dataset/query.rb +9 -5
  41. data/lib/sequel/dataset/sql.rb +25 -5
  42. data/lib/sequel/extensions/caller_logging.rb +2 -0
  43. data/lib/sequel/extensions/connection_validator.rb +15 -10
  44. data/lib/sequel/extensions/dataset_run.rb +41 -0
  45. data/lib/sequel/extensions/migration.rb +23 -3
  46. data/lib/sequel/extensions/null_dataset.rb +2 -2
  47. data/lib/sequel/extensions/pg_auto_parameterize.rb +1 -1
  48. data/lib/sequel/extensions/pg_auto_parameterize_in_array.rb +93 -10
  49. data/lib/sequel/extensions/pg_enum.rb +3 -3
  50. data/lib/sequel/extensions/pg_json_ops.rb +642 -9
  51. data/lib/sequel/extensions/pg_row.rb +3 -1
  52. data/lib/sequel/extensions/pg_schema_caching.rb +90 -0
  53. data/lib/sequel/extensions/provenance.rb +2 -0
  54. data/lib/sequel/extensions/query_blocker.rb +172 -0
  55. data/lib/sequel/extensions/schema_caching.rb +24 -9
  56. data/lib/sequel/extensions/schema_dumper.rb +16 -4
  57. data/lib/sequel/extensions/sqlite_json_ops.rb +1 -1
  58. data/lib/sequel/extensions/stdio_logger.rb +48 -0
  59. data/lib/sequel/extensions/string_agg.rb +17 -4
  60. data/lib/sequel/extensions/temporarily_release_connection.rb +178 -0
  61. data/lib/sequel/extensions/virtual_row_method_block.rb +1 -0
  62. data/lib/sequel/model/associations.rb +28 -3
  63. data/lib/sequel/model/base.rb +67 -18
  64. data/lib/sequel/plugins/association_pks.rb +1 -1
  65. data/lib/sequel/plugins/column_encryption.rb +1 -1
  66. data/lib/sequel/plugins/composition.rb +1 -1
  67. data/lib/sequel/plugins/defaults_setter.rb +16 -4
  68. data/lib/sequel/plugins/enum.rb +1 -1
  69. data/lib/sequel/plugins/forbid_lazy_load.rb +14 -1
  70. data/lib/sequel/plugins/input_transformer.rb +1 -1
  71. data/lib/sequel/plugins/inspect_pk.rb +44 -0
  72. data/lib/sequel/plugins/instance_filters.rb +4 -1
  73. data/lib/sequel/plugins/inverted_subsets.rb +1 -0
  74. data/lib/sequel/plugins/lazy_attributes.rb +1 -1
  75. data/lib/sequel/plugins/nested_attributes.rb +10 -5
  76. data/lib/sequel/plugins/optimistic_locking.rb +2 -0
  77. data/lib/sequel/plugins/paged_operations.rb +5 -2
  78. data/lib/sequel/plugins/pg_auto_constraint_validations.rb +6 -1
  79. data/lib/sequel/plugins/pg_auto_validate_enums.rb +88 -0
  80. data/lib/sequel/plugins/pg_eager_any_typed_array.rb +95 -0
  81. data/lib/sequel/plugins/rcte_tree.rb +1 -1
  82. data/lib/sequel/plugins/serialization.rb +11 -5
  83. data/lib/sequel/plugins/sql_comments.rb +7 -2
  84. data/lib/sequel/plugins/static_cache_cache.rb +50 -13
  85. data/lib/sequel/plugins/subset_conditions.rb +85 -5
  86. data/lib/sequel/plugins/subset_static_cache.rb +263 -0
  87. data/lib/sequel/plugins/tactical_eager_loading.rb +6 -2
  88. data/lib/sequel/plugins/validate_associated.rb +1 -1
  89. data/lib/sequel/sql.rb +16 -6
  90. data/lib/sequel/version.rb +1 -1
  91. metadata +12 -234
  92. data/CHANGELOG +0 -1355
  93. data/README.rdoc +0 -936
  94. data/doc/advanced_associations.rdoc +0 -884
  95. data/doc/association_basics.rdoc +0 -1859
  96. data/doc/bin_sequel.rdoc +0 -146
  97. data/doc/cheat_sheet.rdoc +0 -255
  98. data/doc/code_order.rdoc +0 -102
  99. data/doc/core_extensions.rdoc +0 -405
  100. data/doc/dataset_basics.rdoc +0 -96
  101. data/doc/dataset_filtering.rdoc +0 -222
  102. data/doc/extensions.rdoc +0 -77
  103. data/doc/fork_safety.rdoc +0 -84
  104. data/doc/mass_assignment.rdoc +0 -98
  105. data/doc/migration.rdoc +0 -660
  106. data/doc/model_dataset_method_design.rdoc +0 -129
  107. data/doc/model_hooks.rdoc +0 -254
  108. data/doc/model_plugins.rdoc +0 -270
  109. data/doc/mssql_stored_procedures.rdoc +0 -43
  110. data/doc/object_model.rdoc +0 -563
  111. data/doc/opening_databases.rdoc +0 -436
  112. data/doc/postgresql.rdoc +0 -611
  113. data/doc/prepared_statements.rdoc +0 -144
  114. data/doc/querying.rdoc +0 -1070
  115. data/doc/reflection.rdoc +0 -120
  116. data/doc/release_notes/5.0.0.txt +0 -159
  117. data/doc/release_notes/5.1.0.txt +0 -31
  118. data/doc/release_notes/5.10.0.txt +0 -84
  119. data/doc/release_notes/5.11.0.txt +0 -83
  120. data/doc/release_notes/5.12.0.txt +0 -141
  121. data/doc/release_notes/5.13.0.txt +0 -27
  122. data/doc/release_notes/5.14.0.txt +0 -63
  123. data/doc/release_notes/5.15.0.txt +0 -39
  124. data/doc/release_notes/5.16.0.txt +0 -110
  125. data/doc/release_notes/5.17.0.txt +0 -31
  126. data/doc/release_notes/5.18.0.txt +0 -69
  127. data/doc/release_notes/5.19.0.txt +0 -28
  128. data/doc/release_notes/5.2.0.txt +0 -33
  129. data/doc/release_notes/5.20.0.txt +0 -89
  130. data/doc/release_notes/5.21.0.txt +0 -87
  131. data/doc/release_notes/5.22.0.txt +0 -48
  132. data/doc/release_notes/5.23.0.txt +0 -56
  133. data/doc/release_notes/5.24.0.txt +0 -56
  134. data/doc/release_notes/5.25.0.txt +0 -32
  135. data/doc/release_notes/5.26.0.txt +0 -35
  136. data/doc/release_notes/5.27.0.txt +0 -21
  137. data/doc/release_notes/5.28.0.txt +0 -16
  138. data/doc/release_notes/5.29.0.txt +0 -22
  139. data/doc/release_notes/5.3.0.txt +0 -121
  140. data/doc/release_notes/5.30.0.txt +0 -20
  141. data/doc/release_notes/5.31.0.txt +0 -148
  142. data/doc/release_notes/5.32.0.txt +0 -46
  143. data/doc/release_notes/5.33.0.txt +0 -24
  144. data/doc/release_notes/5.34.0.txt +0 -40
  145. data/doc/release_notes/5.35.0.txt +0 -56
  146. data/doc/release_notes/5.36.0.txt +0 -60
  147. data/doc/release_notes/5.37.0.txt +0 -30
  148. data/doc/release_notes/5.38.0.txt +0 -28
  149. data/doc/release_notes/5.39.0.txt +0 -19
  150. data/doc/release_notes/5.4.0.txt +0 -80
  151. data/doc/release_notes/5.40.0.txt +0 -40
  152. data/doc/release_notes/5.41.0.txt +0 -25
  153. data/doc/release_notes/5.42.0.txt +0 -136
  154. data/doc/release_notes/5.43.0.txt +0 -98
  155. data/doc/release_notes/5.44.0.txt +0 -32
  156. data/doc/release_notes/5.45.0.txt +0 -34
  157. data/doc/release_notes/5.46.0.txt +0 -87
  158. data/doc/release_notes/5.47.0.txt +0 -59
  159. data/doc/release_notes/5.48.0.txt +0 -14
  160. data/doc/release_notes/5.49.0.txt +0 -59
  161. data/doc/release_notes/5.5.0.txt +0 -61
  162. data/doc/release_notes/5.50.0.txt +0 -78
  163. data/doc/release_notes/5.51.0.txt +0 -47
  164. data/doc/release_notes/5.52.0.txt +0 -87
  165. data/doc/release_notes/5.53.0.txt +0 -23
  166. data/doc/release_notes/5.54.0.txt +0 -27
  167. data/doc/release_notes/5.55.0.txt +0 -21
  168. data/doc/release_notes/5.56.0.txt +0 -51
  169. data/doc/release_notes/5.57.0.txt +0 -23
  170. data/doc/release_notes/5.58.0.txt +0 -31
  171. data/doc/release_notes/5.59.0.txt +0 -73
  172. data/doc/release_notes/5.6.0.txt +0 -31
  173. data/doc/release_notes/5.60.0.txt +0 -22
  174. data/doc/release_notes/5.61.0.txt +0 -43
  175. data/doc/release_notes/5.62.0.txt +0 -132
  176. data/doc/release_notes/5.63.0.txt +0 -33
  177. data/doc/release_notes/5.64.0.txt +0 -50
  178. data/doc/release_notes/5.65.0.txt +0 -21
  179. data/doc/release_notes/5.66.0.txt +0 -24
  180. data/doc/release_notes/5.67.0.txt +0 -32
  181. data/doc/release_notes/5.68.0.txt +0 -61
  182. data/doc/release_notes/5.69.0.txt +0 -26
  183. data/doc/release_notes/5.7.0.txt +0 -108
  184. data/doc/release_notes/5.70.0.txt +0 -35
  185. data/doc/release_notes/5.71.0.txt +0 -21
  186. data/doc/release_notes/5.72.0.txt +0 -33
  187. data/doc/release_notes/5.73.0.txt +0 -66
  188. data/doc/release_notes/5.74.0.txt +0 -45
  189. data/doc/release_notes/5.75.0.txt +0 -35
  190. data/doc/release_notes/5.76.0.txt +0 -86
  191. data/doc/release_notes/5.77.0.txt +0 -63
  192. data/doc/release_notes/5.78.0.txt +0 -67
  193. data/doc/release_notes/5.79.0.txt +0 -28
  194. data/doc/release_notes/5.8.0.txt +0 -170
  195. data/doc/release_notes/5.80.0.txt +0 -40
  196. data/doc/release_notes/5.9.0.txt +0 -99
  197. data/doc/schema_modification.rdoc +0 -679
  198. data/doc/security.rdoc +0 -443
  199. data/doc/sharding.rdoc +0 -286
  200. data/doc/sql.rdoc +0 -648
  201. data/doc/testing.rdoc +0 -190
  202. data/doc/thread_safety.rdoc +0 -15
  203. data/doc/transactions.rdoc +0 -250
  204. data/doc/validations.rdoc +0 -558
  205. data/doc/virtual_rows.rdoc +0 -265
data/doc/postgresql.rdoc DELETED
@@ -1,611 +0,0 @@
1
- = PostgreSQL-specific Support in Sequel
2
-
3
- Sequel's core database and dataset functions are designed to support the features
4
- shared by most common SQL database implementations. However, Sequel's database
5
- adapters extend the core support to include support for database-specific features.
6
-
7
- By far the most extensive database-specific support in Sequel is for PostgreSQL. This
8
- support is roughly broken into the following areas:
9
-
10
- * Database Types
11
- * DDL Support
12
- * DML Support
13
- * sequel_pg
14
-
15
- Note that while this guide is extensive, it is not exhaustive. There are additional
16
- rarely used PostgreSQL features that Sequel supports which are not mentioned here.
17
-
18
- == Adapter/Driver Specific Support
19
-
20
- Some of this support depends on the specific adapter or underlying driver in use.
21
- <tt>postgres only</tt> will denote support specific to the postgres adapter (i.e.
22
- not available when connecting to PostgreSQL via the jdbc adapter).
23
- <tt>postgres/pg only</tt> will denote support specific to the postgres adapter when
24
- pg is used as the underlying driver (i.e. not available when using the postgres-pr
25
- driver).
26
-
27
- == PostgreSQL-specific Database Type Support
28
-
29
- Sequel's default support on PostgreSQL only includes common database types. However,
30
- Sequel ships with support for many PostgreSQL-specific types via extensions. In general,
31
- you load these extensions via <tt>Database#extension</tt>. For example, to load support
32
- for arrays, you would do:
33
-
34
- DB.extension :pg_array
35
-
36
- The following PostgreSQL-specific type extensions are available:
37
-
38
- pg_array :: arrays (single and multidimensional, for any scalar type), as a ruby Array-like object
39
- pg_hstore :: hstore, as a ruby Hash-like object
40
- pg_inet :: inet/cidr, as ruby IPAddr objects
41
- pg_interval :: interval, as ActiveSupport::Duration objects
42
- pg_json :: json, as either ruby Array-like or Hash-like objects
43
- pg_range :: ranges (for any scalar type), as a ruby Range-like object
44
- pg_row :: row-valued/composite types, as a ruby Hash-like or Sequel::Model object
45
-
46
- In general, these extensions just add support for Database objects to return retrieved
47
- column values as the appropriate type and support for literalizing
48
- the objects correctly for use in an SQL string, or using them as bound variable values (<tt>postgres/pg and jdbc/postgres only</tt>).
49
-
50
- There are also type-specific extensions that make it easy to use database functions
51
- and operators related to the type. These extensions are:
52
-
53
- pg_array_ops :: array-related functions and operators
54
- pg_hstore_ops :: hstore-related functions and operators
55
- pg_json_ops :: json-related functions and operators
56
- pg_range_ops :: range-related functions and operators
57
- pg_row_ops :: row-valued/composite type syntax support
58
-
59
- These extensions aren't Database specific, they are global extensions, so you should
60
- load them via <tt>Sequel.extension</tt>, after loading support for the specific types
61
- into the Database instance:
62
-
63
- DB.extension :pg_array
64
- Sequel.extension :pg_array_ops
65
-
66
- With regard to common database types, please note that the generic String type
67
- is +text+ on PostgreSQL and not <tt>varchar(255)</tt> as it is on some other
68
- databases. +text+ is PostgreSQL's recommended type for storage of text data,
69
- and is more similar to Ruby's String type as it allows for unlimited length.
70
- If you want to set a maximum size for a text column, you must specify a
71
- <tt>:size</tt> option. This will use a <tt>varchar($size)</tt> type and
72
- impose a maximum size for the column.
73
-
74
- == PostgreSQL-specific DDL Support
75
-
76
- === Exclusion Constraints
77
-
78
- In +create_table+ blocks, you can use the +exclude+ method to set up exclusion constraints:
79
-
80
- DB.create_table(:table) do
81
- daterange :during
82
- exclude([[:during, '&&']], name: :table_during_excl)
83
- end
84
- # CREATE TABLE "table" ("during" daterange,
85
- # CONSTRAINT "table_during_excl" EXCLUDE USING gist ("during" WITH &&))
86
-
87
- You can also add exclusion constraints in +alter_table+ blocks using add_exclusion_constraint:
88
-
89
- DB.alter_table(:table) do
90
- add_exclusion_constraint([[:during, '&&']], name: :table_during_excl)
91
- end
92
- # ALTER TABLE "table" ADD CONSTRAINT "table_during_excl" EXCLUDE USING gist ("during" WITH &&)
93
-
94
- === Adding Foreign Key and Check Constraints Without Initial Validation
95
-
96
- You can add a <tt>not_valid: true</tt> option when adding constraints to existing tables so
97
- that it doesn't check if all current rows are valid:
98
-
99
- DB.alter_table(:table) do
100
- # Assumes t_id column already exists
101
- add_foreign_key([:t_id], :table, not_valid: true, name: :table_fk)
102
-
103
- constraint({name: :col_123, not_valid: true}, col: [1,2,3])
104
- end
105
- # ALTER TABLE "table" ADD CONSTRAINT "table_fk" FOREIGN KEY ("t_id") REFERENCES "table" NOT VALID
106
- # ALTER TABLE "table" ADD CONSTRAINT "col_123" CHECK (col IN (1, 2, 3)) NOT VALID
107
-
108
- Such constraints will be enforced for newly inserted and updated rows, but not for existing rows. After
109
- all existing rows have been fixed, you can validate the constraint:
110
-
111
- DB.alter_table(:table) do
112
- validate_constraint(:table_fk)
113
- validate_constraint(:col_123)
114
- end
115
- # ALTER TABLE "table" VALIDATE CONSTRAINT "table_fk"
116
- # ALTER TABLE "table" VALIDATE CONSTRAINT "col_123"
117
-
118
- === Creating Indexes Concurrently
119
-
120
- You can create indexes concurrently using the <tt>concurrently: true</tt> option:
121
-
122
- DB.add_index(:table, :t_id, concurrently: true)
123
- # CREATE INDEX CONCURRENTLY "table_t_id_index" ON "table" ("t_id")
124
-
125
- Similarly, you can drop indexes concurrently as well:
126
-
127
- DB.drop_index(:table, :t_id, concurrently: true)
128
- # DROP INDEX CONCURRENTLY "table_t_id_index"
129
-
130
- === Specific Conversions When Altering Column Types
131
-
132
- When altering a column type, PostgreSQL allows the user to specify how to do the
133
- conversion via a USING clause, and Sequel supports this using the <tt>:using</tt> option:
134
-
135
- DB.alter_table(:table) do
136
- # Assume unix_time column is stored as an integer, and you want to change it to timestamp
137
- set_column_type :unix_time, Time, using: (Sequel.cast('epoch', Time) + Sequel.cast('1 second', :interval) * :unix_time)
138
- end
139
- # ALTER TABLE "table" ALTER COLUMN "unix_time" TYPE timestamp
140
- # USING (CAST('epoch' AS timestamp) + (CAST('1 second' AS interval) * "unix_time"))
141
-
142
- === Creating Partitioned Tables
143
-
144
- PostgreSQL allows marking tables as partitioned tables, and adding partitions to such tables. Sequel
145
- offers support for this. You can create a partitioned table using the +:partition_by+ and
146
- +:partition_type+ options (the default partition type is range partitioning):
147
-
148
- DB.create_table(:table1, partition_by: :column, partition_type: :range) do
149
- Integer :id
150
- Date :column
151
- end
152
-
153
- DB.create_table(:table2, partition_by: :column, partition_type: :list) do
154
- Integer :id
155
- String :column
156
- end
157
-
158
- DB.create_table(:table3, partition_by: :column, partition_type: :hash) do
159
- Integer :id
160
- Integer :column
161
- end
162
-
163
- To add partitions of other tables, you use the +:partition_of+ option. This option will use
164
- a custom DSL specific to partitioning other tables. For range partitioning, you can use the
165
- +from+ and +to+ methods to specify the inclusive beginning and exclusive ending of the
166
- range of the partition. You can call the +minvalue+ and +maxvalue+ methods to get the minimum
167
- and maximum values for the column(s) in the range, useful as arguments to +from+ and +to+:
168
-
169
- DB.create_table(:table1a, partition_of: :table1) do
170
- from minvalue
171
- to 0
172
- end
173
- DB.create_table(:table1b, partition_of: :table1) do
174
- from 0
175
- to 100
176
- end
177
- DB.create_table(:table1c, partition_of: :table1) do
178
- from 100
179
- to maxvalue
180
- end
181
-
182
- For list partitioning, you use the +values_in+ method. You can also use the +default+ method
183
- to mark a partition as the default partition:
184
-
185
- DB.create_table(:table2a, partition_of: :table2) do
186
- values_in 1, 2, 3
187
- end
188
- DB.create_table(:table2b, partition_of: :table2) do
189
- values_in 4, 5, 6
190
- end
191
- DB.create_table(:table2c, partition_of: :table2) do
192
- default
193
- end
194
-
195
- For hash partitioning, you use the +modulus+ and +remainder+ methods:
196
-
197
- DB.create_table(:table3a, partition_of: :table3) do
198
- modulus 3
199
- remainder 0
200
- end
201
- DB.create_table(:table3b, partition_of: :table3) do
202
- modulus 3
203
- remainder 1
204
- end
205
- DB.create_table(:table3c, partition_of: :table3) do
206
- modulus 3
207
- remainder 2
208
- end
209
-
210
- There is currently no support for using custom column or table constraints in partitions of
211
- other tables. Support may be added in the future.
212
-
213
- === Creating Unlogged Tables
214
-
215
- PostgreSQL allows users to create unlogged tables, which are faster but not crash safe. Sequel
216
- allows you to create an unlogged table by specifying the <tt>unlogged: true</tt> option to +create_table+:
217
-
218
- DB.create_table(:table, unlogged: true){Integer :i}
219
- # CREATE UNLOGGED TABLE "table" ("i" integer)
220
-
221
- === Creating Identity Columns
222
-
223
- You can use the +:identity+ option when creating columns to mark them as identity columns.
224
- Identity columns are tied to a sequence for the default value. You can still override the
225
- default value for the column when inserting:
226
-
227
- DB.create_table(:table){Integer :id, identity: true}
228
- # CREATE TABLE "table" ("id" integer GENERATED BY DEFAULT AS IDENTITY)
229
-
230
- If you want to disallow using a user provided value when inserting, you can mark the
231
- identity column using <tt>identity: :always</tt>:
232
-
233
- DB.create_table(:table){Integer :id, identity: :always}
234
- # CREATE TABLE "table" ("id" integer GENERATED ALWAYS AS IDENTITY)
235
-
236
- === Creating/Dropping Schemas, Languages, Functions, and Triggers
237
-
238
- Sequel has built in support for creating and dropping PostgreSQL schemas, procedural languages, functions, and triggers:
239
-
240
- DB.create_schema(:s)
241
- # CREATE SCHEMA "s"
242
- DB.drop_schema(:s)
243
- # DROP SCHEMA "s"
244
-
245
- DB.create_language(:plperl)
246
- # CREATE LANGUAGE plperl
247
- DB.drop_language(:plperl)
248
- # DROP LANGUAGE plperl
249
-
250
- DB.create_function(:set_updated_at, <<-SQL, language: :plpgsql, returns: :trigger)
251
- BEGIN
252
- NEW.updated_at := CURRENT_TIMESTAMP;
253
- RETURN NEW;
254
- END;
255
- SQL
256
- # CREATE FUNCTION set_updated_at() RETURNS trigger LANGUAGE plpgsql AS '
257
- # BEGIN
258
- # NEW.updated_at := CURRENT_TIMESTAMP;
259
- # RETURN NEW;
260
- # END;'
261
- DB.drop_function(:set_updated_at)
262
- # DROP FUNCTION set_updated_at()
263
-
264
- DB.create_trigger(:table, :trg_updated_at, :set_updated_at, events: :update, each_row: true, when: {Sequel[:new][:updated_at] => Sequel[:old][:updated_at]})
265
- # CREATE TRIGGER trg_updated_at BEFORE UPDATE ON "table" FOR EACH ROW WHEN ("new"."updated_at" = "old"."updated_at") EXECUTE PROCEDURE set_updated_at()
266
- DB.drop_trigger(:table, :trg_updated_at)
267
- # DROP TRIGGER trg_updated_at ON "table"
268
-
269
- However, you may want to consider just using <tt>Database#run</tt> with the necessary SQL code, at least for functions and triggers.
270
-
271
- === Parsing Check Constraints
272
-
273
- Sequel has support for parsing CHECK constraints on PostgreSQL using <tt>Sequel::Database#check_constraints</tt>:
274
-
275
- DB.create_table(:foo) do
276
- Integer :i
277
- Integer :j
278
- constraint(:ic, Sequel[:i] > 2)
279
- constraint(:jc, Sequel[:j] > 2)
280
- constraint(:ijc, Sequel[:i] - Sequel[:j] > 2)
281
- end
282
- DB.check_constraints(:foo)
283
- # => {
284
- # :ic=>{:definition=>"CHECK ((i > 2))", :columns=>[:i]},
285
- # :jc=>{:definition=>"CHECK ((j > 2))", :columns=>[:j]},
286
- # :ijc=>{:definition=>"CHECK (((i - j) > 2))", :columns=>[:i, :j]}
287
- # }
288
-
289
- === Parsing Foreign Key Constraints Referencing A Given Table
290
-
291
- Sequel has support for parsing FOREIGN KEY constraints that reference a given table, using the +:reverse+
292
- option to +foreign_key_list+:
293
-
294
- DB.create_table!(:a) do
295
- primary_key :id
296
- Integer :i
297
- Integer :j
298
- foreign_key :a_id, :a, foreign_key_constraint_name: :a_a
299
- unique [:i, :j]
300
- end
301
- DB.create_table!(:b) do
302
- foreign_key :a_id, :a, foreign_key_constraint_name: :a_a
303
- Integer :c
304
- Integer :d
305
- foreign_key [:c, :d], :a, key: [:j, :i], name: :a_c_d
306
- end
307
- DB.foreign_key_list(:a, reverse: true)
308
- # => [
309
- # {:name=>:a_a, :columns=>[:a_id], :key=>[:id], :on_update=>:no_action, :on_delete=>:no_action, :deferrable=>false, :table=>:a, :schema=>:public},
310
- # {:name=>:a_a, :columns=>[:a_id], :key=>[:id], :on_update=>:no_action, :on_delete=>:no_action, :deferrable=>false, :table=>:b, :schema=>:public},
311
- # {:name=>:a_c_d, :columns=>[:c, :d], :key=>[:j, :i], :on_update=>:no_action, :on_delete=>:no_action, :deferrable=>false, :table=>:b, :schema=>:public}
312
- # ]
313
-
314
- == PostgreSQL-specific DML Support
315
-
316
- === Returning Rows From Insert, Update, and Delete Statements
317
-
318
- Sequel supports the ability to return rows from insert, update, and delete statements, via
319
- <tt>Dataset#returning</tt>:
320
-
321
- DB[:table].returning.insert
322
- # INSERT INTO "table" DEFAULT VALUES RETURNING *
323
-
324
- DB[:table].returning(:id).delete
325
- # DELETE FROM "table" RETURNING "id"
326
-
327
- DB[:table].returning(:id, Sequel.*(:id, :id).as(:idsq)).update(id: 2)
328
- # UPDATE "table" SET "id" = 2 RETURNING "id", ("id" * "id") AS "idsq"
329
-
330
- When returning is used, instead of returning the number of rows affected (for update/delete)
331
- or the serial primary key value (for insert), it will return an array of hashes with the
332
- returning results.
333
-
334
- === VALUES Support
335
-
336
- Sequel offers support for the +VALUES+ statement using <tt>Database#values</tt>:
337
-
338
- DB.values([[1,2],[2,3],[3,4]])
339
- # VALUES (1, 2), (2, 3), (3, 4)
340
-
341
- DB.values([[1,2],[2,3],[3,4]]).order(2, 1)
342
- # VALUES (1, 2), (2, 3), (3, 4) ORDER BY 2, 1
343
-
344
- DB.values([[1,2],[2,3],[3,4]]).order(2, 1).limit(1,2)
345
- # VALUES (1, 2), (2, 3), (3, 4) ORDER BY 2, 1 LIMIT 1 OFFSET 2
346
-
347
- === INSERT ON CONFLICT Support
348
-
349
- Starting with PostgreSQL 9.5, you can do an upsert or ignore unique or exclusion constraint
350
- violations when inserting using <tt>Dataset#insert_conflict</tt>:
351
-
352
- DB[:table].insert_conflict.insert(a: 1, b: 2)
353
- # INSERT INTO TABLE (a, b) VALUES (1, 2)
354
- # ON CONFLICT DO NOTHING
355
-
356
- For compatibility with Sequel's MySQL support, you can also use +insert_ignore+:
357
-
358
- DB[:table].insert_ignore.insert(a: 1, b: 2)
359
- # INSERT INTO TABLE (a, b) VALUES (1, 2)
360
- # ON CONFLICT DO NOTHING
361
-
362
- You can pass a specific constraint name using +:constraint+, to only ignore a specific
363
- constraint violation:
364
-
365
- DB[:table].insert_conflict(constraint: :table_a_uidx).insert(a: 1, b: 2)
366
- # INSERT INTO TABLE (a, b) VALUES (1, 2)
367
- # ON CONFLICT ON CONSTRAINT table_a_uidx DO NOTHING
368
-
369
- If the unique or exclusion constraint covers the whole table (e.g. it isn't a partial unique
370
- index), then you can just specify the column using the +:target+ option:
371
-
372
- DB[:table].insert_conflict(target: :a).insert(a: 1, b: 2)
373
- # INSERT INTO TABLE (a, b) VALUES (1, 2)
374
- # ON CONFLICT (a) DO NOTHING
375
-
376
- If you want to update the existing row instead of ignoring the constraint violation, you
377
- can pass an +:update+ option with a hash of values to update. You must pass either the
378
- +:target+ or +:constraint+ options when passing the +:update+ option:
379
-
380
- DB[:table].insert_conflict(target: :a, update: {b: Sequel[:excluded][:b]}).insert(a: 1, b: 2)
381
- # INSERT INTO TABLE (a, b) VALUES (1, 2)
382
- # ON CONFLICT (a) DO UPDATE SET b = excluded.b
383
-
384
- If you want to update existing rows but using the current value of the column, you can build
385
- the desired calculation using <tt>Sequel[]</tt>:
386
-
387
- DB[:table]
388
- .insert_conflict(
389
- target: :a,
390
- update: {b: Sequel[:excluded][:b] + Sequel[:table][:a]}
391
- )
392
- .import([:a, :b], [ [1, 2] ])
393
- # INSERT INTO TABLE (a, b) VALUES (1, 2)
394
- # ON CONFLICT (a) DO UPDATE SET b = (excluded.b + table.a)
395
-
396
- Additionally, if you only want to do the update in certain cases, you can specify an
397
- +:update_where+ option, which will be used as a filter. If the row doesn't match the
398
- conditions, the constraint violation will be ignored, but the row will not be updated:
399
-
400
- DB[:table].insert_conflict(constraint: :table_a_uidx,
401
- update: {b: Sequel[:excluded][:b]},
402
- update_where: {Sequel[:table][:status_id]=>1}).insert(a: 1, b: 2)
403
- # INSERT INTO TABLE (a, b) VALUES (1, 2)
404
- # ON CONFLICT ON CONSTRAINT table_a_uidx
405
- # DO UPDATE SET b = excluded.b WHERE (table.status_id = 1)
406
-
407
- === INSERT OVERRIDING SYSTEM|USER VALUE Support
408
-
409
- PostgreSQL 10+ supports identity columns, which are designed to replace the serial
410
- columns previously used for autoincrementing primary keys. You can use
411
- Dataset#overriding_system_value and Dataset#overriding_user_value to use this new
412
- syntax:
413
-
414
- DB.create_table(:table){primary_key :id}
415
- # Ignore the given value for id, using the identity's sequence value.
416
- DB[:table].overriding_user_value.insert(id: 1)
417
-
418
- DB.create_table(:table){primary_key :id, identity: :always}
419
- # Force the use of the given value for id, because otherwise the insert will
420
- # raise an error, since GENERATED ALWAYS was used when creating the column.
421
- DB[:table].overriding_system_value.insert(id: 1)
422
-
423
- === Distinct On Specific Columns
424
-
425
- Sequel allows passing columns to <tt>Dataset#distinct</tt>, which will make the dataset return
426
- rows that are distinct on just those columns:
427
-
428
- DB[:table].distinct(:id).all
429
- # SELECT DISTINCT ON ("id") * FROM "table"
430
-
431
- === JOIN USING table alias
432
-
433
- Sequel allows passing an SQL::AliasedExpression to join table methods to use a USING
434
- join with a table alias for the USING columns:
435
-
436
- DB[:t1].join(:t2, Sequel.as([:c1, :c2], :alias))
437
- # SELECT * FROM "t1" INNER JOIN "t2" USING ("c1", "c2") AS "alias"
438
-
439
- === Calling PostgreSQL 11+ Procedures <tt>postgres only</tt>
440
-
441
- PostgreSQL 11+ added support for procedures, which are different from the user defined
442
- functions that PostgreSQL has historically supported. These procedures are
443
- called via a special +CALL+ syntax, and Sequel supports them via
444
- <tt>Database#call_procedure</tt>:
445
-
446
- DB.call_procedure(:foo, 1, "bar")
447
- # CALL foo(1, 'bar')
448
-
449
- <tt>Database#call_procedure</tt> will return a hash of return values if
450
- the procedure returns a result, or +nil+ if the procedure does not return
451
- a result.
452
-
453
- === Using a Cursor to Process Large Datasets <tt>postgres only</tt>
454
-
455
- The postgres adapter offers a <tt>Dataset#use_cursor</tt> method to process large result sets
456
- without keeping all rows in memory:
457
-
458
- DB[:table].use_cursor.each{|row| }
459
- # BEGIN;
460
- # DECLARE sequel_cursor NO SCROLL CURSOR WITHOUT HOLD FOR SELECT * FROM "table";
461
- # FETCH FORWARD 1000 FROM sequel_cursor
462
- # FETCH FORWARD 1000 FROM sequel_cursor
463
- # ...
464
- # FETCH FORWARD 1000 FROM sequel_cursor
465
- # CLOSE sequel_cursor
466
- # COMMIT
467
-
468
- This support is used by default when using <tt>Dataset#paged_each</tt>.
469
-
470
- Using cursors, it is possible to update individual rows of a large dataset
471
- easily using the <tt>rows_per_fetch: 1</tt> option in conjunction with
472
- <tt>Dataset#where_current_of</tt>. This is useful if the logic needed to
473
- update the rows exists in the application and not in the database:
474
-
475
- ds.use_cursor(rows_per_fetch: 1).each do |row|
476
- ds.where_current_of.update(col: new_col_value(row))
477
- end
478
-
479
- === Truncate Modifiers
480
-
481
- Sequel supports PostgreSQL-specific truncate options:
482
-
483
- DB[:table].truncate(cascade: true, only: true, restart: true)
484
- # TRUNCATE TABLE ONLY "table" RESTART IDENTITY CASCADE
485
-
486
- === COPY Support <tt>postgres/pg and jdbc/postgres only</tt>
487
-
488
- PostgreSQL's COPY feature is pretty much the fastest way to get data in or out of the database.
489
- Sequel supports getting data out of the database via <tt>Database#copy_table</tt>, either for
490
- a specific table or for an arbitrary dataset:
491
-
492
- DB.copy_table(:table, format: :csv)
493
- # COPY "table" TO STDOUT (FORMAT csv)
494
- DB.copy_table(DB[:table], format: :csv)
495
- # COPY (SELECT * FROM "table") TO STDOUT (FORMAT csv)
496
-
497
- It supports putting data into the database via <tt>Database#copy_into</tt>:
498
-
499
- DB.copy_into(:table, format: :csv, columns: [:column1, :column2], data: "1,2\n2,3\n")
500
- # COPY "table"("column1", "column2") FROM STDIN (FORMAT csv)
501
-
502
- === Anonymous Function Execution
503
-
504
- You can execute anonymous functions using a database procedural language via <tt>Database#do</tt> (the
505
- plpgsql language is the default):
506
-
507
- DB.do <<-SQL
508
- DECLARE r record;
509
- BEGIN
510
- FOR r IN SELECT table_schema, table_name FROM information_schema.tables
511
- WHERE table_type = 'VIEW' AND table_schema = 'public'
512
- LOOP
513
- EXECUTE 'GRANT ALL ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name) || ' TO webuser';
514
- END LOOP;
515
- END;
516
- SQL
517
-
518
- === Listening On and Notifying Channels
519
-
520
- You can use <tt>Database#notify</tt> to send notifications to channels:
521
-
522
- DB.notify(:channel)
523
- # NOTIFY "channel"
524
-
525
- <tt>postgres/pg only</tt> You can listen on channels via <tt>Database#listen</tt>. Note that
526
- this blocks until the listening thread is notified:
527
-
528
- DB.listen(:channel)
529
- # LISTEN "channel"
530
- # after notification received:
531
- # UNLISTEN *
532
-
533
- Note that +listen+ by default only listens for a single notification. If you want to loop and process
534
- notifications:
535
-
536
- DB.listen(:channel, loop: true){|channel| p channel}
537
-
538
- The +pg_static_cache_updater+ extension uses this support to automatically update
539
- the caches for models using the +static_cache+ plugin. Look at the documentation of that
540
- plugin for details.
541
-
542
- === Locking Tables
543
-
544
- Sequel makes it easy to lock tables, though it is generally better to let the database
545
- handle locking:
546
-
547
- DB[:table].lock('EXCLUSIVE') do
548
- DB[:table].insert(id: DB[:table].max(:id)+1)
549
- end
550
- # BEGIN;
551
- # LOCK TABLE "table" IN EXCLUSIVE MODE;
552
- # SELECT max("id") FROM "table" LIMIT 1;
553
- # INSERT INTO "table" ("id") VALUES (2) RETURNING NULL;
554
- # COMMIT;
555
-
556
- == Extended Error Info (<tt>postgres/pg only</tt>)
557
-
558
- If you run a query that raises a Sequel::DatabaseError, you can pass the exception object to
559
- <tt>Database#error_info</tt>, and that will return a hash with metadata regarding the error,
560
- such as the related table and column or constraint.
561
-
562
- DB.create_table(:test1){primary_key :id}
563
- DB.create_table(:test2){primary_key :id; foreign_key :test1_id, :test1}
564
- DB[:test2].insert(test1_id: 1) rescue DB.error_info($!)
565
- # => {
566
- # :schema=>"public",
567
- # :table=>"test2",
568
- # :column=>nil,
569
- # :constraint=>"test2_test1_id_fkey",
570
- # :type=>nil,
571
- # :severity=>"ERROR",
572
- # :sql_state=>"23503",
573
- # :message_primary=>"insert or update on table \"test2\" violates foreign key constraint \"test2_test1_id_fkey\"",
574
- #   :message_detail=>"Key (test1_id)=(1) is not present in table \"test1\".",
575
- # :message_hint=>nil,
576
- # :statement_position=>nil,
577
- # :internal_position=>nil,
578
- # :internal_query=>nil,
579
- # :source_file=>"ri_triggers.c",
580
- # :source_line=>"3321",
581
- # :source_function=>"ri_ReportViolation"
582
- # }
583
-
584
- == sequel_pg (<tt>postgres/pg only</tt>)
585
-
586
- When the postgres adapter is used with the pg driver, Sequel automatically checks for sequel_pg, and
587
- loads it if it is available. sequel_pg is a C extension that optimizes the fetching of rows, generally
588
- resulting in a ~2x speedup. It is highly recommended to install sequel_pg if you are using the
589
- postgres adapter with pg.
590
-
591
- sequel_pg has additional optimizations when using the Dataset +map+, +as_hash+,
592
- +to_hash_groups+, +select_hash+, +select_hash_groups+, +select_map+, and +select_order_map+ methods,
593
- which avoid creating intermediate hashes and can add further speedups.
594
-
595
- In addition to optimization, sequel_pg also adds streaming support if used on PostgreSQL 9.2+. Streaming
596
- support is similar to using a cursor, but it is faster and more transparent.
597
-
598
- You can enable the streaming support:
599
-
600
- DB.extension(:pg_streaming)
601
-
602
- Then you can stream individual datasets:
603
-
604
- DB[:table].stream.each{|row| }
605
-
606
- Or stream all datasets by default:
607
-
608
- DB.stream_all_queries = true
609
-
610
- When streaming is enabled, <tt>Dataset#paged_each</tt> will use streaming to implement
611
- paging.