em-pg-client-helper 2.0.7 → 2.0.8
- checksums.yaml +4 -4
- data/lib/em-pg-client-helper.rb +32 -32
- data/lib/em-pg-client-helper/transaction.rb +28 -9
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 454ef9b154d19499a552861f4a24f395f7ce1b28
+  data.tar.gz: 31b6924e0d271d1e481594d15b9014558f780b40
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 119c158a73d677ac6a17407c0fda64fee465afb4d131a6cede0b0519206336f23cfcc54afdc5ff22020e0ca7138eb655422a45e3fa8fde55bf2f02d6adf4d6de
+  data.tar.gz: e16bd157b126e540e1e0bcf48c63342777fd1c4a9408b28fc51b0f2677e846ce11153d764428b5c46d73d40d6e1c281bde4600bfd0202e87a252bfd1e470b9fe
data/lib/em-pg-client-helper.rb
CHANGED
@@ -71,7 +71,7 @@ module PG::EM::Client::Helper
   # # ... you don't want to know.
   #
   def sequel_sql
-    sqldb = Sequel.connect("mock://postgres")
+    sqldb = Thread.current[:em_pg_client_sequel_db] ||= Sequel.connect("mock://postgres", :keep_reference => false)
     ret = yield sqldb if block_given?
     sqls = sqldb.sqls
 
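The change above (mirrored in `usdb` below) memoizes the mock Sequel database per thread instead of creating a fresh one on every call. A minimal sketch of the pattern, outside the gem (the `mock_sequel_db` helper name here is hypothetical):

    require 'sequel'

    # Hypothetical stand-in for the gem's sequel_sql/usdb memoization.
    def mock_sequel_db
      # One mock connection per thread; :keep_reference => false stops Sequel
      # from retaining it in Sequel::DATABASES, so the connection can be
      # garbage-collected when its thread dies.
      Thread.current[:em_pg_client_sequel_db] ||=
        Sequel.connect("mock://postgres", :keep_reference => false)
    end

    db = mock_sequel_db
    db[:users].where(:id => 42).update(:name => "bob")
    db.sqls  # => ["UPDATE \"users\" SET \"name\" = 'bob' WHERE (\"id\" = 42)"]

The mock adapter never talks to a real PostgreSQL server; it only records the SQL it would have run, which is exactly what `sequel_sql` needs for query generation.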
@@ -197,37 +197,37 @@ module PG::EM::Client::Helper
 
   # Efficiently perform a "bulk" insert of multiple rows.
   #
-  # When you have a large quantity of data to insert into a table, you
-  # want to do it one row at a time -- that's *really* inefficient.
-  # other hand, if you do one giant multi-row insert statement, the
-  # will fail if *any* of the rows causes a constraint failure.
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-  #
+  # When you have a large quantity of data to insert into a table, you
+  # don't want to do it one row at a time -- that's *really* inefficient.
+  # On the other hand, if you do one giant multi-row insert statement, the
+  # insert will fail if *any* of the rows causes a constraint failure.
+  # What to do?
+  #
+  # Well, here's our answer: try to insert all the records at once. If
+  # that fails with a constraint violation, then split the set of records
+  # in half and try to bulk insert each of those halves. Recurse in this
+  # fashion until you only have one record to insert.
+  #
+  # @param db [PG::EM::Client, PG::EM::ConnectionPool] the connection
+  #   against which the insert wil be run.
+  #
+  # @param tbl [#to_sym] see
+  #   {PG::EM::Client::Helper::Transaction#bulk_insert}.
+  #
+  # @param columns [Array<#to_sym>] see
+  #   {PG::EM::Client::Helper::Transaction#bulk_insert}.
+  #
+  # @param rows [Array<Array<Object>>] see
+  #   {PG::EM::Client::Helper::Transaction#bulk_insert}.
+  #
+  # @return [EM::Deferrable] the deferrable in which the query is being
+  #   called; once the bulk insert completes successfully, the deferrable
+  #   will succeed with the number of rows that were successfully inserted.
+  #   If the insert could not be completed, the deferrable will fail
+  #   (`#errback`) with the exception.
+  #
+  # @note for details on the `tbl`, `columns`, and `rows` parameters, see
+  #   {PG::EM::Client::Helper::Transaction#bulk_insert}.
   #
   # @since 2.0.0
   #
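The split-in-half retry that this comment describes is easy to picture outside of EventMachine. A synchronous sketch, with a hypothetical `insert_sql_for` building the multi-row INSERT (the gem's real implementation is deferrable-based and differs in detail):

    require 'pg'

    # Insert rows, bisecting on constraint violations until single offending
    # rows are isolated and skipped.  Returns the number of rows inserted.
    def insert_bisecting(conn, rows)
      return 0 if rows.empty?
      # In a real transaction each attempt would need a SAVEPOINT, since a
      # failed statement otherwise aborts the whole transaction.
      conn.exec(insert_sql_for(rows))   # insert_sql_for is hypothetical
      rows.length
    rescue PG::IntegrityConstraintViolation
      return 0 if rows.length == 1      # the lone bad row: give up on it
      half = rows.length / 2
      insert_bisecting(conn, rows[0...half]) +
        insert_bisecting(conn, rows[half..-1])
    end

In the worst case (every row conflicting) this costs O(n log n) insert attempts, but when most rows are clean it approaches a single round trip.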
data/lib/em-pg-client-helper/transaction.rb
CHANGED
@@ -218,18 +218,37 @@ class PG::EM::Client::Helper::Transaction
   # and try to bulk insert each of those halves. Recurse in this fashion until
   # you only have one record to insert.
   #
-  # @param tbl [
-  # your data.
+  # @param tbl [Symbol, String] the name of the table into which you wish to insert
+  #   your data. If provided as a Symbol, the name will be escaped,
+  #   otherwise it will be inserted into the query as-is (and may `$DEITY`
+  #   have mercy on your soul).
   #
-  #
-  # will be
+  #   If the symbol name has a double underscore (`__`) in it, the part to
+  #   the left of the double-underscore will be taken as a schema name, and
+  #   the part to the right will be taken as the table name.
+  #
+  # @param columns [Array<Symbol, String>] the columns into which each
+  #   record of data will be inserted. Any element of the array which is a
+  #   symbol will be run through `Sequel::Database#literal` to escape it
+  #   into a "safe" form; elements which are Strings are inserted as-is,
+  #   and you're responsible for any escaping which may be required.
   #
   # @param rows [Array<Array<Object>>] the values to insert. Each entry in
   #   the outermost array is a row of data; the elements of each of these inner
   #   arrays corresponds to the column in the same position in the `columns`
-  # array.
-  #
-  #
+  #   array.
+  #
+  #   Due to the way the bulk insert query is constructed, some of
+  #   PostgreSQL's default casting behaviours don't work so well,
+  #   particularly around dates and times. If you find that you're getting
+  #   errors of the form `column "foo" is of type <something> but
+  #   expression is of type text`, you'll need to explicitly cast the field
+  #   that is having problems, replacing `value` in the array with
+  #   something like `Sequel.cast(value, "timestamp without time zone")`.
+  #
+  #   **NOTE**: We don't do any checking to make sure you're providing the
+  #   correct number of elements for each row. Thus, if you give a row
+  #   array that has too few, or too many, entries, the database will puke.
   #
   # @yield [Integer] Once the insert has completed, the number of rows that
   #   were successfully inserted (that may be less than `rows.length` if
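Putting the documented parameters together, a hypothetical call might look like this (the `db_transaction` entry point and block form are assumptions from the rest of the gem's API; only `bulk_insert(tbl, columns, rows)` and its Integer yield are confirmed by the docs above):

    db_transaction(db) do |txn|
      txn.bulk_insert(
        :audit__events,      # Symbol with "__": inserts into "audit"."events"
        [:id, :happened_at],
        [
          [1, Sequel.cast("2015-05-05 12:00:00", "timestamp without time zone")],
          [2, Sequel.cast("2015-05-05 12:01:00", "timestamp without time zone")],
        ]
      ) do |n|
        puts "inserted #{n} of 2 rows"
      end
    end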
@@ -259,7 +278,7 @@ class PG::EM::Client::Helper::Transaction
       else
         # Guh hand-hacked SQL is fugly... but what I'm doing is so utterly
         # niche that Sequel doesn't support it.
-        q_tbl = usdb.literal(tbl
+        q_tbl = usdb.literal(tbl)
         q_cols = columns.map { |c| usdb.literal(c) }
 
         # If there are any unique indexes which the set of columns to
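For reference, this is the quoting that `Sequel::Database#literal` produces on a mock postgres connection, which is what the `tbl` and `columns` escaping described earlier relies on (a quick sketch, not from the gem):

    db = Sequel.connect("mock://postgres", :keep_reference => false)
    db.literal(:my_table)       # => "\"my_table\""
    db.literal(:schema__table)  # => "\"schema\".\"table\""

Note that symbols literal-ize as quoted identifiers (with `__` splitting off a schema), whereas a plain String passed by the caller is spliced into the SQL untouched.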
@@ -362,7 +381,7 @@ class PG::EM::Client::Helper::Transaction
   # call methods like `#literal` on, for easy quoting.
   #
   def usdb
-
+    Thread.current[:em_pg_client_sequel_db] ||= Sequel.connect("mock://postgres", :keep_reference => false)
   end
 
   # Find the unique indexes for a table, and yield the columns in each.
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: em-pg-client-helper
 version: !ruby/object:Gem::Version
-  version: 2.0.7
+  version: 2.0.8
 platform: ruby
 authors:
 - Matt Palmer
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2015-
+date: 2015-05-05 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: em-pg-client