sequel_pg 1.12.5 → 1.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG +20 -0
- data/README.rdoc +0 -19
- data/ext/sequel_pg/sequel_pg.c +162 -56
- data/lib/sequel/extensions/pg_streaming.rb +13 -2
- data/lib/sequel_pg/sequel_pg.rb +3 -0
- metadata +3 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 41aa757cb6f4ca6f56ca26044517a47a68731dd7d51335451f6da2e5b5b34d6f
+  data.tar.gz: ec43d49fd7b4b8821ecbb813f6244aaf46f69866e84b94df96a3c52ef3a6fd41
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2a2578bec8fed224dcd3c1263cb86a4729cf11318f80d6cdb03366ae7dae6e68995447db06a1105d1ecda50222d2ff6258079fb7395d3f262094e1e05502fd48
+  data.tar.gz: 26a470d0e7d8290cade76b07cecd972be840c838492607a5649c0fd803de2ca7ea2e34b0c77fddcaff734a2a875b1f26ce8d5b03ec0fe0ab887ae3045ffa10e0
data/CHANGELOG
CHANGED
@@ -1,3 +1,23 @@
+=== 1.15.0 (2022-03-16)
+
+* Avoid deprecation warning in the pg_streaming extension on pg 1.3+ when streaming a query with bound parameters (jeremyevans)
+
+* Use pgresult_stream_any when using pg 1.3.4+ for faster streaming (jeremyevans)
+
+* Do not use streaming by default for Dataset#paged_each in the pg_streaming extension (jeremyevans)
+
+* Avoid verbose warning if loading sequel_pg after Sequel pg_array extension (jeremyevans)
+
+=== 1.14.0 (2020-09-22)
+
+* Reduce stack memory usage for result sets with 64 or fewer columns (jeremyevans)
+
+* Support result sets with more than 256 columns by default (jeremyevans) (#39)
+
+=== 1.13.0 (2020-04-13)
+
+* Allow overriding of inet/cidr type conversion using conversion procs (beanieboi, jeremyevans) (#36, #37)
+
 === 1.12.5 (2020-03-23)
 
 * Fix offset calculation for timestamptz types when datetime_class is DateTime and using local application timezone (jeremyevans)
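For context on the 1.15.0 entries above, the loaded release is also visible at runtime: the C extension defines an integer version constant (see the first sequel_pg.c hunk below and the rb_const_set call near the end of that file's diff). A minimal hedged sketch, assuming the major*10000 + minor*100 + tiny encoding implied by 11500 for 1.15.0:

  # Assumes a Sequel::Database using the postgres adapter, so sequel_pg is loaded.
  # The decoding below is inferred from 11500 <=> 1.15.0, not documented here.
  v = Sequel::Postgres::SEQUEL_PG_VERSION_INTEGER
  major, rest = v.divmod(10_000)
  minor, tiny = rest.divmod(100)
  puts format('sequel_pg %d.%d.%d', major, minor, tiny)  # => sequel_pg 1.15.0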
data/README.rdoc
CHANGED
@@ -70,12 +70,6 @@ can do the following:
 
   gem install sequel_pg
 
-Note that by default sequel_pg only supports result sets with up to
-256 columns. If you will have a result set with more than 256 columns,
-you should modify the maximum supported number of columns via:
-
-  gem install sequel_pg -- --with-cflags=\"-DSPG_MAX_FIELDS=512\"
-
 Make sure the pg_config binary is in your PATH so the installation
 can find the PostgreSQL shared library and header files. Alternatively,
 you can use the POSTGRES_LIB and POSTGRES_INCLUDE environment
@@ -118,19 +112,6 @@ requirements:
 
   rake build
 
-== Platforms Supported
-
-sequel_pg has been tested on the following:
-
-* ruby 1.9.3
-* ruby 2.0
-* ruby 2.1
-* ruby 2.2
-* ruby 2.3
-* ruby 2.4
-* ruby 2.5
-* ruby 2.6
-
 == Known Issues
 
 * You must be using the ISO PostgreSQL date format (which is the
data/ext/sequel_pg/sequel_pg.c
CHANGED
@@ -1,4 +1,4 @@
-#define SEQUEL_PG_VERSION_INTEGER
+#define SEQUEL_PG_VERSION_INTEGER 11500
 
 #include <string.h>
 #include <stdio.h>
@@ -15,9 +15,6 @@
 #include <ruby/version.h>
 #include <ruby/encoding.h>
 
-#ifndef SPG_MAX_FIELDS
-#define SPG_MAX_FIELDS 256
-#endif
 #define SPG_MINUTES_PER_DAY 1440.0
 #define SPG_SECONDS_PER_DAY 86400.0
 
@@ -73,9 +70,11 @@
 PGconn* pg_get_pgconn(VALUE);
 PGresult* pgresult_get(VALUE);
 int pg_get_result_enc_idx(VALUE);
+VALUE pgresult_stream_any(VALUE self, void (*yielder)(VALUE, int, int, void*), void* data);
 
 static int spg_use_ipaddr_alloc;
 static int spg_use_pg_get_result_enc_idx;
+static int spg_use_pg_stream_any;
 
 static VALUE spg_Sequel;
 static VALUE spg_PGArray;
@@ -535,7 +534,7 @@ static VALUE spg_timestamp(const char *s, VALUE self, size_t length, int tz) {
   }
 
   if (remaining < 19) {
-    return spg_timestamp_error(s, self, "unexpected
+    return spg_timestamp_error(s, self, "unexpected timestamp format, too short");
   }
 
   year = parse_year(&p, &remaining);
@@ -931,8 +930,10 @@ static VALUE spg__array_col_value(char *v, size_t length, VALUE converter, int e
       break;
     case 869: /* inet */
    case 650: /* cidr */
-      rv = spg_inet(v, length);
-      break;
+      if (!RTEST(converter)) {
+        rv = spg_inet(v, length);
+        break;
+      }
     default:
       rv = rb_tainted_str_new(v, length);
       PG_ENCODING_SET_NOCHECK(rv, enc_index);
@@ -1079,10 +1080,6 @@ static VALUE spg__col_value(VALUE self, PGresult *res, long i, long j, VALUE* co
       rv = rb_tainted_str_new(v, PQgetlength(res, i, j));
       PG_ENCODING_SET_NOCHECK(rv, enc_index);
       break;
-    case 869: /* inet */
-    case 650: /* cidr */
-      rv = spg_inet(v, PQgetlength(res, i, j));
-      break;
     /* array types */
     case 1009:
     case 1014:
@@ -1220,17 +1217,30 @@ static VALUE spg__col_value(VALUE self, PGresult *res, long i, long j, VALUE* co
           scalar_oid = 22;
           break;
         case 1041:
+          if (RTEST(colconvert[j])) {
+            goto default_cond;
+          }
           array_type = spg_sym_inet;
           scalar_oid = 869;
           break;
         case 651:
+          if (RTEST(colconvert[j])) {
+            goto default_cond;
+          }
           array_type = spg_sym_cidr;
           scalar_oid = 650;
           break;
       }
       rv = spg_array_value(v, PQgetlength(res, i, j), colconvert[j], enc_index, scalar_oid, self, array_type);
       break;
+    case 869: /* inet */
+    case 650: /* cidr */
+      if (colconvert[j] == Qnil) {
+        rv = spg_inet(v, PQgetlength(res, i, j));
+        break;
+      }
     default:
+    default_cond:
       rv = rb_tainted_str_new(v, PQgetlength(res, i, j));
       PG_ENCODING_SET_NOCHECK(rv, enc_index);
       if (colconvert[j] != Qnil) {
@@ -1369,10 +1379,7 @@ static void spg_set_column_info(VALUE self, PGresult *res, VALUE *colsyms, VALUE
   rb_funcall(self, spg_id_columns_equal, 1, rb_ary_new4(nfields, colsyms));
 }
 
-static VALUE spg_yield_hash_rows(VALUE self, VALUE rres, VALUE ignore) {
-  PGresult *res;
-  VALUE colsyms[SPG_MAX_FIELDS];
-  VALUE colconvert[SPG_MAX_FIELDS];
+static VALUE spg_yield_hash_rows_internal(VALUE self, PGresult *res, int enc_index, VALUE* colsyms, VALUE* colconvert) {
   long ntuples;
   long nfields;
   long i;
@@ -1382,21 +1389,9 @@ static VALUE spg_yield_hash_rows(VALUE self, VALUE rres, VALUE ignore) {
   VALUE pg_type;
   VALUE pg_value;
   char type = SPG_YIELD_NORMAL;
-  int enc_index;
-
-  if (!RTEST(rres)) {
-    return self;
-  }
-  res = pgresult_get(rres);
-
-  enc_index = spg_use_pg_get_result_enc_idx ? pg_get_result_enc_idx(rres) : enc_get_index(rres);
 
   ntuples = PQntuples(res);
   nfields = PQnfields(res);
-  if (nfields > SPG_MAX_FIELDS) {
-    rb_raise(rb_eRangeError, "more than %d columns in query (%ld columns detected)", SPG_MAX_FIELDS, nfields);
-  }
-
   spg_set_column_info(self, res, colsyms, colconvert, enc_index);
 
   opts = rb_funcall(self, spg_id_opts, 0);
@@ -1613,6 +1608,40 @@ static VALUE spg_yield_hash_rows(VALUE self, VALUE rres, VALUE ignore) {
   return self;
 }
 
+#define def_spg_yield_hash_rows(max_fields) static VALUE spg_yield_hash_rows_ ## max_fields(VALUE self, PGresult *res, int enc_index) { \
+  VALUE colsyms[max_fields]; \
+  VALUE colconvert[max_fields]; \
+  return spg_yield_hash_rows_internal(self, res, enc_index, colsyms, colconvert); \
+}
+
+def_spg_yield_hash_rows(16)
+def_spg_yield_hash_rows(64)
+def_spg_yield_hash_rows(256)
+def_spg_yield_hash_rows(1664)
+
+static VALUE spg_yield_hash_rows(VALUE self, VALUE rres, VALUE ignore) {
+  PGresult *res;
+  long nfields;
+  int enc_index;
+
+  if (!RTEST(rres)) {
+    return self;
+  }
+  res = pgresult_get(rres);
+
+  enc_index = spg_use_pg_get_result_enc_idx ? pg_get_result_enc_idx(rres) : enc_get_index(rres);
+
+  nfields = PQnfields(res);
+  if (nfields <= 16) return spg_yield_hash_rows_16(self, res, enc_index);
+  else if (nfields <= 64) return spg_yield_hash_rows_64(self, res, enc_index);
+  else if (nfields <= 256) return spg_yield_hash_rows_256(self, res, enc_index);
+  else if (nfields <= 1664) return spg_yield_hash_rows_1664(self, res, enc_index);
+  else rb_raise(rb_eRangeError, "more than 1664 columns in query (%ld columns detected)", nfields);
+
+  /* UNREACHABLE */
+  return self;
+}
+
 static VALUE spg_supports_streaming_p(VALUE self) {
   return
 #if HAVE_PQSETSINGLEROWMODE
@@ -1632,12 +1661,39 @@ static VALUE spg_set_single_row_mode(VALUE self) {
   return Qnil;
 }
 
-
-
-  VALUE
-  VALUE
-  VALUE
-
+struct spg__yield_each_row_stream_data {
+  VALUE self;
+  VALUE *colsyms;
+  VALUE *colconvert;
+  VALUE pg_value;
+  int enc_index;
+  char type;
+};
+
+static void spg__yield_each_row_stream(VALUE rres, int ntuples, int nfields, void *rdata) {
+  struct spg__yield_each_row_stream_data* data = (struct spg__yield_each_row_stream_data *)rdata;
+  VALUE h = rb_hash_new();
+  VALUE self = data->self;
+  VALUE *colsyms = data->colsyms;
+  VALUE *colconvert= data->colconvert;
+  PGresult *res = pgresult_get(rres);
+  int enc_index = data->enc_index;
+  long j;
+
+  for(j=0; j<nfields; j++) {
+    rb_hash_aset(h, colsyms[j], spg__col_value(self, res, 0, j, colconvert , enc_index));
+  }
+
+  if(data->type == SPG_YIELD_MODEL) {
+    VALUE model = rb_obj_alloc(data->pg_value);
+    rb_ivar_set(model, spg_id_values, h);
+    rb_yield(model);
+  } else {
+    rb_yield(h);
+  }
+}
+
+static VALUE spg__yield_each_row_internal(VALUE self, VALUE rconn, VALUE rres, PGresult *res, int enc_index, VALUE *colsyms, VALUE *colconvert) {
   long nfields;
   long j;
   VALUE h;
@@ -1645,19 +1701,9 @@ static VALUE spg__yield_each_row(VALUE self) {
   VALUE pg_type;
   VALUE pg_value = Qnil;
   char type = SPG_YIELD_NORMAL;
-
-
-  rconn = rb_ary_entry(self, 1);
-  self = rb_ary_entry(self, 0);
+  struct spg__yield_each_row_stream_data data;
 
-
-  if (rres == Qnil) {
-    goto end_yield_each_row;
-  }
-  rb_funcall(rres, spg_id_check, 0);
-  res = pgresult_get(rres);
-
-  enc_index = spg_use_pg_get_result_enc_idx ? pg_get_result_enc_idx(rres) : enc_get_index(rres);
+  nfields = PQnfields(res);
 
   /* Only handle regular and model types. All other types require compiling all
    * of the results at once, which is not a use case for streaming. The streaming
@@ -1671,14 +1717,20 @@ static VALUE spg__yield_each_row(VALUE self) {
     }
   }
 
-  nfields = PQnfields(res);
-  if (nfields > SPG_MAX_FIELDS) {
-    rb_funcall(rres, spg_id_clear, 0);
-    rb_raise(rb_eRangeError, "more than %d columns in query", SPG_MAX_FIELDS);
-  }
-
   spg_set_column_info(self, res, colsyms, colconvert, enc_index);
 
+  if (spg_use_pg_stream_any) {
+    data.self = self;
+    data.colsyms = colsyms;
+    data.colconvert = colconvert;
+    data.pg_value = pg_value;
+    data.enc_index = enc_index;
+    data.type = type;
+
+    pgresult_stream_any(rres, spg__yield_each_row_stream, &data);
+    return self;
+  }
+
   while (PQntuples(res) != 0) {
     h = rb_hash_new();
     for(j=0; j<nfields; j++) {
@@ -1698,14 +1750,57 @@ static VALUE spg__yield_each_row(VALUE self) {
 
     rres = rb_funcall(rconn, spg_id_get_result, 0);
     if (rres == Qnil) {
-
+      return self;
     }
     rb_funcall(rres, spg_id_check, 0);
     res = pgresult_get(rres);
   }
   rb_funcall(rres, spg_id_clear, 0);
 
-
+  return self;
+}
+
+#define def_spg__yield_each_row(max_fields) static VALUE spg__yield_each_row_ ## max_fields(VALUE self, VALUE rconn, VALUE rres, PGresult *res, int enc_index) { \
+  VALUE colsyms[max_fields]; \
+  VALUE colconvert[max_fields]; \
+  return spg__yield_each_row_internal(self, rconn, rres, res, enc_index, colsyms, colconvert); \
+}
+
+def_spg__yield_each_row(16)
+def_spg__yield_each_row(64)
+def_spg__yield_each_row(256)
+def_spg__yield_each_row(1664)
+
+static VALUE spg__yield_each_row(VALUE self) {
+  PGresult *res;
+  VALUE rres;
+  VALUE rconn;
+  int enc_index;
+  long nfields;
+
+  rconn = rb_ary_entry(self, 1);
+  self = rb_ary_entry(self, 0);
+
+  rres = rb_funcall(rconn, spg_id_get_result, 0);
+  if (rres == Qnil) {
+    return self;
+  }
+  rb_funcall(rres, spg_id_check, 0);
+  res = pgresult_get(rres);
+
+  enc_index = spg_use_pg_get_result_enc_idx ? pg_get_result_enc_idx(rres) : enc_get_index(rres);
+
+  nfields = PQnfields(res);
+  if (nfields <= 16) return spg__yield_each_row_16(self, rconn, rres, res, enc_index);
+  else if (nfields <= 64) return spg__yield_each_row_64(self, rconn, rres, res, enc_index);
+  else if (nfields <= 256) return spg__yield_each_row_256(self, rconn, rres, res, enc_index);
+  else if (nfields <= 1664) return spg__yield_each_row_1664(self, rconn, rres, res, enc_index);
+  else {
+    rb_funcall(rres, spg_id_clear, 0);
+    rb_raise(rb_eRangeError, "more than 1664 columns in query (%ld columns detected)", nfields);
+  }
+
+  /* UNREACHABLE */
   return self;
 }
 
@@ -1761,10 +1856,21 @@ void Init_sequel_pg(void) {
     }
   }
 
-
-
+  c = rb_eval_string("defined?(PG::VERSION) && PG::VERSION.split('.').map(&:to_i)");
+  if (RB_TYPE_P(c, T_ARRAY) && RARRAY_LEN(c) >= 3) {
+    if (FIX2INT(RARRAY_AREF(c, 0)) > 1) {
+      spg_use_pg_get_result_enc_idx = 1;
+      spg_use_pg_stream_any = 1;
+    } else if (FIX2INT(RARRAY_AREF(c, 0)) == 1) {
+      if (FIX2INT(RARRAY_AREF(c, 1)) >= 2) {
+        spg_use_pg_get_result_enc_idx = 1;
+      }
+      if (FIX2INT(RARRAY_AREF(c, 1)) > 3 || (FIX2INT(RARRAY_AREF(c, 1)) == 3 && FIX2INT(RARRAY_AREF(c, 2)) >= 4)) {
+        spg_use_pg_stream_any = 1;
+      }
+    }
   }
-
+
   rb_const_set(spg_Postgres, rb_intern("SEQUEL_PG_VERSION_INTEGER"), INT2FIX(SEQUEL_PG_VERSION_INTEGER));
 
   spg_id_BigDecimal = rb_intern("BigDecimal");
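The spg__col_value and spg__array_col_value hunks above make the built-in inet/cidr handling yield to a registered conversion proc (the 1.13.0 changelog entry). A hedged sketch of such an override; DB, :hosts, and :addr are placeholder names, and the conversion_procs registry keyed by type OID is assumed from the Sequel postgres adapter:

  # OIDs 869 (inet) and 650 (cidr), as in the C source above.
  DB.conversion_procs[869] = proc { |s| s }  # leave inet values as raw strings
  DB.conversion_procs[650] = proc { |s| s }  # leave cidr values as raw strings
  DB[:hosts].select_map(:addr)               # values now pass through the procs above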
data/lib/sequel/extensions/pg_streaming.rb
CHANGED
@@ -73,12 +73,18 @@ module Sequel::Postgres::Streaming
 
     private
 
+    unless Sequel::Postgres::Adapter.method_defined?(:send_query_params)
+      def send_query_params(*args)
+        send_query(*args)
+      end
+    end
+
     if Sequel::Database.instance_methods.map(&:to_s).include?('log_connection_yield')
       # If using single row mode, send the query instead of executing it.
       def execute_query(sql, args)
         if @single_row_mode
           @single_row_mode = false
-          @db.log_connection_yield(sql, self, args){args ?
+          @db.log_connection_yield(sql, self, args){args ? send_query_params(sql, args) : send_query(sql)}
           set_single_row_mode
           block
           self
@@ -122,7 +128,12 @@ module Sequel::Postgres::Streaming
       unless block_given?
         return enum_for(:paged_each, opts)
       end
-
+
+      if stream_results?
+        each(&block)
+      else
+        super
+      end
     end
 
     # Return a clone of the dataset that will use streaming to load
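Usage-wise, the paged_each change above means streaming must now be requested explicitly via Dataset#stream (the clone method defined further down in this extension). A hedged sketch with placeholder names (DB, :events):

  DB.extension(:pg_streaming)

  DB[:events].paged_each { |row| row }         # 1.15.0: ordinary paged queries, no streaming
  DB[:events].stream.paged_each { |row| row }  # opt in to a single streamed query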
data/lib/sequel_pg/sequel_pg.rb
CHANGED
@@ -120,6 +120,9 @@ if defined?(Sequel::Postgres::PGArray)
   # pg_array extension previously loaded
 
   class Sequel::Postgres::PGArray::Creator
+    # Avoid method redefined verbose warning
+    alias call call if method_defined?(:call)
+
     # Override Creator to use sequel_pg's C-based parser instead of the pure ruby parser.
     def call(string)
       Sequel::Postgres::PGArray.new(Sequel::Postgres.parse_pg_array(string, @converter), @type)
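The alias line added above relies on a general Ruby behavior: redefining a method that was just re-aliased to itself does not trigger the "method redefined" warning under $VERBOSE. A standalone sketch of the idiom (this Creator class is illustrative, not the gem's):

  $VERBOSE = true

  class Creator
    def call(string); string.upcase; end
  end

  class Creator
    alias call call        # re-alias the current definition to itself
    def call(string)       # no "method redefined" warning is emitted here
      string.downcase
    end
  end

  Creator.new.call("Hi")   # => "hi"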
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: sequel_pg
 version: !ruby/object:Gem::Version
-  version: 1.12.5
+  version: 1.15.0
 platform: ruby
 authors:
 - Jeremy Evans
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2022-03-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: pg
@@ -101,7 +101,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.3.7
 signing_key:
 specification_version: 4
 summary: Faster SELECTs when using Sequel with pg