sequel_pg 1.12.2 → 1.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: a66312d2f3a6446adc3a43ac5bf4fe9651136a89d645e02f7e0f0da8228ddb97
-   data.tar.gz: 80f9ab2109eb0571a820f4e29efb6261adcaaf6f9013005b6565e8d37ef21ef6
+   metadata.gz: 277c659f136e9d34a715ccd94bab948b776d903f72b0f679fd919bedacef9cf7
+   data.tar.gz: 031cd967776330ecc1c999c8597c49eb5a96f376dfcc88c51ab9d9bdbda52190
  SHA512:
-   metadata.gz: 073a1004d8f109314aead84089cc0a3d3810c29ece58a89eac8d9750bf64b645998a4df8fdc3c23ad8608a7aec52b46066911b366b72acd124c19baf1a328378
-   data.tar.gz: '05806296363f8df23db1c4454d1d601eb938aebbed17f5c599535374ff4cc4405e71245f8ee44242e81f850d0e86914ae5d52fe7b14cd0c0dbdd8b507f8ec4c9'
+   metadata.gz: 7b722922bf49095005ad19bc7bb1ba41d358d851f367df6f51bd5fbf0202784244cee83293919ee27fb1ff7accb3fb10ddbbc7745455cf545f2b1a7a7e301bdd
+   data.tar.gz: 2b3933ab84065cdd81b5c544523ba4c50192acbc912b449d4d4f03d5650873a5ad125fc3f339a435cbcc514d350bbf6e98d49d10f39349faa9cb8b2c7236fa61
data/CHANGELOG CHANGED
@@ -1,3 +1,29 @@
+ === 1.14.0 (2020-09-22)
+
+ * Reduce stack memory usage for result sets with 64 or fewer columns (jeremyevans)
+
+ * Support result sets with more than 256 columns by default (jeremyevans) (#39)
+
+ === 1.13.0 (2020-04-13)
+
+ * Allow overriding of inet/cidr type conversion using conversion procs (beanieboi, jeremyevans) (#36, #37)
+
+ === 1.12.5 (2020-03-23)
+
+ * Fix offset calculation for timestamptz types when datetime_class is DateTime and using local application timezone (jeremyevans)
+
+ * Fix wrong method call when parsing timestamptz types when datetime_class is Time and using utc database timezone and local application timezone (jeremyevans)
+
+ === 1.12.4 (2020-01-02)
+
+ * Work with pg 1.2.1+ (jeremyevans)
+
+ === 1.12.3 (2020-01-02)
+
+ * Warn and do not load sequel_pg if pg >1.2 is used (jeremyevans)
+
+ * Avoid verbose warnings on Ruby 2.7 due to tainting (jeremyevans)
+
  === 1.12.2 (2019-06-06)
 
  * Avoid use of pkg_config as it breaks compilation in some environments (jeremyevans) (#33)
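The two 1.14.0 entries above correspond to the sequel_pg.c changes further down in this diff: the single SPG_MAX_FIELDS stack limit is replaced by macro-generated wrappers for a few column-count buckets (16, 64, 256, 1664), and a dispatcher picks the smallest bucket that fits. Below is a minimal, self-contained C sketch of that bucket-dispatch pattern; the helper names and the toy per-column work are illustrative only, not the gem's code.

  #include <stdio.h>

  /* Toy stand-in for the real per-row work; it only needs one scratch slot
   * per column, so the wrapper decides how much stack to set aside. */
  static long sum_columns(long ncols, long *scratch) {
    long j, total = 0;
    for (j = 0; j < ncols; j++) { scratch[j] = j; total += scratch[j]; }
    return total;
  }

  /* Stamp out one wrapper per bucket; each wrapper's stack array is sized
   * to its bucket, so small result sets reserve 16 or 64 slots, not 1664. */
  #define DEF_SUM_COLUMNS(max_fields) \
    static long sum_columns_##max_fields(long ncols) { \
      long scratch[max_fields]; \
      return sum_columns(ncols, scratch); \
    }

  DEF_SUM_COLUMNS(16)
  DEF_SUM_COLUMNS(64)
  DEF_SUM_COLUMNS(256)
  DEF_SUM_COLUMNS(1664)

  /* Dispatcher: pick the smallest bucket that fits the actual column count. */
  static long sum_columns_dispatch(long ncols) {
    if (ncols <= 16) return sum_columns_16(ncols);
    else if (ncols <= 64) return sum_columns_64(ncols);
    else if (ncols <= 256) return sum_columns_256(ncols);
    else if (ncols <= 1664) return sum_columns_1664(ncols);
    return -1; /* the real code raises RangeError past 1664 columns */
  }

  int main(void) {
    printf("%ld\n", sum_columns_dispatch(40)); /* served by the 64-slot wrapper */
    return 0;
  }

With this shape, narrow result sets pay for only 16 or 64 stack slots per array instead of the previous fixed 256, while anything up to 1664 columns works without a recompile.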
@@ -1,4 +1,4 @@
- Copyright (c) 2010-2018 Jeremy Evans
+ Copyright (c) 2010-2020 Jeremy Evans
 
  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to
@@ -70,12 +70,6 @@ can do the following:
 
  gem install sequel_pg
 
- Note that by default sequel_pg only supports result sets with up to
- 256 columns. If you will have a result set with more than 256 columns,
- you should modify the maximum supported number of columns via:
-
- gem install sequel_pg -- --with-cflags=\"-DSPG_MAX_FIELDS=512\"
-
  Make sure the pg_config binary is in your PATH so the installation
  can find the PostgreSQL shared library and header files. Alternatively,
  you can use the POSTGRES_LIB and POSTGRES_INCLUDE environment
@@ -130,6 +124,7 @@ sequel_pg has been tested on the following:
  * ruby 2.4
  * ruby 2.5
  * ruby 2.6
+ * ruby 2.7
 
  == Known Issues
 
@@ -1,5 +1,6 @@
  require 'mkmf'
  $CFLAGS << " -O0 -g" if ENV['DEBUG']
+ $CFLAGS << " -Drb_tainted_str_new=rb_str_new -DNO_TAINT" if RUBY_VERSION >= '2.7'
  $CFLAGS << " -Wall " unless RUBY_PLATFORM =~ /solaris/
  dir_config('pg', ENV["POSTGRES_INCLUDE"] || (IO.popen("pg_config --includedir").readline.chomp rescue nil),
  ENV["POSTGRES_LIB"] || (IO.popen("pg_config --libdir").readline.chomp rescue nil))
@@ -1,4 +1,4 @@
- #define SEQUEL_PG_VERSION_INTEGER 11202
+ #define SEQUEL_PG_VERSION_INTEGER 11400
 
  #include <string.h>
  #include <stdio.h>
@@ -15,9 +15,6 @@
  #include <ruby/version.h>
  #include <ruby/encoding.h>
 
- #ifndef SPG_MAX_FIELDS
- #define SPG_MAX_FIELDS 256
- #endif
  #define SPG_MINUTES_PER_DAY 1440.0
  #define SPG_SECONDS_PER_DAY 86400.0
 
@@ -72,8 +69,10 @@
  /* External functions defined by ruby-pg */
  PGconn* pg_get_pgconn(VALUE);
  PGresult* pgresult_get(VALUE);
+ int pg_get_result_enc_idx(VALUE);
 
  static int spg_use_ipaddr_alloc;
+ static int spg_use_pg_get_result_enc_idx;
 
  static VALUE spg_Sequel;
  static VALUE spg_PGArray;
@@ -409,7 +408,7 @@ static VALUE spg_timestamp_error(const char *s, VALUE self, const char *error_ms
  self = rb_funcall(self, spg_id_db, 0);
  if(RTEST(rb_funcall(self, spg_id_convert_infinite_timestamps, 0))) {
  if((strcmp(s, "infinity") == 0) || (strcmp(s, "-infinity") == 0)) {
- return rb_funcall(self, spg_id_infinite_timestamp_value, 1, rb_tainted_str_new2(s));
+ return rb_funcall(self, spg_id_infinite_timestamp_value, 1, rb_tainted_str_new(s, strlen(s)));
  }
  }
  rb_raise(rb_eArgError, "%s", error_msg);
@@ -629,7 +628,7 @@ static VALUE spg_timestamp(const char *s, VALUE self, size_t length, int tz) {
  if (tz & SPG_APP_UTC) {
  dt = rb_funcall(dt, spg_id_utc, 0);
  } else if (tz & SPG_APP_LOCAL) {
- dt = rb_funcall(dt, spg_id_local, 0);
+ dt = rb_funcall(dt, spg_id_localtime, 0);
  }
 
  return dt;
@@ -701,8 +700,8 @@ static VALUE spg_timestamp(const char *s, VALUE self, size_t length, int tz) {
  SPG_DT_ADD_USEC
 
  if (tz & SPG_APP_LOCAL) {
- utc_offset = NUM2INT(rb_funcall(rb_funcall(rb_cTime, spg_id_new, 0), spg_id_utc_offset, 0))/SPG_SECONDS_PER_DAY;
- dt = rb_funcall(dt, spg_id_new_offset, 1, rb_float_new(utc_offset));
+ offset_fraction = NUM2INT(rb_funcall(rb_funcall(rb_cTime, spg_id_local, 6, INT2NUM(year), INT2NUM(month), INT2NUM(day), INT2NUM(hour), INT2NUM(min), INT2NUM(sec)), spg_id_utc_offset, 0))/SPG_SECONDS_PER_DAY;
+ dt = rb_funcall(dt, spg_id_new_offset, 1, rb_float_new(offset_fraction));
  } else if (tz & SPG_APP_UTC) {
  dt = rb_funcall(dt, spg_id_new_offset, 1, INT2NUM(0));
  }
@@ -857,7 +856,11 @@ static VALUE spg_create_Blob(VALUE v) {
  if (bi->blob_string == NULL) {
  rb_raise(rb_eNoMemError, "PQunescapeBytea failure: probably not enough memory");
  }
- return rb_obj_taint(rb_str_new_with_class(spg_Blob_instance, bi->blob_string, bi->length));
+ v = rb_str_new_with_class(spg_Blob_instance, bi->blob_string, bi->length);
+ #ifndef NO_TAINT
+ rb_obj_taint(v);
+ #endif
+ return v;
  }
 
  static VALUE spg_fetch_rows_set_cols(VALUE self, VALUE ignore) {
@@ -925,8 +928,10 @@ static VALUE spg__array_col_value(char *v, size_t length, VALUE converter, int e
  break;
  case 869: /* inet */
  case 650: /* cidr */
- rv = spg_inet(v, length);
- break;
+ if (!RTEST(converter)) {
+ rv = spg_inet(v, length);
+ break;
+ }
  default:
  rv = rb_tainted_str_new(v, length);
  PG_ENCODING_SET_NOCHECK(rv, enc_index);
@@ -1073,10 +1078,6 @@ static VALUE spg__col_value(VALUE self, PGresult *res, long i, long j, VALUE* co
  rv = rb_tainted_str_new(v, PQgetlength(res, i, j));
  PG_ENCODING_SET_NOCHECK(rv, enc_index);
  break;
- case 869: /* inet */
- case 650: /* cidr */
- rv = spg_inet(v, PQgetlength(res, i, j));
- break;
  /* array types */
  case 1009:
  case 1014:
@@ -1214,17 +1215,30 @@ static VALUE spg__col_value(VALUE self, PGresult *res, long i, long j, VALUE* co
  scalar_oid = 22;
  break;
  case 1041:
+ if (RTEST(colconvert[j])) {
+ goto default_cond;
+ }
  array_type = spg_sym_inet;
  scalar_oid = 869;
  break;
  case 651:
+ if (RTEST(colconvert[j])) {
+ goto default_cond;
+ }
  array_type = spg_sym_cidr;
  scalar_oid = 650;
  break;
  }
  rv = spg_array_value(v, PQgetlength(res, i, j), colconvert[j], enc_index, scalar_oid, self, array_type);
  break;
+ case 869: /* inet */
+ case 650: /* cidr */
+ if (colconvert[j] == Qnil) {
+ rv = spg_inet(v, PQgetlength(res, i, j));
+ break;
+ }
  default:
+ default_cond:
  rv = rb_tainted_str_new(v, PQgetlength(res, i, j));
  PG_ENCODING_SET_NOCHECK(rv, enc_index);
  if (colconvert[j] != Qnil) {
@@ -1363,10 +1377,7 @@ static void spg_set_column_info(VALUE self, PGresult *res, VALUE *colsyms, VALUE
  rb_funcall(self, spg_id_columns_equal, 1, rb_ary_new4(nfields, colsyms));
  }
 
- static VALUE spg_yield_hash_rows(VALUE self, VALUE rres, VALUE ignore) {
- PGresult *res;
- VALUE colsyms[SPG_MAX_FIELDS];
- VALUE colconvert[SPG_MAX_FIELDS];
+ static VALUE spg_yield_hash_rows_internal(VALUE self, PGresult *res, int enc_index, VALUE* colsyms, VALUE* colconvert) {
  long ntuples;
  long nfields;
  long i;
@@ -1376,21 +1387,9 @@ static VALUE spg_yield_hash_rows(VALUE self, VALUE rres, VALUE ignore) {
  VALUE pg_type;
  VALUE pg_value;
  char type = SPG_YIELD_NORMAL;
- int enc_index;
-
- if (!RTEST(rres)) {
- return self;
- }
- res = pgresult_get(rres);
-
- enc_index = enc_get_index(rres);
 
  ntuples = PQntuples(res);
  nfields = PQnfields(res);
- if (nfields > SPG_MAX_FIELDS) {
- rb_raise(rb_eRangeError, "more than %d columns in query (%ld columns detected)", SPG_MAX_FIELDS, nfields);
- }
-
  spg_set_column_info(self, res, colsyms, colconvert, enc_index);
 
  opts = rb_funcall(self, spg_id_opts, 0);
@@ -1607,6 +1606,40 @@ static VALUE spg_yield_hash_rows(VALUE self, VALUE rres, VALUE ignore) {
  return self;
  }
 
+ #define def_spg_yield_hash_rows(max_fields) static VALUE spg_yield_hash_rows_ ## max_fields(VALUE self, PGresult *res, int enc_index) { \
+ VALUE colsyms[max_fields]; \
+ VALUE colconvert[max_fields]; \
+ return spg_yield_hash_rows_internal(self, res, enc_index, colsyms, colconvert); \
+ }
+
+ def_spg_yield_hash_rows(16)
+ def_spg_yield_hash_rows(64)
+ def_spg_yield_hash_rows(256)
+ def_spg_yield_hash_rows(1664)
+
+ static VALUE spg_yield_hash_rows(VALUE self, VALUE rres, VALUE ignore) {
+ PGresult *res;
+ long nfields;
+ int enc_index;
+
+ if (!RTEST(rres)) {
+ return self;
+ }
+ res = pgresult_get(rres);
+
+ enc_index = spg_use_pg_get_result_enc_idx ? pg_get_result_enc_idx(rres) : enc_get_index(rres);
+
+ nfields = PQnfields(res);
+ if (nfields <= 16) return spg_yield_hash_rows_16(self, res, enc_index);
+ else if (nfields <= 64) return spg_yield_hash_rows_64(self, res, enc_index);
+ else if (nfields <= 256) return spg_yield_hash_rows_256(self, res, enc_index);
+ else if (nfields <= 1664) return spg_yield_hash_rows_1664(self, res, enc_index);
+ else rb_raise(rb_eRangeError, "more than 1664 columns in query (%ld columns detected)", nfields);
+
+ /* UNREACHABLE */
+ return self;
+ }
+
  static VALUE spg_supports_streaming_p(VALUE self) {
  return
  #if HAVE_PQSETSINGLEROWMODE
@@ -1626,12 +1659,7 @@ static VALUE spg_set_single_row_mode(VALUE self) {
  return Qnil;
  }
 
- static VALUE spg__yield_each_row(VALUE self) {
- PGresult *res;
- VALUE rres;
- VALUE rconn;
- VALUE colsyms[SPG_MAX_FIELDS];
- VALUE colconvert[SPG_MAX_FIELDS];
+ static VALUE spg__yield_each_row_internal(VALUE self, VALUE rconn, VALUE rres, PGresult *res, int enc_index, VALUE *colsyms, VALUE *colconvert) {
  long nfields;
  long j;
  VALUE h;
@@ -1639,19 +1667,8 @@ static VALUE spg__yield_each_row(VALUE self) {
  VALUE pg_type;
  VALUE pg_value = Qnil;
  char type = SPG_YIELD_NORMAL;
- int enc_index;
-
- rconn = rb_ary_entry(self, 1);
- self = rb_ary_entry(self, 0);
-
- rres = rb_funcall(rconn, spg_id_get_result, 0);
- if (rres == Qnil) {
- goto end_yield_each_row;
- }
- rb_funcall(rres, spg_id_check, 0);
- res = pgresult_get(rres);
 
- enc_index = enc_get_index(rres);
+ nfields = PQnfields(res);
 
  /* Only handle regular and model types. All other types require compiling all
  * of the results at once, which is not a use case for streaming. The streaming
@@ -1665,12 +1682,6 @@ static VALUE spg__yield_each_row(VALUE self) {
  }
  }
 
- nfields = PQnfields(res);
- if (nfields > SPG_MAX_FIELDS) {
- rb_funcall(rres, spg_id_clear, 0);
- rb_raise(rb_eRangeError, "more than %d columns in query", SPG_MAX_FIELDS);
- }
-
  spg_set_column_info(self, res, colsyms, colconvert, enc_index);
 
  while (PQntuples(res) != 0) {
@@ -1692,14 +1703,57 @@ static VALUE spg__yield_each_row(VALUE self) {
 
  rres = rb_funcall(rconn, spg_id_get_result, 0);
  if (rres == Qnil) {
- goto end_yield_each_row;
+ return self;
  }
  rb_funcall(rres, spg_id_check, 0);
  res = pgresult_get(rres);
  }
  rb_funcall(rres, spg_id_clear, 0);
 
- end_yield_each_row:
+ return self;
+ }
+
+ #define def_spg__yield_each_row(max_fields) static VALUE spg__yield_each_row_ ## max_fields(VALUE self, VALUE rconn, VALUE rres, PGresult *res, int enc_index) { \
+ VALUE colsyms[max_fields]; \
+ VALUE colconvert[max_fields]; \
+ return spg__yield_each_row_internal(self, rconn, rres, res, enc_index, colsyms, colconvert); \
+ }
+
+ def_spg__yield_each_row(16)
+ def_spg__yield_each_row(64)
+ def_spg__yield_each_row(256)
+ def_spg__yield_each_row(1664)
+
+ static VALUE spg__yield_each_row(VALUE self) {
+ PGresult *res;
+ VALUE rres;
+ VALUE rconn;
+ int enc_index;
+ long nfields;
+
+ rconn = rb_ary_entry(self, 1);
+ self = rb_ary_entry(self, 0);
+
+ rres = rb_funcall(rconn, spg_id_get_result, 0);
+ if (rres == Qnil) {
+ return self;
+ }
+ rb_funcall(rres, spg_id_check, 0);
+ res = pgresult_get(rres);
+
+ enc_index = spg_use_pg_get_result_enc_idx ? pg_get_result_enc_idx(rres) : enc_get_index(rres);
+
+ nfields = PQnfields(res);
+ if (nfields <= 16) return spg__yield_each_row_16(self, rconn, rres, res, enc_index);
+ else if (nfields <= 64) return spg__yield_each_row_64(self, rconn, rres, res, enc_index);
+ else if (nfields <= 256) return spg__yield_each_row_256(self, rconn, rres, res, enc_index);
+ else if (nfields <= 1664) return spg__yield_each_row_1664(self, rconn, rres, res, enc_index);
+ else {
+ rb_funcall(rres, spg_id_clear, 0);
+ rb_raise(rb_eRangeError, "more than 1664 columns in query (%ld columns detected)", nfields);
+ }
+
+ /* UNREACHABLE */
  return self;
  }
 
@@ -1754,6 +1808,10 @@ void Init_sequel_pg(void) {
  return;
  }
  }
+
+ if (RTEST(rb_eval_string("defined?(PG::VERSION) && PG::VERSION.to_f >= 1.2"))) {
+ spg_use_pg_get_result_enc_idx = 1;
+ }
 
  rb_const_set(spg_Postgres, rb_intern("SEQUEL_PG_VERSION_INTEGER"), INT2FIX(SEQUEL_PG_VERSION_INTEGER));
 
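The block added to Init_sequel_pg above pairs with the spg_use_pg_get_result_enc_idx flag declared earlier: the loaded pg version is probed once at extension load time, and the per-row code later reads the cached flag to choose between pg_get_result_enc_idx() and enc_get_index(). A minimal sketch of that probe-once pattern; Init_enc_probe and use_pg_enc_idx are illustrative names only, not the gem's:

  #include <ruby.h>

  /* Cached at load time so per-row code can branch on a plain int. */
  static int use_pg_enc_idx;

  void Init_enc_probe(void) {
    /* Evaluate a tiny Ruby expression once; RTEST treats nil/false as "no". */
    if (RTEST(rb_eval_string("defined?(PG::VERSION) && PG::VERSION.to_f >= 1.2"))) {
      use_pg_enc_idx = 1;
    }
  }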
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: sequel_pg
  version: !ruby/object:Gem::Version
- version: 1.12.2
+ version: 1.14.0
  platform: ruby
  authors:
  - Jeremy Evans
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2019-06-06 00:00:00.000000000 Z
+ date: 2020-09-22 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: pg
@@ -17,6 +17,9 @@ dependencies:
  - - ">="
  - !ruby/object:Gem::Version
  version: 0.18.0
+ - - "!="
+ - !ruby/object:Gem::Version
+ version: 1.2.0
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
@@ -24,6 +27,9 @@ dependencies:
  - - ">="
  - !ruby/object:Gem::Version
  version: 0.18.0
+ - - "!="
+ - !ruby/object:Gem::Version
+ version: 1.2.0
  - !ruby/object:Gem::Dependency
  name: sequel
  requirement: !ruby/object:Gem::Requirement
@@ -95,7 +101,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.0.3
+ rubygems_version: 3.1.2
  signing_key:
  specification_version: 4
  summary: Faster SELECTs when using Sequel with pg