pg 1.3.2-x86-mingw32 → 1.3.5-x86-mingw32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.github/workflows/binary-gems.yml +1 -1
- data/.github/workflows/source-gem.yml +3 -4
- data/History.rdoc +44 -1
- data/Rakefile.cross +2 -0
- data/ext/errorcodes.rb +0 -0
- data/ext/extconf.rb +35 -4
- data/ext/pg.h +1 -0
- data/ext/pg_connection.c +19 -27
- data/ext/pg_record_coder.c +6 -4
- data/ext/pg_result.c +16 -9
- data/ext/pg_tuple.c +2 -8
- data/lib/2.5/pg_ext.so +0 -0
- data/lib/2.6/pg_ext.so +0 -0
- data/lib/2.7/pg_ext.so +0 -0
- data/lib/3.0/pg_ext.so +0 -0
- data/lib/3.1/pg_ext.so +0 -0
- data/lib/pg/basic_type_registry.rb +8 -3
- data/lib/pg/connection.rb +8 -7
- data/lib/pg/version.rb +1 -1
- data/lib/x86-mingw32/libpq.dll +0 -0
- data/misc/openssl-pg-segfault.rb +0 -0
- data/rakelib/task_extension.rb +46 -0
- data/sample/array_insert.rb +0 -0
- data/sample/async_api.rb +3 -7
- data/sample/async_copyto.rb +0 -0
- data/sample/async_mixed.rb +0 -0
- data/sample/check_conn.rb +0 -0
- data/sample/copydata.rb +0 -0
- data/sample/copyfrom.rb +0 -0
- data/sample/copyto.rb +0 -0
- data/sample/cursor.rb +0 -0
- data/sample/disk_usage_report.rb +0 -0
- data/sample/issue-119.rb +0 -0
- data/sample/losample.rb +0 -0
- data/sample/minimal-testcase.rb +0 -0
- data/sample/notify_wait.rb +0 -0
- data/sample/pg_statistics.rb +0 -0
- data/sample/replication_monitor.rb +0 -0
- data/sample/test_binary_values.rb +0 -0
- data/sample/wal_shipper.rb +0 -0
- data/sample/warehouse_partitions.rb +0 -0
- data.tar.gz.sig +0 -0
- metadata +3 -2
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1a712a730f257c2d56f3a5d2dd14ffe938f24668f84dd2489121ac0412f9ff85
+  data.tar.gz: 1f2d0c7ce8472465da40dae02bf4fda27519334dcccf772b9ce31961986862ed
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5215d65a832e4fef1a7dffbba41bdd4d8817f4a68d8c34060085754d9c98496df51ef34415a6c6a3de5b5b3c1b9841ae709c4bc08dec2b4b93079bb9c7baa1bd
+  data.tar.gz: 279a731af4ff16579fd78f366cdddd6461845a480adda19072486fa7ad47519d645e9996048d5355e05690876dd48c80913e6d6f0a15c3d1d9f762d94f3ba136
checksums.yaml.gz.sig
CHANGED
Binary file
data/.github/workflows/source-gem.yml
CHANGED
@@ -35,8 +35,8 @@ jobs:
           PGVER: "14"
         - os: windows
           ruby: "2.5"
-          PGVERSION: 9.
-          PGVER: "9.
+          PGVERSION: 9.4.26-1-windows-x64
+          PGVER: "9.4"
         - os: ubuntu
           ruby: "head"
           PGVER: "14"
@@ -65,7 +65,7 @@ jobs:
     steps:
     - uses: actions/checkout@v2
     - name: Set up Ruby
-      uses:
+      uses: ruby/setup-ruby@v1
      with:
        ruby-version: ${{ matrix.ruby }}
 
@@ -116,7 +116,6 @@ jobs:
     - run: gem install --local *.gem --verbose
 
     - name: Run specs
-      continue-on-error: ${{ matrix.ruby == 'truffleruby-head' }}
      env:
        PG_DEBUG: 0
      run: ruby -rpg -S rspec spec/**/*_spec.rb -cfdoc
data/History.rdoc
CHANGED
@@ -1,3 +1,46 @@
+== v1.3.5 [2022-03-31] Lars Kanis <lars@greiz-reinsdorf.de>
+
+Bugfixes:
+
+- Handle PGRES_COMMAND_OK in pgresult_stream_any. #447
+  Fixes usage when trying to stream the result of a procedure call that returns no results.
+
+Enhancements:
+
+- Rename BasicTypeRegistry#define_default_types to #register_default_types to use a more consistent terminology.
+  Keeping define_default_types for compatibility.
+- BasicTypeRegistry: return self instead of objects by accident.
+  This allows call chaining.
+- Add some April fun. #449
+
+Documentation:
+- Refine documentation of conn.socket_io and conn.connect_poll
+
+
+== v1.3.4 [2022-03-10] Lars Kanis <lars@greiz-reinsdorf.de>
+
+Bugfixes:
+
+- Don't leak IO in case of connection errors. #439
+  Previously it was kept open until the PG::Connection was garbage collected.
+- Fix a performance regression in conn.get_result noticed in single row mode. #442
+- Fix occasional error Errno::EBADF (Bad file descriptor) while connecting. #444
+- Fix compatibility of res.stream_each* methods with Fiber.scheduler. #446
+- Remove FL_TEST and FL_SET, which are MRI-internal. #437
+
+Enhancements:
+
+- Allow pgresult_stream_any to be used by sequel_pg. #443
+
+
+== v1.3.3 [2022-02-22] Lars Kanis <lars@greiz-reinsdorf.de>
+
+Bugfixes:
+
+- Fix omission of the third digit of IPv4 addresses in connection URI. #435
+- Fix wrong permission of certs/larskanis-2022.pem in the pg-1.3.2.gem. #432
+
+
 == v1.3.2 [2022-02-14] Lars Kanis <lars@greiz-reinsdorf.de>
 
 Bugfixes:
@@ -55,7 +98,7 @@ API Enhancements:
 - Run Connection.ping in a second thread.
 - Make discard_results scheduler friendly
 - Do all socket waiting through the conn.socket_io object.
-- Avoid PG.connect blocking while address resolution by automatically providing the +hostaddr+ parameter.
+- Avoid PG.connect blocking while address resolution by automatically providing the +hostaddr+ parameter and resolving in Ruby instead of libpq.
 - On Windows Fiber.scheduler support requires Ruby-3.1+.
   It is also only partly usable since many ruby IO methods are not yet scheduler aware on Windows.
 - Add support for pipeline mode of PostgreSQL-14. #401
data/Rakefile.cross
CHANGED
@@ -7,6 +7,7 @@ require 'rake/clean'
 require 'rake/extensiontask'
 require 'rake/extensioncompiler'
 require 'ostruct'
+require_relative 'rakelib/task_extension'
 
 MISCDIR = BASEDIR + 'misc'
 
@@ -20,6 +21,7 @@ end
 
 class CrossLibrary < OpenStruct
   include Rake::DSL
+  prepend TaskExtension
 
   def initialize(for_platform, openssl_config, toolchain)
     super()
data/ext/errorcodes.rb
CHANGED
File without changes
data/ext/extconf.rb
CHANGED
@@ -1,3 +1,5 @@
+# -*- encoding: utf-8 -*-
+
 require 'pp'
 require 'mkmf'
 
@@ -37,12 +39,12 @@ else
 
 	if pgconfig && pgconfig != 'ignore'
 		$stderr.puts "Using config values from %s" % [ pgconfig ]
-		incdir =
-		libdir =
+		incdir = IO.popen([pgconfig, "--includedir"], &:read).chomp
+		libdir = IO.popen([pgconfig, "--libdir"], &:read).chomp
 		dir_config 'pg', incdir, libdir
 
 		# Windows traditionally stores DLLs beside executables, not in libdir
-		dlldir = RUBY_PLATFORM=~/mingw|mswin/ ?
+		dlldir = RUBY_PLATFORM=~/mingw|mswin/ ? IO.popen([pgconfig, "--bindir"], &:read).chomp : libdir
 
 	elsif checking_for "libpq per pkg-config" do
 		_cflags, ldflags, _libs = pkg_config("libpq")
@@ -87,7 +89,7 @@ begin
 	have_library( 'libpq', 'PQconnectdb', ['libpq-fe.h'] ) ||
 	have_library( 'ms/libpq', 'PQconnectdb', ['libpq-fe.h'] )
 
-rescue SystemExit
+rescue SystemExit
 	install_text = case RUBY_PLATFORM
 	when /linux/
 		<<-EOT
@@ -167,3 +169,32 @@ end
 create_header()
 create_makefile( "pg_ext" )
 
+
+def message!(important_message)
+	message important_message
+	if !$stdout.tty? && File.chardev?('/dev/tty')
+		File.open('/dev/tty', 'w') do |tty|
+			tty.print important_message
+		end
+	end
+rescue
+end
+
+if "2022-04-01" == Time.now.strftime("%Y-%m-%d")
+	message! <<-EOM
+===================================================================
+HEADS UP! Prepare for pg-1.4.2022 ! 🎉🎉🎉
+
+Now that psycopg3 has most of the features of ruby-pg, we plan to
+switch to using it as our foundation for the next pg release.
+It will run through pycall and requires a working python setup.
+This will minimize our development and maintenance efforts, since
+it allows us to use one and the same code base for both programming
+languages - ruby and python. 👏😃
+
+And we follow the recent merge of the Django and Rails teams! ❤️
+
+Stay up-to-date at https://github.com/ged/ruby-pg/issues/449
+===================================================================
+	EOM
+end
data/ext/pg.h
CHANGED
@@ -344,6 +344,7 @@ void pg_typemap_compact _(( void * ));
 
 PGconn *pg_get_pgconn _(( VALUE ));
 t_pg_connection *pg_get_connection _(( VALUE ));
+VALUE pgconn_block _(( int, VALUE *, VALUE ));
 
 VALUE pg_new_result _(( PGresult *, VALUE ));
 VALUE pg_new_result_autoclear _(( PGresult *, VALUE ));
data/ext/pg_connection.c
CHANGED
@@ -450,17 +450,18 @@ pgconn_s_encrypt_password(VALUE self, VALUE password, VALUE username)
 *  the asynchronous connection is ready
 *
 * Example:
-*
-*
+*   require "io/wait"
+*
+*   conn = PG::Connection.connect_start(dbname: 'mydatabase')
 *   status = conn.connect_poll
 *   while(status != PG::PGRES_POLLING_OK) do
 *     # do some work while waiting for the connection to complete
 *     if(status == PG::PGRES_POLLING_READING)
-*
+*       unless conn.socket_io.wait_readable(10.0)
 *         raise "Asynchronous connection timed out!"
 *       end
 *     elsif(status == PG::PGRES_POLLING_WRITING)
-*
+*       unless conn.socket_io.wait_writable(10.0)
 *         raise "Asynchronous connection timed out!"
 *       end
 *     end
@@ -475,9 +476,7 @@ pgconn_connect_poll(VALUE self)
 	PostgresPollingStatusType status;
 	status = gvl_PQconnectPoll(pg_get_pgconn(self));
 
-
-		pgconn_close_socket_io(self);
-	}
+	pgconn_close_socket_io(self);
 
 	return INT2FIX((int)status);
 }
@@ -556,9 +555,7 @@ pgconn_reset_poll(VALUE self)
 	PostgresPollingStatusType status;
 	status = gvl_PQresetPoll(pg_get_pgconn(self));
 
-
-		pgconn_close_socket_io(self);
-	}
+	pgconn_close_socket_io(self);
 
 	return INT2FIX((int)status);
 }
@@ -822,13 +819,15 @@ pgconn_socket(VALUE self)
 * call-seq:
 *    conn.socket_io() -> IO
 *
-* Fetch
-* This object can be used for IO.select to wait for events while running
-*
+* Fetch an IO object created from the Connection's underlying socket.
+* This object can be used per <tt>socket_io.wait_readable</tt>, <tt>socket_io.wait_writable</tt> or for <tt>IO.select</tt> to wait for events while running asynchronous API calls.
+* <tt>IO#wait_*able</tt> is <tt>Fiber.scheduler</tt> compatible in contrast to <tt>IO.select</tt>.
+*
+* The IO object can change while the connection is established, but is memorized afterwards.
+* So be sure not to cache the IO object, but repeat calling <tt>conn.socket_io</tt> instead.
 *
-* Using this
-* being closed by Ruby when an IO created using <tt>IO.for_fd(conn.socket)</tt>
-* goes out of scope. In contrast to #socket, it also works on Windows.
+* Using this method also works on Windows in contrast to using #socket .
+* It also avoids the problem of the underlying connection being closed by Ruby when an IO created using <tt>IO.for_fd(conn.socket)</tt> goes out of scope.
 */
 static VALUE
 pgconn_socket_io(VALUE self)
@@ -2343,21 +2342,12 @@ pg_rb_io_wait(VALUE io, VALUE events, VALUE timeout) {
 static void *
 wait_socket_readable( VALUE self, struct timeval *ptimeout, void *(*is_readable)(PGconn *))
 {
-	VALUE socket_io;
 	VALUE ret;
 	void *retval;
 	struct timeval aborttime={0,0}, currtime, waittime;
 	VALUE wait_timeout = Qnil;
 	PGconn *conn = pg_get_pgconn(self);
 
-	socket_io = pgconn_socket_io(self);
-
-	/* Check for connection errors (PQisBusy is true on connection errors) */
-	if ( PQconsumeInput(conn) == 0 ) {
-		pgconn_close_socket_io(self);
-		rb_raise( rb_eConnectionBad, "PQconsumeInput() %s", PQerrorMessage(conn) );
-	}
-
 	if ( ptimeout ) {
 		gettimeofday(&currtime, NULL);
 		timeradd(&currtime, ptimeout, &aborttime);
@@ -2372,6 +2362,7 @@ wait_socket_readable( VALUE self, struct timeval *ptimeout, void *(*is_readable)
 
 		/* Is the given timeout valid? */
 		if( !ptimeout || (waittime.tv_sec >= 0 && waittime.tv_usec >= 0) ){
+			VALUE socket_io = pgconn_socket_io(self);
 			/* Wait for the socket to become readable before checking again */
 			ret = pg_rb_io_wait(socket_io, RB_INT2NUM(PG_RUBY_IO_READABLE), wait_timeout);
 		} else {
@@ -2984,7 +2975,7 @@ get_result_readable(PGconn *conn)
 * If +true+ is returned, +conn.is_busy+ will return +false+
 * and +conn.get_result+ will not block.
 */
-
+VALUE
 pgconn_block( int argc, VALUE *argv, VALUE self ) {
 	struct timeval timeout;
 	struct timeval *ptimeout = NULL;
@@ -3072,7 +3063,8 @@ pgconn_async_get_last_result(VALUE self)
 	for(;;) {
 		int status;
 
-
+		/* wait for input (without blocking) before reading each result */
+		wait_socket_readable(self, NULL, get_result_readable);
 
 		cur = gvl_PQgetResult(conn);
 		if (cur == NULL)
data/ext/pg_record_coder.c
CHANGED
@@ -344,10 +344,12 @@ record_isspace(char ch)
 *   oids = conn.exec( "SELECT (NULL::complex).*" )
 *   # Build a type map (PG::TypeMapByColumn) for decoding the "complex" type
 *   dtm = PG::BasicTypeMapForResults.new(conn).build_column_map( oids )
-*   #
-*   PG::BasicTypeRegistry.
-*   #
-*
+*   # Build a type map and populate with basic types
+*   btr = PG::BasicTypeRegistry.new.register_default_types
+*   # Register a new record decoder for decoding our type "complex"
+*   btr.register_coder(PG::TextDecoder::Record.new(type_map: dtm, name: "complex"))
+*   # Apply our basic type registry to all results retrieved from the server
+*   conn.type_map_for_results = PG::BasicTypeMapForResults.new(conn, registry: btr)
 *   # Now queries decode the "complex" type (and many basic types) automatically
 *   conn.exec("SELECT * FROM my_table").to_a
 *   # => [{"v1"=>[2.0, 3.0], "v2"=>[4.0, 5.0]}, {"v1"=>[6.0, 7.0], "v2"=>[8.0, 9.0]}]
data/ext/pg_result.c
CHANGED
@@ -1383,7 +1383,7 @@ pgresult_type_map_get(VALUE self)
 
 
 static void
-yield_hash(VALUE self, int ntuples, int nfields)
+yield_hash(VALUE self, int ntuples, int nfields, void *data)
 {
 	int tuple_num;
 	t_pg_result *this = pgresult_get_this(self);
@@ -1397,7 +1397,7 @@ yield_hash(VALUE self, int ntuples, int nfields)
 }
 
 static void
-yield_array(VALUE self, int ntuples, int nfields)
+yield_array(VALUE self, int ntuples, int nfields, void *data)
 {
 	int row;
 	t_pg_result *this = pgresult_get_this(self);
@@ -1417,7 +1417,7 @@ yield_array(VALUE self, int ntuples, int nfields)
 }
 
 static void
-yield_tuple(VALUE self, int ntuples, int nfields)
+yield_tuple(VALUE self, int ntuples, int nfields, void *data)
 {
 	int tuple_num;
 	t_pg_result *this = pgresult_get_this(self);
@@ -1436,8 +1436,9 @@ yield_tuple(VALUE self, int ntuples, int nfields)
 	}
 }
 
-static
-
+/* Non-static, and data pointer for use by sequel_pg */
+VALUE
+pgresult_stream_any(VALUE self, void (*yielder)(VALUE, int, int, void*), void* data)
 {
 	t_pg_result *this;
 	int nfields;
@@ -1456,6 +1457,7 @@ pgresult_stream_any(VALUE self, void (*yielder)(VALUE, int, int))
 
 	switch( PQresultStatus(pgresult) ){
 		case PGRES_TUPLES_OK:
+		case PGRES_COMMAND_OK:
 			if( ntuples == 0 )
 				return self;
 			rb_raise( rb_eInvalidResultStatus, "PG::Result is not in single row mode");
@@ -1465,7 +1467,12 @@ pgresult_stream_any(VALUE self, void (*yielder)(VALUE, int, int))
 			pg_result_check( self );
 		}
 
-		yielder( self, ntuples, nfields );
+		yielder( self, ntuples, nfields, data );
+
+		if( gvl_PQisBusy(pgconn) ){
+			/* wait for input (without blocking) before reading each result */
+			pgconn_block( 0, NULL, this->connection );
+		}
 
 		pgresult = gvl_PQgetResult(pgconn);
 		if( pgresult == NULL )
@@ -1516,7 +1523,7 @@ pgresult_stream_any(VALUE self, void (*yielder)(VALUE, int, int))
 static VALUE
 pgresult_stream_each(VALUE self)
 {
-	return pgresult_stream_any(self, yield_hash);
+	return pgresult_stream_any(self, yield_hash, NULL);
 }
 
 /*
@@ -1532,7 +1539,7 @@ pgresult_stream_each(VALUE self)
 static VALUE
 pgresult_stream_each_row(VALUE self)
 {
-	return pgresult_stream_any(self, yield_array);
+	return pgresult_stream_any(self, yield_array, NULL);
 }
 
 /*
@@ -1549,7 +1556,7 @@ pgresult_stream_each_tuple(VALUE self)
 	/* allocate VALUEs that are shared between all streamed tuples */
 	ensure_init_for_tuple(self);
 
-	return pgresult_stream_any(self, yield_tuple);
+	return pgresult_stream_any(self, yield_tuple, NULL);
 }
 
 /*
data/ext/pg_tuple.c
CHANGED
@@ -471,10 +471,7 @@ pg_tuple_dump(VALUE self)
 	values = rb_ary_new4(this->num_fields, &this->values[0]);
 	a = rb_ary_new3(2, field_names, values);
 
-
-		rb_copy_generic_ivar(a, self);
-		FL_SET(a, FL_EXIVAR);
-	}
+	rb_copy_generic_ivar(a, self);
 
 	return a;
 }
@@ -542,10 +539,7 @@ pg_tuple_load(VALUE self, VALUE a)
 
 	RTYPEDDATA_DATA(self) = this;
 
-
-		rb_copy_generic_ivar(self, a);
-		FL_SET(self, FL_EXIVAR);
-	}
+	rb_copy_generic_ivar(self, a);
 
 	return self;
 }
data/lib/2.5/pg_ext.so
CHANGED
Binary file
data/lib/2.6/pg_ext.so
CHANGED
Binary file
data/lib/2.7/pg_ext.so
CHANGED
Binary file
data/lib/3.0/pg_ext.so
CHANGED
Binary file
data/lib/3.1/pg_ext.so
CHANGED
Binary file
data/lib/pg/basic_type_registry.rb
CHANGED
@@ -22,7 +22,7 @@ require 'pg' unless defined?( PG )
 #   end
 #
 # conn = PG.connect
-# regi = PG::BasicTypeRegistry.new.
+# regi = PG::BasicTypeRegistry.new.register_default_types
 # regi.register_type(0, 'inet', InetEncoder, InetDecoder)
 # conn.type_map_for_results = PG::BasicTypeMapForResults.new(conn, registry: regi)
 class PG::BasicTypeRegistry
@@ -184,6 +184,7 @@ class PG::BasicTypeRegistry
   name = coder.name || raise(ArgumentError, "name of #{coder.inspect} must be defined")
   h[:encoder][name] = coder if coder.respond_to?(:encode)
   h[:decoder][name] = coder if coder.respond_to?(:decode)
+  self
  end
 
  # Register the given +encoder_class+ and/or +decoder_class+ for casting a PostgreSQL type.
@@ -193,6 +194,7 @@ class PG::BasicTypeRegistry
  def register_type(format, name, encoder_class, decoder_class)
   register_coder(encoder_class.new(name: name, format: format)) if encoder_class
   register_coder(decoder_class.new(name: name, format: format)) if decoder_class
+  self
  end
 
  # Alias the +old+ type to the +new+ type.
@@ -205,10 +207,11 @@ class PG::BasicTypeRegistry
    @coders_by_name[format][ende].delete(new)
   end
  end
+  self
  end
 
 # Populate the registry with all builtin types of ruby-pg
-def
+def register_default_types
  register_type 0, 'int2', PG::TextEncoder::Integer, PG::TextDecoder::Integer
  alias_type 0, 'int4', 'int2'
  alias_type 0, 'int8', 'int2'
@@ -281,8 +284,10 @@ class PG::BasicTypeRegistry
  self
 end
 
+alias define_default_types register_default_types
+
 # @private
-DEFAULT_TYPE_REGISTRY = PG::BasicTypeRegistry.new.
+DEFAULT_TYPE_REGISTRY = PG::BasicTypeRegistry.new.register_default_types
 
 # Delegate class method calls to DEFAULT_TYPE_REGISTRY
 class << self
data/lib/pg/connection.rb
CHANGED
@@ -105,7 +105,7 @@ class PG::Connection
    end
    # extract "host1,host2" from "host1:5432,host2:5432"
    iopts[:host] = uri_match['hostports'].split(',', -1).map do |hostport|
-    hostmatch = HOST_AND_PORT
+    hostmatch = /\A#{HOST_AND_PORT}\z/.match(hostport)
     hostmatch['IPv6address'] || hostmatch['IPv4address'] || hostmatch['reg-name']&.gsub(/%(\h\h)/){ $1.hex.chr }
    end.join(',')
    oopts = {}
@@ -612,9 +612,6 @@ class PG::Connection
  alias async_cancel cancel
 
  private def async_connect_or_reset(poll_meth)
-   # Now grab a reference to the underlying socket so we know when the connection is established
-   socket = socket_io
-
   # Track the progress of the connection, waiting for the socket to become readable/writable before polling it
   poll_status = PG::PGRES_POLLING_WRITING
   until poll_status == PG::PGRES_POLLING_OK ||
@@ -623,18 +620,22 @@ class PG::Connection
    # If the socket needs to read, wait 'til it becomes readable to poll again
    case poll_status
    when PG::PGRES_POLLING_READING
-
+     socket_io.wait_readable
 
    # ...and the same for when the socket needs to write
    when PG::PGRES_POLLING_WRITING
-
+     socket_io.wait_writable
    end
 
    # Check to see if it's finished or failed yet
    poll_status = send( poll_meth )
   end
 
-
+   unless status == PG::CONNECTION_OK
+    msg = error_message
+    finish
+    raise PG::ConnectionBad, msg
+   end
 
   # Set connection to nonblocking to handle all blocking states in ruby.
   # That way a fiber scheduler is able to handle IO requests.
data/lib/pg/version.rb
CHANGED
data/lib/x86-mingw32/libpq.dll
CHANGED
Binary file
data/misc/openssl-pg-segfault.rb
CHANGED
File without changes
data/rakelib/task_extension.rb
ADDED
@@ -0,0 +1,46 @@
+# This source code is borrowed from:
+# https://github.com/oneclick/rubyinstaller2/blob/b3dcbf69f131e44c78ea3a1c5e0041c223f266ce/lib/ruby_installer/build/utils.rb#L104-L144
+
+module TaskExtension
+  # Extend rake's file task to be defined only once and to check the expected file is indeed generated
+  #
+  # The same as #task, but for #file.
+  # In addition this file task raises an error, if the file that is expected to be generated is not present after the block was executed.
+  def file(name, *args, &block)
+    task_once(name, block) do
+      super(name, *args) do |ta|
+        block.call(ta).tap do
+          raise "file #{ta.name} is missing after task executed" unless File.exist?(ta.name)
+        end
+      end
+    end
+  end
+
+  # Extend rake's task definition to be defined only once, even if called several times
+  #
+  # This allows to define common tasks next to specific tasks.
+  # It is expected that any variation of the task's block is reflected in the task name or namespace.
+  # If the task name is identical, the task block is executed only once, even if the file task definition is executed twice.
+  def task(name, *args, &block)
+    task_once(name, block) do
+      super
+    end
+  end
+
+  private def task_once(name, block)
+    name = name.keys.first if name.is_a?(Hash)
+    if block &&
+        Rake::Task.task_defined?(name) &&
+        Rake::Task[name].instance_variable_get('@task_block_location') == block.source_location
+      # task is already defined for this target and the same block
+      # So skip double definition of the same action
+      Rake::Task[name]
+    elsif block
+      yield.tap do
+        Rake::Task[name].instance_variable_set('@task_block_location', block.source_location)
+      end
+    else
+      yield
+    end
+  end
+end
data/sample/array_insert.rb
CHANGED
File without changes
data/sample/async_api.rb
CHANGED
@@ -27,10 +27,6 @@ conn = PG::Connection.connect_start( :dbname => 'test' ) or
 abort "Connection failed: %s" % [ conn.error_message ] if
  conn.status == PG::CONNECTION_BAD
 
-# Now grab a reference to the underlying socket so we know when the
-# connection is established
-socket = conn.socket_io
-
 # Track the progress of the connection, waiting for the socket to become readable/writable
 # before polling it
 poll_status = PG::PGRES_POLLING_WRITING
@@ -41,13 +37,13 @@ until poll_status == PG::PGRES_POLLING_OK ||
  case poll_status
  when PG::PGRES_POLLING_READING
   output_progress "  waiting for socket to become readable"
-  select( [
+  select( [conn.socket_io], nil, nil, TIMEOUT ) or
    raise "Asynchronous connection timed out!"
 
  # ...and the same for when the socket needs to write
  when PG::PGRES_POLLING_WRITING
   output_progress "  waiting for socket to become writable"
-  select( nil, [
+  select( nil, [conn.socket_io], nil, TIMEOUT ) or
    raise "Asynchronous connection timed out!"
  end
 
@@ -85,7 +81,7 @@ loop do
  # Buffer any incoming data on the socket until a full result is ready.
  conn.consume_input
  while conn.is_busy
-  select( [
+  select( [conn.socket_io], nil, nil, TIMEOUT ) or
   raise "Timeout waiting for query response."
  conn.consume_input
 end
data/sample/async_copyto.rb
CHANGED
File without changes
data/sample/async_mixed.rb
CHANGED
File without changes
data/sample/check_conn.rb
CHANGED
File without changes
data/sample/copydata.rb
CHANGED
File without changes
data/sample/copyfrom.rb
CHANGED
File without changes
data/sample/copyto.rb
CHANGED
File without changes
data/sample/cursor.rb
CHANGED
File without changes
data/sample/disk_usage_report.rb
CHANGED
File without changes
data/sample/issue-119.rb
CHANGED
File without changes
data/sample/losample.rb
CHANGED
File without changes
data/sample/minimal-testcase.rb
CHANGED
File without changes
data/sample/notify_wait.rb
CHANGED
File without changes
data/sample/pg_statistics.rb
CHANGED
File without changes
data/sample/replication_monitor.rb
CHANGED
File without changes
data/sample/test_binary_values.rb
CHANGED
File without changes
data/sample/wal_shipper.rb
CHANGED
File without changes
data/sample/warehouse_partitions.rb
CHANGED
File without changes
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: pg
 version: !ruby/object:Gem::Version
-  version: 1.3.2
+  version: 1.3.5
 platform: x86-mingw32
 authors:
 - Michael Granger
@@ -36,7 +36,7 @@ cert_chain:
   oL1mUdzB8KrZL4/WbG5YNX6UTtJbIOu9qEFbBAy4/jtIkJX+dlNoFwd4GXQW1YNO
   nA==
   -----END CERTIFICATE-----
-date: 2022-
+date: 2022-03-31 00:00:00.000000000 Z
 dependencies: []
 description: Pg is the Ruby interface to the PostgreSQL RDBMS. It works with PostgreSQL
   9.3 and later.
@@ -139,6 +139,7 @@ files:
 - misc/ruby-pg/Rakefile
 - misc/ruby-pg/lib/ruby/pg.rb
 - pg.gemspec
+- rakelib/task_extension.rb
 - sample/array_insert.rb
 - sample/async_api.rb
 - sample/async_copyto.rb
metadata.gz.sig
CHANGED
Binary file