pg 1.4.1 → 1.5.6

Files changed (72)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.appveyor.yml +15 -9
  4. data/.github/workflows/binary-gems.yml +45 -14
  5. data/.github/workflows/source-gem.yml +35 -23
  6. data/.gitignore +11 -2
  7. data/.travis.yml +2 -2
  8. data/Gemfile +3 -0
  9. data/{History.rdoc → History.md} +285 -140
  10. data/README.ja.md +300 -0
  11. data/README.md +286 -0
  12. data/Rakefile +18 -6
  13. data/Rakefile.cross +8 -11
  14. data/certs/kanis@comcard.de.pem +20 -0
  15. data/certs/larskanis-2023.pem +24 -0
  16. data/certs/larskanis-2024.pem +24 -0
  17. data/ext/errorcodes.def +4 -0
  18. data/ext/errorcodes.txt +2 -1
  19. data/ext/extconf.rb +4 -0
  20. data/ext/pg.c +15 -55
  21. data/ext/pg.h +11 -6
  22. data/ext/pg_binary_decoder.c +80 -1
  23. data/ext/pg_binary_encoder.c +225 -1
  24. data/ext/pg_coder.c +17 -8
  25. data/ext/pg_connection.c +201 -73
  26. data/ext/pg_copy_coder.c +307 -18
  27. data/ext/pg_errors.c +1 -1
  28. data/ext/pg_record_coder.c +6 -5
  29. data/ext/pg_result.c +102 -26
  30. data/ext/pg_text_decoder.c +28 -10
  31. data/ext/pg_text_encoder.c +23 -10
  32. data/ext/pg_tuple.c +35 -32
  33. data/ext/pg_type_map.c +4 -3
  34. data/ext/pg_type_map_all_strings.c +3 -3
  35. data/ext/pg_type_map_by_class.c +6 -4
  36. data/ext/pg_type_map_by_column.c +9 -5
  37. data/ext/pg_type_map_by_mri_type.c +1 -1
  38. data/ext/pg_type_map_by_oid.c +8 -5
  39. data/ext/pg_type_map_in_ruby.c +6 -3
  40. data/lib/pg/basic_type_map_based_on_result.rb +21 -1
  41. data/lib/pg/basic_type_map_for_queries.rb +19 -10
  42. data/lib/pg/basic_type_map_for_results.rb +26 -3
  43. data/lib/pg/basic_type_registry.rb +35 -33
  44. data/lib/pg/binary_decoder/date.rb +9 -0
  45. data/lib/pg/binary_decoder/timestamp.rb +26 -0
  46. data/lib/pg/binary_encoder/timestamp.rb +20 -0
  47. data/lib/pg/coder.rb +15 -13
  48. data/lib/pg/connection.rb +186 -104
  49. data/lib/pg/exceptions.rb +7 -0
  50. data/lib/pg/text_decoder/date.rb +18 -0
  51. data/lib/pg/text_decoder/inet.rb +9 -0
  52. data/lib/pg/text_decoder/json.rb +14 -0
  53. data/lib/pg/text_decoder/numeric.rb +9 -0
  54. data/lib/pg/text_decoder/timestamp.rb +30 -0
  55. data/lib/pg/text_encoder/date.rb +12 -0
  56. data/lib/pg/text_encoder/inet.rb +28 -0
  57. data/lib/pg/text_encoder/json.rb +14 -0
  58. data/lib/pg/text_encoder/numeric.rb +9 -0
  59. data/lib/pg/text_encoder/timestamp.rb +24 -0
  60. data/lib/pg/version.rb +1 -1
  61. data/lib/pg.rb +55 -15
  62. data/pg.gemspec +5 -3
  63. data/rakelib/task_extension.rb +1 -1
  64. data.tar.gz.sig +0 -0
  65. metadata +96 -32
  66. metadata.gz.sig +0 -0
  67. data/README.ja.rdoc +0 -13
  68. data/README.rdoc +0 -214
  69. data/lib/pg/binary_decoder.rb +0 -23
  70. data/lib/pg/constants.rb +0 -12
  71. data/lib/pg/text_decoder.rb +0 -46
  72. data/lib/pg/text_encoder.rb +0 -59
data/lib/pg/coder.rb CHANGED
@@ -6,22 +6,24 @@ module PG
  class Coder

  module BinaryFormatting
- Params = { format: 1 }
- def initialize( params={} )
- super(Params.merge(params))
+ def initialize(hash={}, **kwargs)
+ warn("PG::Coder.new(hash) is deprecated. Please use keyword arguments instead! Called from #{caller.first}", category: :deprecated) unless hash.empty?
+ super(format: 1, **hash, **kwargs)
  end
  end


  # Create a new coder object based on the attribute Hash.
- def initialize(params={})
- params.each do |key, val|
+ def initialize(hash=nil, **kwargs)
+ warn("PG::Coder.new(hash) is deprecated. Please use keyword arguments instead! Called from #{caller.first}", category: :deprecated) if hash
+
+ (hash || kwargs).each do |key, val|
  send("#{key}=", val)
  end
  end

  def dup
- self.class.new(to_h)
+ self.class.new(**to_h)
  end

  # Returns coder attributes as Hash.
@@ -43,7 +45,7 @@ module PG
  end

  def marshal_load(str)
- initialize Marshal.load(str)
+ initialize(**Marshal.load(str))
  end

  def inspect
@@ -70,11 +72,11 @@ module PG

  class CompositeCoder < Coder
  def to_h
- super.merge!({
+ { **super,
  elements_type: elements_type,
  needs_quotation: needs_quotation?,
  delimiter: delimiter,
- })
+ }
  end

  def inspect
@@ -86,19 +88,19 @@ module PG

  class CopyCoder < Coder
  def to_h
- super.merge!({
+ { **super,
  type_map: type_map,
  delimiter: delimiter,
  null_string: null_string,
- })
+ }
  end
  end

  class RecordCoder < Coder
  def to_h
- super.merge!({
+ { **super,
  type_map: type_map,
- })
+ }
  end
  end
  end # module PG
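
The practical effect of the coder.rb changes: coders now take keyword arguments, and passing a positional attribute Hash still works but is deprecated. A minimal sketch of both call styles (assumes only the pg gem; the oid/name values are arbitrary examples, and the deprecation message is only printed when Ruby's deprecation warnings are enabled):

  require 'pg'

  # Preferred since pg 1.5: keyword arguments (oid 23 = int4, chosen as an example)
  enc = PG::TextEncoder::Integer.new(name: "int4", oid: 23)

  # Still accepted, but triggers the "PG::Coder.new(hash) is deprecated" warning
  legacy = PG::TextEncoder::Integer.new({ name: "int4", oid: 23 })

  p enc.to_h       # coder attributes as a Hash, e.g. {:oid=>23, :format=>0, :flags=>0, :name=>"int4"}
  p enc.dup.to_h   # dup now re-creates the coder from keyword arguments
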
data/lib/pg/connection.rb CHANGED
@@ -2,8 +2,7 @@
  # frozen_string_literal: true

  require 'pg' unless defined?( PG )
- require 'uri'
- require 'io/wait'
+ require 'io/wait' unless ::IO.public_instance_methods(false).include?(:wait_readable)
  require 'socket'

  # The PostgreSQL connection class. The interface for this class is based on
@@ -31,8 +30,8 @@ require 'socket'
  class PG::Connection

  # The order the options are passed to the ::connect method.
- CONNECT_ARGUMENT_ORDER = %w[host port options tty dbname user password]
-
+ CONNECT_ARGUMENT_ORDER = %w[host port options tty dbname user password].freeze
+ private_constant :CONNECT_ARGUMENT_ORDER

  ### Quote a single +value+ for use in a connection-parameter string.
  def self.quote_connstr( value )
@@ -46,6 +45,10 @@ class PG::Connection
  hash.map { |k,v| "#{k}=#{quote_connstr(v)}" }.join( ' ' )
  end

+ # Shareable program name for Ractor
+ PROGRAM_NAME = $PROGRAM_NAME.dup.freeze
+ private_constant :PROGRAM_NAME
+
  # Parse the connection +args+ into a connection-parameter string.
  # See PG::Connection.new for valid arguments.
  #
@@ -63,8 +66,8 @@ class PG::Connection
  iopts = {}

  if args.length == 1
- case args.first
- when URI, /=/, /:\/\//
+ case args.first.to_s
+ when /=/, /:\/\//
  # Option or URL string style
  conn_string = args.first.to_s
  iopts = PG::Connection.conninfo_parse(conn_string).each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] }
@@ -87,12 +90,36 @@ class PG::Connection
  iopts.merge!( hash_arg )

  if !iopts[:fallback_application_name]
- iopts[:fallback_application_name] = $0.sub( /^(.{30}).{4,}(.{30})$/ ){ $1+"..."+$2 }
+ iopts[:fallback_application_name] = PROGRAM_NAME.sub( /^(.{30}).{4,}(.{30})$/ ){ $1+"..."+$2 }
  end

  return connect_hash_to_string(iopts)
  end

+ # Return a String representation of the object suitable for debugging.
+ def inspect
+ str = self.to_s
+ str[-1,0] = if finished?
+ " finished"
+ else
+ stats = []
+ stats << " status=#{ PG.constants.grep(/CONNECTION_/).find{|c| PG.const_get(c) == status} }" if status != CONNECTION_OK
+ stats << " transaction_status=#{ PG.constants.grep(/PQTRANS_/).find{|c| PG.const_get(c) == transaction_status} }" if transaction_status != PG::PQTRANS_IDLE
+ stats << " nonblocking=#{ isnonblocking }" if isnonblocking
+ stats << " pipeline_status=#{ PG.constants.grep(/PQ_PIPELINE_/).find{|c| PG.const_get(c) == pipeline_status} }" if respond_to?(:pipeline_status) && pipeline_status != PG::PQ_PIPELINE_OFF
+ stats << " client_encoding=#{ get_client_encoding }" if get_client_encoding != "UTF8"
+ stats << " type_map_for_results=#{ type_map_for_results.to_s }" unless type_map_for_results.is_a?(PG::TypeMapAllStrings)
+ stats << " type_map_for_queries=#{ type_map_for_queries.to_s }" unless type_map_for_queries.is_a?(PG::TypeMapAllStrings)
+ stats << " encoder_for_put_copy_data=#{ encoder_for_put_copy_data.to_s }" if encoder_for_put_copy_data
+ stats << " decoder_for_get_copy_data=#{ decoder_for_get_copy_data.to_s }" if decoder_for_get_copy_data
+ " host=#{host} port=#{port} user=#{user}#{stats.join}"
+ end
+ return str
+ end
+
+ BinarySignature = "PGCOPY\n\377\r\n\0".b
+ private_constant :BinarySignature
+
  # call-seq:
  # conn.copy_data( sql [, coder] ) {|sql_result| ... } -> PG::Result
  #
@@ -139,6 +166,14 @@ class PG::Connection
  # conn.put_copy_data ['more', 'data', 'to', 'copy']
  # end
  #
+ # Also PG::BinaryEncoder::CopyRow can be used to send data in binary format to the server.
+ # In this case copy_data generates the header and trailer data automatically:
+ # enco = PG::BinaryEncoder::CopyRow.new
+ # conn.copy_data "COPY my_table FROM STDIN (FORMAT binary)", enco do
+ # conn.put_copy_data ['some', 'data', 'to', 'copy']
+ # conn.put_copy_data ['more', 'data', 'to', 'copy']
+ # end
+ #
  # Example with CSV output format:
  # conn.copy_data "COPY my_table TO STDOUT CSV" do
  # while row=conn.get_copy_data
@@ -160,6 +195,18 @@ class PG::Connection
  # This receives all rows of +my_table+ as ruby array:
  # ["some", "data", "to", "copy"]
  # ["more", "data", "to", "copy"]
+ #
+ # Also PG::BinaryDecoder::CopyRow can be used to retrieve data in binary format from the server.
+ # In this case the header and trailer data is processed by the decoder and the remaining +nil+ from get_copy_data is processed by copy_data, so that binary data can be processed equally to text data:
+ # deco = PG::BinaryDecoder::CopyRow.new
+ # conn.copy_data "COPY my_table TO STDOUT (FORMAT binary)", deco do
+ # while row=conn.get_copy_data
+ # p row
+ # end
+ # end
+ # This receives all rows of +my_table+ as ruby array:
+ # ["some", "data", "to", "copy"]
+ # ["more", "data", "to", "copy"]

  def copy_data( sql, coder=nil )
  raise PG::NotInBlockingMode.new("copy_data can not be used in nonblocking mode", connection: self) if nonblocking?
@@ -168,18 +215,38 @@ class PG::Connection
  case res.result_status
  when PGRES_COPY_IN
  begin
+ if coder && res.binary_tuples == 1
+ # Binary file header (11 byte signature, 32 bit flags and 32 bit extension length)
+ put_copy_data(BinarySignature + ("\x00" * 8))
+ end
+
  if coder
  old_coder = self.encoder_for_put_copy_data
  self.encoder_for_put_copy_data = coder
  end
+
  yield res
  rescue Exception => err
  errmsg = "%s while copy data: %s" % [ err.class.name, err.message ]
- put_copy_end( errmsg )
- get_result
- raise
+ begin
+ put_copy_end( errmsg )
+ rescue PG::Error
+ # Ignore error in cleanup to avoid losing original exception
+ end
+ discard_results
+ raise err
  else
- put_copy_end
+ begin
+ self.encoder_for_put_copy_data = old_coder if coder
+
+ if coder && res.binary_tuples == 1
+ put_copy_data("\xFF\xFF") # Binary file trailer 16 bit "-1"
+ end
+
+ put_copy_end
+ rescue PG::Error => err
+ raise PG::LostCopyState.new("#{err} (probably by executing another SQL query while running a COPY command)", connection: self)
+ end
  get_last_result
  ensure
  self.encoder_for_put_copy_data = old_coder if coder
@@ -192,24 +259,25 @@ class PG::Connection
  self.decoder_for_get_copy_data = coder
  end
  yield res
- rescue Exception => err
+ rescue Exception
  cancel
- begin
- while get_copy_data
+ discard_results
+ raise
+ else
+ if coder && res.binary_tuples == 1
+ # There are two end markers in binary mode: file trailer and the final nil.
+ # The file trailer is expected to be processed by BinaryDecoder::CopyRow and already returns nil, so that the remaining NULL from PQgetCopyData is retrieved here:
+ if get_copy_data
+ discard_results
+ raise PG::NotAllCopyDataRetrieved.new("Not all binary COPY data retrieved", connection: self)
  end
- rescue PG::Error
- # Ignore error in cleanup to avoid losing original exception
  end
- while get_result
- end
- raise err
- else
  res = get_last_result
- if !res || res.result_status != PGRES_COMMAND_OK
- while get_copy_data
- end
- while get_result
- end
+ if !res
+ discard_results
+ raise PG::LostCopyState.new("Lost COPY state (probably by executing another SQL query while running a COPY command)", connection: self)
+ elsif res.result_status != PGRES_COMMAND_OK
+ discard_results
  raise PG::NotAllCopyDataRetrieved.new("Not all COPY data retrieved", connection: self)
  end
  res
@@ -298,6 +366,23 @@ class PG::Connection
  end
  end

+ # Read all pending socket input to internal memory and raise an exception in case of errors.
+ #
+ # This verifies that the connection socket is in a usable state and not aborted in any way.
+ # No communication is done with the server.
+ # Only pending data is read from the socket - the method doesn't wait for any outstanding server answers.
+ #
+ # Raises a kind of PG::Error if there was an error reading the data or if the socket is in a failure state.
+ #
+ # The method doesn't verify that the server is still responding.
+ # To verify that the communication to the server works, it is recommended to use something like <tt>conn.exec('')</tt> instead.
+ def check_socket
+ while socket_io.wait_readable(0)
+ consume_input
+ end
+ nil
+ end
+
  # call-seq:
  # conn.get_result() -> PG::Result
  # conn.get_result() {|pg_result| block }
@@ -408,7 +493,17 @@ class PG::Connection
  # See also #copy_data.
  #
  def put_copy_data(buffer, encoder=nil)
+ # sync_put_copy_data does a non-blocking attept to flush data.
  until res=sync_put_copy_data(buffer, encoder)
+ # It didn't flush immediately and allocation of more buffering memory failed.
+ # Wait for all data sent by doing a blocking flush.
+ res = flush
+ end
+
+ # And do a blocking flush every 100 calls.
+ # This is to avoid memory bloat, when sending the data is slower than calls to put_copy_data happen.
+ if (@calls_to_put_copy_data += 1) > 100
+ @calls_to_put_copy_data = 0
  res = flush
  end
  res
@@ -431,6 +526,7 @@ class PG::Connection
  until sync_put_copy_end(*args)
  flush
  end
+ @calls_to_put_copy_data = 0
  flush
  end
  alias async_put_copy_end put_copy_end
@@ -469,7 +565,12 @@ class PG::Connection
  # Resets the backend connection. This method closes the
  # backend connection and tries to re-connect.
  def reset
- reset_start
+ iopts = conninfo_hash.compact
+ if iopts[:host] && !iopts[:host].empty? && PG.library_version >= 100000
+ iopts = self.class.send(:resolve_hosts, iopts)
+ end
+ conninfo = self.class.parse_connect_args( iopts );
+ reset_start2(conninfo)
  async_connect_or_reset(:reset_poll)
  self
  end
@@ -544,14 +645,17 @@ class PG::Connection
  if (timeo = conninfo_hash[:connect_timeout].to_i) && timeo > 0
  # Lowest timeout is 2 seconds - like in libpq
  timeo = [timeo, 2].max
- stop_time = timeo + Process.clock_gettime(Process::CLOCK_MONOTONIC)
+ host_count = conninfo_hash[:host].to_s.count(",") + 1
+ stop_time = timeo * host_count + Process.clock_gettime(Process::CLOCK_MONOTONIC)
  end

  poll_status = PG::PGRES_POLLING_WRITING
  until poll_status == PG::PGRES_POLLING_OK ||
  poll_status == PG::PGRES_POLLING_FAILED

- timeout = stop_time&.-(Process.clock_gettime(Process::CLOCK_MONOTONIC))
+ # Set single timeout to parameter "connect_timeout" but
+ # don't exceed total connection time of number-of-hosts * connect_timeout.
+ timeout = [timeo, stop_time - Process.clock_gettime(Process::CLOCK_MONOTONIC)].min if stop_time
  event = if !timeout || timeout >= 0
  # If the socket needs to read, wait 'til it becomes readable to poll again
  case poll_status
@@ -589,7 +693,6 @@ class PG::Connection

  # Check to see if it's finished or failed yet
  poll_status = send( poll_meth )
- @last_status = status unless [PG::CONNECTION_BAD, PG::CONNECTION_OK].include?(status)
  end

  unless status == PG::CONNECTION_OK
@@ -675,89 +778,59 @@ class PG::Connection
  alias setdb new
  alias setdblogin new

+ # Resolve DNS in Ruby to avoid blocking state while connecting.
+ # Multiple comma-separated values are generated, if the hostname resolves to both IPv4 and IPv6 addresses.
+ # This requires PostgreSQL-10+, so no DNS resolving is done on earlier versions.
+ private def resolve_hosts(iopts)
+ ihosts = iopts[:host].split(",", -1)
+ iports = iopts[:port].split(",", -1)
+ iports = [nil] if iports.size == 0
+ iports = iports * ihosts.size if iports.size == 1
+ raise PG::ConnectionBad, "could not match #{iports.size} port numbers to #{ihosts.size} hosts" if iports.size != ihosts.size
+
+ dests = ihosts.each_with_index.flat_map do |mhost, idx|
+ unless host_is_named_pipe?(mhost)
+ if Fiber.respond_to?(:scheduler) &&
+ Fiber.scheduler &&
+ RUBY_VERSION < '3.1.'
+
+ # Use a second thread to avoid blocking of the scheduler.
+ # `TCPSocket.gethostbyname` isn't fiber aware before ruby-3.1.
+ hostaddrs = Thread.new{ Addrinfo.getaddrinfo(mhost, nil, nil, :STREAM).map(&:ip_address) rescue [''] }.value
+ else
+ hostaddrs = Addrinfo.getaddrinfo(mhost, nil, nil, :STREAM).map(&:ip_address) rescue ['']
+ end
+ else
+ # No hostname to resolve (UnixSocket)
+ hostaddrs = [nil]
+ end
+ hostaddrs.map { |hostaddr| [hostaddr, mhost, iports[idx]] }
+ end
+ iopts.merge(
+ hostaddr: dests.map{|d| d[0] }.join(","),
+ host: dests.map{|d| d[1] }.join(","),
+ port: dests.map{|d| d[2] }.join(","))
+ end
+
  private def connect_to_hosts(*args)
  option_string = parse_connect_args(*args)
  iopts = PG::Connection.conninfo_parse(option_string).each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] }
  iopts = PG::Connection.conndefaults.each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] }.merge(iopts)

- errors = []
  if iopts[:hostaddr]
  # hostaddr is provided -> no need to resolve hostnames
- ihostaddrs = iopts[:hostaddr].split(",", -1)
-
- ihosts = iopts[:host].split(",", -1) if iopts[:host]
- raise PG::ConnectionBad, "could not match #{ihosts.size} host names to #{ihostaddrs.size} hostaddr values" if ihosts && ihosts.size != ihostaddrs.size
-
- iports = iopts[:port].split(",", -1)
- iports = iports * ihostaddrs.size if iports.size == 1
- raise PG::ConnectionBad, "could not match #{iports.size} port numbers to #{ihostaddrs.size} hosts" if iports.size != ihostaddrs.size

- # Try to connect to each hostaddr with separate timeout
- ihostaddrs.each_with_index do |ihostaddr, idx|
- oopts = iopts.merge(hostaddr: ihostaddr, port: iports[idx])
- oopts[:host] = ihosts[idx] if ihosts
- c = connect_internal(oopts, errors)
- return c if c
- end
- elsif iopts[:host]
- # Resolve DNS in Ruby to avoid blocking state while connecting, when it ...
- ihosts = iopts[:host].split(",", -1)
-
- iports = iopts[:port].split(",", -1)
- iports = iports * ihosts.size if iports.size == 1
- raise PG::ConnectionBad, "could not match #{iports.size} port numbers to #{ihosts.size} hosts" if iports.size != ihosts.size
-
- ihosts.each_with_index do |mhost, idx|
- unless host_is_named_pipe?(mhost)
- addrs = if Fiber.respond_to?(:scheduler) &&
- Fiber.scheduler &&
- RUBY_VERSION < '3.1.'
-
- # Use a second thread to avoid blocking of the scheduler.
- # `TCPSocket.gethostbyname` isn't fiber aware before ruby-3.1.
- Thread.new{ Addrinfo.getaddrinfo(mhost, nil, nil, :STREAM).map(&:ip_address) rescue [''] }.value
- else
- Addrinfo.getaddrinfo(mhost, nil, nil, :STREAM).map(&:ip_address) rescue ['']
- end
-
- # Try to connect to each host with separate timeout
- addrs.each do |addr|
- oopts = iopts.merge(hostaddr: addr, host: mhost, port: iports[idx])
- c = connect_internal(oopts, errors)
- return c if c
- end
- else
- # No hostname to resolve (UnixSocket)
- oopts = iopts.merge(host: mhost, port: iports[idx])
- c = connect_internal(oopts, errors)
- return c if c
- end
- end
+ elsif iopts[:host] && !iopts[:host].empty? && PG.library_version >= 100000
+ iopts = resolve_hosts(iopts)
  else
  # No host given
- return connect_internal(iopts)
  end
- raise PG::ConnectionBad, errors.join("\n")
- end
+ conn = self.connect_start(iopts) or
+ raise(PG::Error, "Unable to create a new connection")

- private def connect_internal(opts, errors=nil)
- begin
- conn = self.connect_start(opts) or
- raise(PG::Error, "Unable to create a new connection")
-
- raise PG::ConnectionBad.new(conn.error_message, connection: self) if conn.status == PG::CONNECTION_BAD
+ raise PG::ConnectionBad, conn.error_message if conn.status == PG::CONNECTION_BAD

- conn.send(:async_connect_or_reset, :connect_poll)
- rescue PG::ConnectionBad => err
- if errors && !(conn && [PG::CONNECTION_AWAITING_RESPONSE].include?(conn.instance_variable_get(:@last_status)))
- # Seems to be no authentication error -> try next host
- errors << err
- return nil
- else
- # Probably an authentication error
- raise
- end
- end
+ conn.send(:async_connect_or_reset, :connect_poll)
  conn
  end

@@ -773,7 +846,10 @@ class PG::Connection
  # PG::Connection.ping(connection_string) -> Integer
  # PG::Connection.ping(host, port, options, tty, dbname, login, password) -> Integer
  #
- # Check server status.
+ # PQpingParams reports the status of the server.
+ #
+ # It accepts connection parameters identical to those of PQ::Connection.new .
+ # It is not necessary to supply correct user name, password, or database name values to obtain the server status; however, if incorrect values are provided, the server will log a failed connection attempt.
  #
  # See PG::Connection.new for a description of the parameters.
  #
@@ -786,6 +862,8 @@ class PG::Connection
  # could not establish connection
  # [+PQPING_NO_ATTEMPT+]
  # connection not attempted (bad params)
+ #
+ # See also check_socket for a way to check the connection without doing any server communication.
  def ping(*args)
  if Fiber.respond_to?(:scheduler) && Fiber.scheduler
  # Run PQping in a second thread to avoid blocking of the scheduler.
@@ -797,23 +875,25 @@ class PG::Connection
  end
  alias async_ping ping

- REDIRECT_CLASS_METHODS = {
+ REDIRECT_CLASS_METHODS = PG.make_shareable({
  :new => [:async_connect, :sync_connect],
  :connect => [:async_connect, :sync_connect],
  :open => [:async_connect, :sync_connect],
  :setdb => [:async_connect, :sync_connect],
  :setdblogin => [:async_connect, :sync_connect],
  :ping => [:async_ping, :sync_ping],
- }
+ })
+ private_constant :REDIRECT_CLASS_METHODS

  # These methods are affected by PQsetnonblocking
- REDIRECT_SEND_METHODS = {
+ REDIRECT_SEND_METHODS = PG.make_shareable({
  :isnonblocking => [:async_isnonblocking, :sync_isnonblocking],
  :nonblocking? => [:async_isnonblocking, :sync_isnonblocking],
  :put_copy_data => [:async_put_copy_data, :sync_put_copy_data],
  :put_copy_end => [:async_put_copy_end, :sync_put_copy_end],
  :flush => [:async_flush, :sync_flush],
- }
+ })
+ private_constant :REDIRECT_SEND_METHODS
  REDIRECT_METHODS = {
  :exec => [:async_exec, :sync_exec],
  :query => [:async_exec, :sync_exec],
@@ -831,12 +911,14 @@ class PG::Connection
  :client_encoding= => [:async_set_client_encoding, :sync_set_client_encoding],
  :cancel => [:async_cancel, :sync_cancel],
  }
+ private_constant :REDIRECT_METHODS

  if PG::Connection.instance_methods.include? :async_encrypt_password
  REDIRECT_METHODS.merge!({
  :encrypt_password => [:async_encrypt_password, :sync_encrypt_password],
  })
  end
+ PG.make_shareable(REDIRECT_METHODS)

  def async_send_api=(enable)
  REDIRECT_SEND_METHODS.each do |ali, (async, sync)|
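
Putting the connection changes together, the binary COPY path documented above can be exercised end to end, and check_socket gives a cheap local health probe. A runnable sketch (assumes a reachable database named test and permission to create a temporary table; both names are placeholders):

  require 'pg'

  conn = PG.connect(dbname: 'test')   # placeholder database name
  conn.exec("CREATE TEMP TABLE my_table (a text, b text, c text, d text)")

  # Send rows in binary format; copy_data writes the PGCOPY header and trailer itself.
  enco = PG::BinaryEncoder::CopyRow.new
  conn.copy_data "COPY my_table FROM STDIN (FORMAT binary)", enco do
    conn.put_copy_data ['some', 'data', 'to', 'copy']
    conn.put_copy_data ['more', 'data', 'to', 'copy']
  end

  # Read the rows back in binary format; the decoder consumes the file trailer.
  deco = PG::BinaryDecoder::CopyRow.new
  conn.copy_data "COPY my_table TO STDOUT (FORMAT binary)", deco do
    while row = conn.get_copy_data
      p row                  # => ["some", "data", "to", "copy"], ...
    end
  end

  conn.check_socket          # raises a kind of PG::Error if the socket is unusable
  conn.finish
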
data/lib/pg/exceptions.rb CHANGED
@@ -14,5 +14,12 @@ module PG
  end
  end

+ class NotAllCopyDataRetrieved < PG::Error
+ end
+ class LostCopyState < PG::Error
+ end
+ class NotInBlockingMode < PG::Error
+ end
+
  end # module PG
 
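All three new classes derive from PG::Error, so existing rescue PG::Error handlers keep working while COPY-specific failures become distinguishable. A small sketch of the PG::NotInBlockingMode case, which copy_data raises up front before the COPY statement is executed (assumes an open connection conn; the table name is a placeholder):

  conn.setnonblocking(true)
  begin
    conn.copy_data("COPY my_table TO STDOUT") { }   # my_table is a placeholder
  rescue PG::NotInBlockingMode => err
    warn err.message         # => "copy_data can not be used in nonblocking mode"
  ensure
    conn.setnonblocking(false)
  end
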
data/lib/pg/text_decoder/date.rb ADDED
@@ -0,0 +1,18 @@
+ # -*- ruby -*-
+ # frozen_string_literal: true
+
+ require 'date'
+
+ module PG
+ module TextDecoder
+ class Date < SimpleDecoder
+ def decode(string, tuple=nil, field=nil)
+ if string =~ /\A(\d{4})-(\d\d)-(\d\d)\z/
+ ::Date.new $1.to_i, $2.to_i, $3.to_i
+ else
+ string
+ end
+ end
+ end
+ end
+ end # module PG
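
Since the decoder above is plain Ruby, it can be exercised directly; per the implementation, strings that don't match the ISO pattern are returned unchanged. A short sketch:

  require 'pg'

  deco = PG::TextDecoder::Date.new
  p deco.decode("2023-03-31")   # => #<Date: 2023-03-31 ...>
  p deco.decode("31.03.2023")   # => "31.03.2023" (not ISO format, passed through)
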
data/lib/pg/text_decoder/inet.rb ADDED
@@ -0,0 +1,9 @@
+ # -*- ruby -*-
+ # frozen_string_literal: true
+
+ module PG
+ module TextDecoder
+ # Init C part of the decoder
+ init_inet
+ end
+ end # module PG
data/lib/pg/text_decoder/json.rb ADDED
@@ -0,0 +1,14 @@
+ # -*- ruby -*-
+ # frozen_string_literal: true
+
+ require 'json'
+
+ module PG
+ module TextDecoder
+ class JSON < SimpleDecoder
+ def decode(string, tuple=nil, field=nil)
+ ::JSON.parse(string, quirks_mode: true)
+ end
+ end
+ end
+ end # module PG
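
The JSON decoder is likewise a thin wrapper around the stdlib parser; quirks_mode also accepts bare scalars as returned for json/jsonb columns. A short sketch:

  require 'pg'

  deco = PG::TextDecoder::JSON.new
  p deco.decode('{"a":[1,2,3]}')    # => {"a"=>[1, 2, 3]}
  p deco.decode('"just a string"')  # => "just a string"
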
data/lib/pg/text_decoder/numeric.rb ADDED
@@ -0,0 +1,9 @@
+ # -*- ruby -*-
+ # frozen_string_literal: true
+
+ module PG
+ module TextDecoder
+ # Init C part of the decoder
+ init_numeric
+ end
+ end # module PG
data/lib/pg/text_decoder/timestamp.rb ADDED
@@ -0,0 +1,30 @@
+ # -*- ruby -*-
+ # frozen_string_literal: true
+
+ module PG
+ module TextDecoder
+ # Convenience classes for timezone options
+ class TimestampUtc < Timestamp
+ def initialize(hash={}, **kwargs)
+ warn("PG::Coder.new(hash) is deprecated. Please use keyword arguments instead! Called from #{caller.first}", category: :deprecated) unless hash.empty?
+ super(**hash, **kwargs, flags: PG::Coder::TIMESTAMP_DB_UTC | PG::Coder::TIMESTAMP_APP_UTC)
+ end
+ end
+ class TimestampUtcToLocal < Timestamp
+ def initialize(hash={}, **kwargs)
+ warn("PG::Coder.new(hash) is deprecated. Please use keyword arguments instead! Called from #{caller.first}", category: :deprecated) unless hash.empty?
+ super(**hash, **kwargs, flags: PG::Coder::TIMESTAMP_DB_UTC | PG::Coder::TIMESTAMP_APP_LOCAL)
+ end
+ end
+ class TimestampLocal < Timestamp
+ def initialize(hash={}, **kwargs)
+ warn("PG::Coder.new(hash) is deprecated. Please use keyword arguments instead! Called from #{caller.first}", category: :deprecated) unless hash.empty?
+ super(**hash, **kwargs, flags: PG::Coder::TIMESTAMP_DB_LOCAL | PG::Coder::TIMESTAMP_APP_LOCAL)
+ end
+ end
+
+ # For backward compatibility:
+ TimestampWithoutTimeZone = TimestampLocal
+ TimestampWithTimeZone = Timestamp
+ end
+ end # module PG
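
These subclasses only pin the timezone flags of the underlying Timestamp decoder. A sketch of the difference for a timestamp string without an offset; the expected results in the comments are an assumption based on the flag combinations above, with a local zone other than UTC:

  require 'pg'

  ts = "2024-05-17 12:34:56"
  p PG::TextDecoder::TimestampUtc.new.decode(ts)          # => 2024-05-17 12:34:56 UTC (assumed)
  p PG::TextDecoder::TimestampUtcToLocal.new.decode(ts)   # same instant, converted to local time (assumed)
  p PG::TextDecoder::TimestampLocal.new.decode(ts)        # wall-clock time in the local zone (assumed)
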
data/lib/pg/text_encoder/date.rb ADDED
@@ -0,0 +1,12 @@
+ # -*- ruby -*-
+ # frozen_string_literal: true
+
+ module PG
+ module TextEncoder
+ class Date < SimpleEncoder
+ def encode(value)
+ value.respond_to?(:strftime) ? value.strftime("%Y-%m-%d") : value
+ end
+ end
+ end
+ end # module PG
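
The matching encoder is just as small: anything responding to strftime is rendered as an ISO date string, everything else passes through. A short sketch:

  require 'pg'
  require 'date'

  enc = PG::TextEncoder::Date.new
  p enc.encode(Date.new(2024, 5, 17))   # => "2024-05-17"
  p enc.encode("2024-05-17")            # => "2024-05-17" (non-date values pass through unchanged)
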