pg 1.6.0.rc1-x86_64-linux

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. checksums.yaml +7 -0
  2. checksums.yaml.gz.sig +4 -0
  3. data/BSDL +22 -0
  4. data/Contributors.rdoc +46 -0
  5. data/Gemfile +23 -0
  6. data/History.md +958 -0
  7. data/LICENSE +56 -0
  8. data/Manifest.txt +72 -0
  9. data/POSTGRES +23 -0
  10. data/README-OS_X.rdoc +68 -0
  11. data/README-Windows.rdoc +56 -0
  12. data/README.ja.md +300 -0
  13. data/README.md +286 -0
  14. data/Rakefile +161 -0
  15. data/certs/ged.pem +24 -0
  16. data/certs/kanis@comcard.de.pem +20 -0
  17. data/certs/larskanis-2022.pem +26 -0
  18. data/certs/larskanis-2023.pem +24 -0
  19. data/certs/larskanis-2024.pem +24 -0
  20. data/ext/errorcodes.def +1043 -0
  21. data/ext/errorcodes.rb +45 -0
  22. data/ext/errorcodes.txt +494 -0
  23. data/ext/extconf.rb +282 -0
  24. data/ext/gvl_wrappers.c +32 -0
  25. data/ext/gvl_wrappers.h +297 -0
  26. data/ext/pg.c +703 -0
  27. data/ext/pg.h +390 -0
  28. data/ext/pg_binary_decoder.c +460 -0
  29. data/ext/pg_binary_encoder.c +583 -0
  30. data/ext/pg_cancel_connection.c +360 -0
  31. data/ext/pg_coder.c +622 -0
  32. data/ext/pg_connection.c +4869 -0
  33. data/ext/pg_copy_coder.c +921 -0
  34. data/ext/pg_errors.c +95 -0
  35. data/ext/pg_record_coder.c +522 -0
  36. data/ext/pg_result.c +1764 -0
  37. data/ext/pg_text_decoder.c +1008 -0
  38. data/ext/pg_text_encoder.c +833 -0
  39. data/ext/pg_tuple.c +572 -0
  40. data/ext/pg_type_map.c +200 -0
  41. data/ext/pg_type_map_all_strings.c +130 -0
  42. data/ext/pg_type_map_by_class.c +271 -0
  43. data/ext/pg_type_map_by_column.c +355 -0
  44. data/ext/pg_type_map_by_mri_type.c +313 -0
  45. data/ext/pg_type_map_by_oid.c +388 -0
  46. data/ext/pg_type_map_in_ruby.c +333 -0
  47. data/ext/pg_util.c +149 -0
  48. data/ext/pg_util.h +65 -0
  49. data/ext/vc/pg.sln +26 -0
  50. data/ext/vc/pg_18/pg.vcproj +216 -0
  51. data/ext/vc/pg_19/pg_19.vcproj +209 -0
  52. data/lib/2.7/pg_ext.so +0 -0
  53. data/lib/3.0/pg_ext.so +0 -0
  54. data/lib/3.1/pg_ext.so +0 -0
  55. data/lib/3.2/pg_ext.so +0 -0
  56. data/lib/3.3/pg_ext.so +0 -0
  57. data/lib/pg/basic_type_map_based_on_result.rb +67 -0
  58. data/lib/pg/basic_type_map_for_queries.rb +202 -0
  59. data/lib/pg/basic_type_map_for_results.rb +104 -0
  60. data/lib/pg/basic_type_registry.rb +311 -0
  61. data/lib/pg/binary_decoder/date.rb +9 -0
  62. data/lib/pg/binary_decoder/timestamp.rb +26 -0
  63. data/lib/pg/binary_encoder/timestamp.rb +20 -0
  64. data/lib/pg/cancel_connection.rb +30 -0
  65. data/lib/pg/coder.rb +106 -0
  66. data/lib/pg/connection.rb +1027 -0
  67. data/lib/pg/exceptions.rb +31 -0
  68. data/lib/pg/result.rb +43 -0
  69. data/lib/pg/text_decoder/date.rb +21 -0
  70. data/lib/pg/text_decoder/inet.rb +9 -0
  71. data/lib/pg/text_decoder/json.rb +17 -0
  72. data/lib/pg/text_decoder/numeric.rb +9 -0
  73. data/lib/pg/text_decoder/timestamp.rb +30 -0
  74. data/lib/pg/text_encoder/date.rb +13 -0
  75. data/lib/pg/text_encoder/inet.rb +31 -0
  76. data/lib/pg/text_encoder/json.rb +17 -0
  77. data/lib/pg/text_encoder/numeric.rb +9 -0
  78. data/lib/pg/text_encoder/timestamp.rb +24 -0
  79. data/lib/pg/tuple.rb +30 -0
  80. data/lib/pg/type_map_by_column.rb +16 -0
  81. data/lib/pg/version.rb +4 -0
  82. data/lib/pg.rb +144 -0
  83. data/misc/openssl-pg-segfault.rb +31 -0
  84. data/misc/postgres/History.txt +9 -0
  85. data/misc/postgres/Manifest.txt +5 -0
  86. data/misc/postgres/README.txt +21 -0
  87. data/misc/postgres/Rakefile +21 -0
  88. data/misc/postgres/lib/postgres.rb +16 -0
  89. data/misc/ruby-pg/History.txt +9 -0
  90. data/misc/ruby-pg/Manifest.txt +5 -0
  91. data/misc/ruby-pg/README.txt +21 -0
  92. data/misc/ruby-pg/Rakefile +21 -0
  93. data/misc/ruby-pg/lib/ruby/pg.rb +16 -0
  94. data/pg.gemspec +36 -0
  95. data/ports/x86_64-linux/lib/libpq-ruby-pg.so.1 +0 -0
  96. data/rakelib/task_extension.rb +46 -0
  97. data/sample/array_insert.rb +20 -0
  98. data/sample/async_api.rb +102 -0
  99. data/sample/async_copyto.rb +39 -0
  100. data/sample/async_mixed.rb +56 -0
  101. data/sample/check_conn.rb +21 -0
  102. data/sample/copydata.rb +71 -0
  103. data/sample/copyfrom.rb +81 -0
  104. data/sample/copyto.rb +19 -0
  105. data/sample/cursor.rb +21 -0
  106. data/sample/disk_usage_report.rb +177 -0
  107. data/sample/issue-119.rb +94 -0
  108. data/sample/losample.rb +69 -0
  109. data/sample/minimal-testcase.rb +17 -0
  110. data/sample/notify_wait.rb +72 -0
  111. data/sample/pg_statistics.rb +285 -0
  112. data/sample/replication_monitor.rb +222 -0
  113. data/sample/test_binary_values.rb +33 -0
  114. data/sample/wal_shipper.rb +434 -0
  115. data/sample/warehouse_partitions.rb +311 -0
  116. data.tar.gz.sig +0 -0
  117. metadata +252 -0
  118. metadata.gz.sig +0 -0
data/lib/pg/connection.rb
@@ -0,0 +1,1027 @@
1
+ # -*- ruby -*-
2
+ # frozen_string_literal: true
3
+
4
+ require 'pg' unless defined?( PG )
5
+ require 'io/wait' unless ::IO.public_instance_methods(false).include?(:wait_readable) # for ruby < 3.0
6
+ require 'socket'
7
+
8
+ # The PostgreSQL connection class. The interface for this class is based on
9
+ # {libpq}[http://www.postgresql.org/docs/current/libpq.html], the C
10
+ # application programmer's interface to PostgreSQL. Some familiarity with libpq
11
+ # is recommended, but not necessary.
12
+ #
13
+ # For example, to send a query to the database on localhost:
14
+ #
15
+ # require 'pg'
16
+ # conn = PG::Connection.open(:dbname => 'test')
17
+ # res = conn.exec_params('SELECT $1 AS a, $2 AS b, $3 AS c', [1, 2, nil])
18
+ # # Equivalent to:
19
+ # # res = conn.exec('SELECT 1 AS a, 2 AS b, NULL AS c')
20
+ #
21
+ # See the PG::Result class for information on working with the results of a query.
22
+ #
23
+ # Many methods of this class come in three variants:
24
+ # 1. #exec - the base method, which is an alias for #async_exec .
25
+ # This is the method that should be used in general.
26
+ # 2. #async_exec - the async-aware version of the method, implemented with libpq's async API.
27
+ # 3. #sync_exec - the version of the method implemented with libpq's blocking function(s).
28
+ #
29
+ # The sync and async versions of a method can be switched via Connection.async_api= ; however, it is not recommended to change the default.
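+ #
+ # A minimal sketch of the three variants (assumes a reachable database named 'test'):
+ #   conn = PG::Connection.open(dbname: 'test')
+ #   conn.exec('SELECT 1')        # default, same as conn.async_exec('SELECT 1')
+ #   conn.sync_exec('SELECT 1')   # blocking libpq call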
30
+ class PG::Connection
31
+
32
+ # The order in which options are passed to the ::connect method.
33
+ CONNECT_ARGUMENT_ORDER = %w[host port options tty dbname user password].freeze
34
+ private_constant :CONNECT_ARGUMENT_ORDER
35
+
36
+ ### Quote a single +value+ for use in a connection-parameter string.
37
+ def self.quote_connstr( value )
38
+ return "'" + value.to_s.gsub( /[\\']/ ) {|m| '\\' + m } + "'"
39
+ end
40
+
41
+ # Convert Hash options to a connection String
42
+ #
43
+ # Values are properly quoted and escaped.
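+ #
+ # For example (illustrative values):
+ #   PG::Connection.connect_hash_to_string(host: "localhost", port: 5432)
+ #   # => "host='localhost' port='5432'"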
44
+ def self.connect_hash_to_string( hash )
45
+ hash.map { |k,v| "#{k}=#{quote_connstr(v)}" }.join( ' ' )
46
+ end
47
+
48
+ # Shareable program name for Ractor
49
+ PROGRAM_NAME = $PROGRAM_NAME.dup.freeze
50
+ private_constant :PROGRAM_NAME
51
+
52
+ # Parse the connection +args+ into a connection-parameter string.
53
+ # See PG::Connection.new for valid arguments.
54
+ #
55
+ # It accepts:
56
+ # * an option String such as "host=name port=5432"
57
+ # * an option Hash such as {host: "name", port: 5432}
58
+ # * a URI string
59
+ # * a URI object
60
+ # * positional arguments
61
+ #
62
+ # The method adds the option "fallback_application_name" if it isn't already set.
63
+ # It returns a connection string with "key=value" pairs.
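+ #
+ # A sketch of equivalent calls (the host name 'db1' is only an example); each returns
+ # an option string such as "host='db1' port='5432' fallback_application_name='...'":
+ #   PG::Connection.parse_connect_args("host=db1 port=5432")
+ #   PG::Connection.parse_connect_args(host: "db1", port: 5432)
+ #   PG::Connection.parse_connect_args("db1", 5432)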
64
+ def self.parse_connect_args( *args )
65
+ hash_arg = args.last.is_a?( Hash ) ? args.pop.transform_keys(&:to_sym) : {}
66
+ iopts = {}
67
+
68
+ if args.length == 1
69
+ case args.first.to_s
70
+ when /=/, /:\/\//
71
+ # Option or URL string style
72
+ conn_string = args.first.to_s
73
+ iopts = PG::Connection.conninfo_parse(conn_string).each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] }
74
+ else
75
+ # Positional parameters (only host given)
76
+ iopts[CONNECT_ARGUMENT_ORDER.first.to_sym] = args.first
77
+ end
78
+ else
79
+ # Positional parameters with host and more
80
+ max = CONNECT_ARGUMENT_ORDER.length
81
+ raise ArgumentError,
82
+ "Extra positional parameter %d: %p" % [ max + 1, args[max] ] if args.length > max
83
+
84
+ CONNECT_ARGUMENT_ORDER.zip( args ) do |(k,v)|
85
+ iopts[ k.to_sym ] = v if v
86
+ end
87
+ iopts.delete(:tty) # ignore obsolete tty parameter
88
+ end
89
+
90
+ iopts.merge!( hash_arg )
91
+
92
+ if !iopts[:fallback_application_name]
93
+ iopts[:fallback_application_name] = PROGRAM_NAME.sub( /^(.{30}).{4,}(.{30})$/ ){ $1+"..."+$2 }
94
+ end
95
+
96
+ return connect_hash_to_string(iopts)
97
+ end
98
+
99
+ # Return a String representation of the object suitable for debugging.
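+ #
+ # Example output (the object address and connection parameters vary):
+ #   conn.inspect
+ #   # => "#<PG::Connection:0x00007f2a4c8b1d38 host=localhost port=5432 user=postgres>"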
100
+ def inspect
101
+ str = self.to_s
102
+ str[-1,0] = if finished?
103
+ " finished"
104
+ else
105
+ stats = []
106
+ stats << " status=#{ PG.constants.grep(/CONNECTION_/).find{|c| PG.const_get(c) == status} }" if status != CONNECTION_OK
107
+ stats << " transaction_status=#{ PG.constants.grep(/PQTRANS_/).find{|c| PG.const_get(c) == transaction_status} }" if transaction_status != PG::PQTRANS_IDLE
108
+ stats << " nonblocking=#{ isnonblocking }" if isnonblocking
109
+ stats << " pipeline_status=#{ PG.constants.grep(/PQ_PIPELINE_/).find{|c| PG.const_get(c) == pipeline_status} }" if respond_to?(:pipeline_status) && pipeline_status != PG::PQ_PIPELINE_OFF
110
+ stats << " client_encoding=#{ get_client_encoding }" if get_client_encoding != "UTF8"
111
+ stats << " type_map_for_results=#{ type_map_for_results.to_s }" unless type_map_for_results.is_a?(PG::TypeMapAllStrings)
112
+ stats << " type_map_for_queries=#{ type_map_for_queries.to_s }" unless type_map_for_queries.is_a?(PG::TypeMapAllStrings)
113
+ stats << " encoder_for_put_copy_data=#{ encoder_for_put_copy_data.to_s }" if encoder_for_put_copy_data
114
+ stats << " decoder_for_get_copy_data=#{ decoder_for_get_copy_data.to_s }" if decoder_for_get_copy_data
115
+ " host=#{host} port=#{port} user=#{user}#{stats.join}"
116
+ end
117
+ return str
118
+ end
119
+
120
+ BinarySignature = "PGCOPY\n\377\r\n\0"
121
+ private_constant :BinarySignature
122
+
123
+ # call-seq:
124
+ # conn.copy_data( sql [, coder] ) {|sql_result| ... } -> PG::Result
125
+ #
126
+ # Execute a copy process for transferring data to or from the server.
127
+ #
128
+ # This issues the SQL COPY command via #exec. The response to this
129
+ # (if there is no error in the command) is a PG::Result object that
130
+ # is passed to the block, bearing a status code of PGRES_COPY_OUT or
131
+ # PGRES_COPY_IN (depending on the specified copy direction).
132
+ # The application should then use #put_copy_data or #get_copy_data
133
+ # to receive or transmit data rows and should return from the block
134
+ # when finished.
135
+ #
136
+ # #copy_data returns another PG::Result object when the data transfer
137
+ # is complete. An exception is raised if a problem is encountered, so it
138
+ # isn't necessary to check the status of either result object.
139
+ # At this point further SQL commands can be issued via #exec.
140
+ # (It is not possible to execute other SQL commands using the same
141
+ # connection while the COPY operation is in progress.)
142
+ #
143
+ # This method ensures that the copy process is properly terminated
144
+ # in case of client-side or server-side failures. Therefore, in
145
+ # blocking mode of operation, #copy_data is preferred over raw calls
146
+ # to #put_copy_data, #get_copy_data and #put_copy_end.
147
+ #
148
+ # _coder_ can be a PG::Coder derivation
149
+ # (typically PG::TextEncoder::CopyRow or PG::TextDecoder::CopyRow).
150
+ # This enables encoding of data fields given to #put_copy_data
151
+ # or decoding of fields received by #get_copy_data.
152
+ #
153
+ # Example with CSV input format:
154
+ # conn.exec "create table my_table (a text,b text,c text,d text)"
155
+ # conn.copy_data "COPY my_table FROM STDIN CSV" do
156
+ # conn.put_copy_data "some,data,to,copy\n"
157
+ # conn.put_copy_data "more,data,to,copy\n"
158
+ # end
159
+ # This creates +my_table+ and inserts two CSV rows.
160
+ #
161
+ # The same with text format encoder PG::TextEncoder::CopyRow
162
+ # and Array input:
163
+ # enco = PG::TextEncoder::CopyRow.new
164
+ # conn.copy_data "COPY my_table FROM STDIN", enco do
165
+ # conn.put_copy_data ['some', 'data', 'to', 'copy']
166
+ # conn.put_copy_data ['more', 'data', 'to', 'copy']
167
+ # end
168
+ #
169
+ # All 4 CopyRow classes can take a type map to specify how the columns are mapped to and from the database format.
170
+ # For details see the particular CopyRow class description.
171
+ #
172
+ # PG::BinaryEncoder::CopyRow can be used to send data in binary format to the server.
173
+ # In this case copy_data generates the header and trailer data automatically:
174
+ # enco = PG::BinaryEncoder::CopyRow.new
175
+ # conn.copy_data "COPY my_table FROM STDIN (FORMAT binary)", enco do
176
+ # conn.put_copy_data ['some', 'data', 'to', 'copy']
177
+ # conn.put_copy_data ['more', 'data', 'to', 'copy']
178
+ # end
179
+ #
180
+ # Example with CSV output format:
181
+ # conn.copy_data "COPY my_table TO STDOUT CSV" do
182
+ # while row=conn.get_copy_data
183
+ # p row
184
+ # end
185
+ # end
186
+ # This prints all rows of +my_table+ to stdout:
187
+ # "some,data,to,copy\n"
188
+ # "more,data,to,copy\n"
189
+ #
190
+ # The same with text format decoder PG::TextDecoder::CopyRow
191
+ # and Array output:
192
+ # deco = PG::TextDecoder::CopyRow.new
193
+ # conn.copy_data "COPY my_table TO STDOUT", deco do
194
+ # while row=conn.get_copy_data
195
+ # p row
196
+ # end
197
+ # end
198
+ # This receives all rows of +my_table+ as Ruby arrays:
199
+ # ["some", "data", "to", "copy"]
200
+ # ["more", "data", "to", "copy"]
201
+ #
202
+ # Also PG::BinaryDecoder::CopyRow can be used to retrieve data in binary format from the server.
203
+ # In this case the header and trailer data is processed by the decoder and the remaining +nil+ from get_copy_data is processed by copy_data, so that binary data can be processed in the same way as text data:
204
+ # deco = PG::BinaryDecoder::CopyRow.new
205
+ # conn.copy_data "COPY my_table TO STDOUT (FORMAT binary)", deco do
206
+ # while row=conn.get_copy_data
207
+ # p row
208
+ # end
209
+ # end
210
+ # This receives all rows of +my_table+ as Ruby arrays:
211
+ # ["some", "data", "to", "copy"]
212
+ # ["more", "data", "to", "copy"]
213
+
214
+ def copy_data( sql, coder=nil )
215
+ raise PG::NotInBlockingMode.new("copy_data can not be used in nonblocking mode", connection: self) if nonblocking?
216
+ res = exec( sql )
217
+
218
+ case res.result_status
219
+ when PGRES_COPY_IN
220
+ begin
221
+ if coder && res.binary_tuples == 1
222
+ # Binary file header (11 byte signature, 32 bit flags and 32 bit extension length)
223
+ put_copy_data(BinarySignature + ("\x00" * 8))
224
+ end
225
+
226
+ if coder
227
+ old_coder = self.encoder_for_put_copy_data
228
+ self.encoder_for_put_copy_data = coder
229
+ end
230
+
231
+ yield res
232
+ rescue Exception => err
233
+ errmsg = "%s while copy data: %s" % [ err.class.name, err.message ]
234
+ begin
235
+ put_copy_end( errmsg )
236
+ rescue PG::Error
237
+ # Ignore error in cleanup to avoid losing original exception
238
+ end
239
+ discard_results
240
+ raise err
241
+ else
242
+ begin
243
+ self.encoder_for_put_copy_data = old_coder if coder
244
+
245
+ if coder && res.binary_tuples == 1
246
+ put_copy_data("\xFF\xFF") # Binary file trailer 16 bit "-1"
247
+ end
248
+
249
+ put_copy_end
250
+ rescue PG::Error => err
251
+ raise PG::LostCopyState.new("#{err} (probably by executing another SQL query while running a COPY command)", connection: self)
252
+ end
253
+ get_last_result
254
+ ensure
255
+ self.encoder_for_put_copy_data = old_coder if coder
256
+ end
257
+
258
+ when PGRES_COPY_OUT
259
+ begin
260
+ if coder
261
+ old_coder = self.decoder_for_get_copy_data
262
+ self.decoder_for_get_copy_data = coder
263
+ end
264
+ yield res
265
+ rescue Exception
266
+ cancel
267
+ discard_results
268
+ raise
269
+ else
270
+ if coder && res.binary_tuples == 1
271
+ # There are two end markers in binary mode: file trailer and the final nil.
272
+ # The file trailer is expected to be processed by BinaryDecoder::CopyRow and already returns nil, so that the remaining NULL from PQgetCopyData is retrieved here:
273
+ if get_copy_data
274
+ discard_results
275
+ raise PG::NotAllCopyDataRetrieved.new("Not all binary COPY data retrieved", connection: self)
276
+ end
277
+ end
278
+ res = get_last_result
279
+ if !res
280
+ discard_results
281
+ raise PG::LostCopyState.new("Lost COPY state (probably by executing another SQL query while running a COPY command)", connection: self)
282
+ elsif res.result_status != PGRES_COMMAND_OK
283
+ discard_results
284
+ raise PG::NotAllCopyDataRetrieved.new("Not all COPY data retrieved", connection: self)
285
+ end
286
+ res
287
+ ensure
288
+ self.decoder_for_get_copy_data = old_coder if coder
289
+ end
290
+
291
+ else
292
+ raise ArgumentError, "SQL command is no COPY statement: #{sql}"
293
+ end
294
+ end
295
+
296
+ # Backward-compatibility aliases for stuff that's moved into PG.
297
+ class << self
298
+ define_method( :isthreadsafe, &PG.method(:isthreadsafe) )
299
+ end
300
+
301
+ #
302
+ # call-seq:
303
+ # conn.transaction { |conn| ... } -> result of the block
304
+ #
305
+ # Executes a +BEGIN+ at the start of the block,
306
+ # and a +COMMIT+ at the end of the block, or
307
+ # +ROLLBACK+ if any exception occurs.
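+ #
+ # A minimal sketch, assuming a table +accounts+ exists:
+ #   conn.transaction do |c|
+ #     c.exec_params("UPDATE accounts SET balance = balance - 100 WHERE id = $1", [1])
+ #     c.exec_params("UPDATE accounts SET balance = balance + 100 WHERE id = $1", [2])
+ #   end
+ #
+ # Raising PG::RollbackTransaction inside the block triggers a ROLLBACK instead of a
+ # COMMIT and is not re-raised to the caller.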
308
+ def transaction
309
+ rollback = false
310
+ exec "BEGIN"
311
+ yield(self)
312
+ rescue PG::RollbackTransaction
313
+ rollback = true
314
+ cancel if transaction_status == PG::PQTRANS_ACTIVE
315
+ block
316
+ exec "ROLLBACK"
317
+ rescue Exception
318
+ rollback = true
319
+ cancel if transaction_status == PG::PQTRANS_ACTIVE
320
+ block
321
+ exec "ROLLBACK"
322
+ raise
323
+ ensure
324
+ exec "COMMIT" unless rollback
325
+ end
326
+
327
+ ### Returns an array of Hashes with connection defaults. See ::conndefaults
328
+ ### for details.
329
+ def conndefaults
330
+ return self.class.conndefaults
331
+ end
332
+
333
+ ### Return the Postgres connection defaults structure as a Hash keyed by option
334
+ ### keyword (as a Symbol).
335
+ ###
336
+ ### See also #conndefaults
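+ ###
+ ### For example (typical value; actual defaults depend on the local build and environment):
+ ###   PG::Connection.conndefaults_hash[:port]  # => "5432"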
337
+ def self.conndefaults_hash
338
+ return self.conndefaults.each_with_object({}) do |info, hash|
339
+ hash[ info[:keyword].to_sym ] = info[:val]
340
+ end
341
+ end
342
+
343
+ ### Returns a Hash with connection defaults. See ::conndefaults_hash
344
+ ### for details.
345
+ def conndefaults_hash
346
+ return self.class.conndefaults_hash
347
+ end
348
+
349
+ ### Return the Postgres connection info structure as a Hash keyed by option
350
+ ### keyword (as a Symbol).
351
+ ###
352
+ ### See also #conninfo
353
+ def conninfo_hash
354
+ return self.conninfo.each_with_object({}) do |info, hash|
355
+ hash[ info[:keyword].to_sym ] = info[:val]
356
+ end
357
+ end
358
+
359
+ # call-seq:
360
+ # conn.ssl_attributes -> Hash<String,String>
361
+ #
362
+ # Returns SSL-related information about the connection as key/value pairs
363
+ #
364
+ # The available attributes varies depending on the SSL library being used,
365
+ # and the type of connection.
366
+ #
367
+ # See also #ssl_attribute
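+ #
+ # Example output (attribute names and values vary with the local setup):
+ #   conn.ssl_attributes
+ #   # => {"library"=>"OpenSSL", "protocol"=>"TLSv1.3", "key_bits"=>"256", ...}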
368
+ def ssl_attributes
369
+ ssl_attribute_names.each.with_object({}) do |n,h|
370
+ h[n] = ssl_attribute(n)
371
+ end
372
+ end
373
+
374
+ # Read all pending socket input to internal memory and raise an exception in case of errors.
375
+ #
376
+ # This verifies that the connection socket is in a usable state and not aborted in any way.
377
+ # No communication is done with the server.
378
+ # Only pending data is read from the socket - the method doesn't wait for any outstanding server answers.
379
+ #
380
+ # Raises a kind of PG::Error if there was an error reading the data or if the socket is in a failure state.
381
+ #
382
+ # The method doesn't verify that the server is still responding.
383
+ # To verify that communication with the server works, it is recommended to use something like <tt>conn.exec('')</tt> instead.
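+ #
+ # A minimal sketch of a cheap local health check:
+ #   begin
+ #     conn.check_socket
+ #   rescue PG::Error
+ #     conn.reset   # re-establish the connection if the socket is unusable
+ #   end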
384
+ def check_socket
385
+ while socket_io.wait_readable(0)
386
+ consume_input
387
+ end
388
+ nil
389
+ end
390
+
391
+ # call-seq:
392
+ # conn.get_result() -> PG::Result
393
+ # conn.get_result() {|pg_result| block }
394
+ #
395
+ # Blocks waiting for the next result from a call to
396
+ # #send_query (or another asynchronous command), and returns
397
+ # it. Returns +nil+ if no more results are available.
398
+ #
399
+ # Note: call this function repeatedly until it returns +nil+, or else
400
+ # you will not be able to issue further commands.
401
+ #
402
+ # If the optional code block is given, it will be passed <i>result</i> as an argument,
403
+ # and the PG::Result object will automatically be cleared when the block terminates.
404
+ # In this instance, <code>conn.get_result</code> returns the value of the block.
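+ #
+ # A minimal sketch of fetching all results of a previously sent query:
+ #   conn.send_query("SELECT 1 AS one")
+ #   while res = conn.get_result
+ #     p res.getvalue(0, 0)   # => "1"
+ #   end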
405
+ def get_result
406
+ block
407
+ sync_get_result
408
+ end
409
+ alias async_get_result get_result
410
+
411
+ # call-seq:
412
+ # conn.get_copy_data( [ nonblock = false [, decoder = nil ]] ) -> Object
413
+ #
414
+ # Return one row of data, +nil+
415
+ # if the copy is done, or +false+ if the call would
416
+ # block (only possible if _nonblock_ is true).
417
+ #
418
+ # If _decoder_ is not set or +nil+, data is returned as binary string.
419
+ #
420
+ # If _decoder_ is set to a PG::Coder derivation, the return type depends on this decoder.
421
+ # PG::TextDecoder::CopyRow decodes the received data fields from one row of PostgreSQL's
422
+ # COPY text format to an Array of Strings.
423
+ # Optionally the decoder can type cast the single fields to various Ruby types in one step,
424
+ # if PG::TextDecoder::CopyRow#type_map is set accordingly.
425
+ #
426
+ # See also #copy_data.
427
+ #
428
+ def get_copy_data(async=false, decoder=nil)
429
+ if async
430
+ return sync_get_copy_data(async, decoder)
431
+ else
432
+ while (res=sync_get_copy_data(true, decoder)) == false
433
+ socket_io.wait_readable
434
+ consume_input
435
+ end
436
+ return res
437
+ end
438
+ end
439
+ alias async_get_copy_data get_copy_data
440
+
441
+
442
+ # In async_api=true mode (default) all send calls run nonblocking.
443
+ # The difference is that setnonblocking(true) disables automatic handling of would-block cases.
444
+ # In async_api=false mode all send calls run directly on libpq.
445
+ # Blocking vs. nonblocking state can be changed in libpq.
446
+
447
+ # call-seq:
448
+ # conn.setnonblocking(Boolean) -> nil
449
+ #
450
+ # Sets the nonblocking status of the connection.
451
+ # In the blocking state, calls to #send_query
452
+ # will block until the message is sent to the server,
453
+ # but will not wait for the query results.
454
+ # In the nonblocking state, calls to #send_query
455
+ # will return an error if the socket is not ready for
456
+ # writing.
457
+ # Note: This function does not affect #exec, because
458
+ # that function doesn't return until the server has
459
+ # processed the query and returned the results.
460
+ #
461
+ # Returns +nil+.
462
+ def setnonblocking(enabled)
463
+ singleton_class.async_send_api = !enabled
464
+ self.flush_data = !enabled
465
+ sync_setnonblocking(true)
466
+ end
467
+ alias async_setnonblocking setnonblocking
468
+
469
+ # sync/async isnonblocking methods are switched by async_setnonblocking()
470
+
471
+ # call-seq:
472
+ # conn.isnonblocking() -> Boolean
473
+ #
474
+ # Returns the blocking status of the database connection.
475
+ # Returns +true+ if the connection is set to nonblocking mode and +false+ if blocking.
476
+ def isnonblocking
477
+ false
478
+ end
479
+ alias async_isnonblocking isnonblocking
480
+ alias nonblocking? isnonblocking
481
+
482
+ # call-seq:
483
+ # conn.put_copy_data( buffer [, encoder] ) -> Boolean
484
+ #
485
+ # Transmits _buffer_ as copy data to the server.
486
+ # Returns true if the data was sent, false if it was
487
+ # not sent (false is only possible if the connection
488
+ # is in nonblocking mode, and this command would block).
489
+ #
490
+ # _encoder_ can be a PG::Coder derivation (typically PG::TextEncoder::CopyRow).
491
+ # This encodes the data fields given as _buffer_ from an Array of Strings to
492
+ # PostgreSQL's COPY text format, including proper escaping. Optionally
493
+ # the encoder can type cast the fields from various Ruby types in one step,
494
+ # if PG::TextEncoder::CopyRow#type_map is set accordingly.
495
+ #
496
+ # Raises an exception if an error occurs.
497
+ #
498
+ # See also #copy_data.
499
+ #
500
+ def put_copy_data(buffer, encoder=nil)
501
+ # sync_put_copy_data does a non-blocking attempt to flush data.
502
+ until res=sync_put_copy_data(buffer, encoder)
503
+ # It didn't flush immediately and allocation of more buffering memory failed.
504
+ # Wait for all data to be sent by doing a blocking flush.
505
+ res = flush
506
+ end
507
+
508
+ # And do a blocking flush every 100 calls.
509
+ # This avoids memory bloat when the data is sent more slowly than put_copy_data is called.
510
+ if (@calls_to_put_copy_data += 1) > 100
511
+ @calls_to_put_copy_data = 0
512
+ res = flush
513
+ end
514
+ res
515
+ end
516
+ alias async_put_copy_data put_copy_data
517
+
518
+ # call-seq:
519
+ # conn.put_copy_end( [ error_message ] ) -> Boolean
520
+ #
521
+ # Sends end-of-data indication to the server.
522
+ #
523
+ # _error_message_ is an optional parameter, and if set,
524
+ # forces the COPY command to fail with the string
525
+ # _error_message_.
526
+ #
527
+ # Returns true if the end-of-data was sent, *false* if it was
528
+ # not sent (*false* is only possible if the connection
529
+ # is in nonblocking mode, and this command would block).
530
+ def put_copy_end(*args)
531
+ until sync_put_copy_end(*args)
532
+ flush
533
+ end
534
+ @calls_to_put_copy_data = 0
535
+ flush
536
+ end
537
+ alias async_put_copy_end put_copy_end
538
+
539
+ if method_defined? :send_pipeline_sync
540
+ # call-seq:
541
+ # conn.pipeline_sync
542
+ #
543
+ # Marks a synchronization point in a pipeline by sending a sync message and flushing the send buffer.
544
+ # This serves as the delimiter of an implicit transaction and an error recovery point.
545
+ #
546
+ # See enter_pipeline_mode
547
+ #
548
+ # Raises PG::Error if the connection is not in pipeline mode or sending a sync message failed.
549
+ #
550
+ # Available since PostgreSQL-14
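+ #
+ # A minimal sketch of one pipelined query (method availability depends on the libpq version):
+ #   conn.enter_pipeline_mode
+ #   conn.send_query_params("SELECT $1::int", [1])
+ #   conn.pipeline_sync
+ #   conn.get_result           # result of the query
+ #   conn.get_result           # nil - end of this query's results
+ #   conn.get_result           # result with status PGRES_PIPELINE_SYNC
+ #   conn.exit_pipeline_mode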
551
+ def pipeline_sync(*args)
552
+ send_pipeline_sync(*args)
553
+ flush
554
+ end
555
+ alias async_pipeline_sync pipeline_sync
556
+ end
557
+
558
+ if method_defined? :sync_encrypt_password
559
+ # call-seq:
560
+ # conn.encrypt_password( password, username, algorithm=nil ) -> String
561
+ #
562
+ # This function is intended to be used by client applications that wish to send commands like <tt>ALTER USER joe PASSWORD 'pwd'</tt>.
563
+ # It is good practice not to send the original cleartext password in such a command, because it might be exposed in command logs, activity displays, and so on.
564
+ # Instead, use this function to convert the password to encrypted form before it is sent.
565
+ #
566
+ # The +password+ and +username+ arguments are the cleartext password, and the SQL name of the user it is for.
567
+ # +algorithm+ specifies the encryption algorithm to use to encrypt the password.
568
+ # Currently supported algorithms are +md5+ and +scram-sha-256+ (+on+ and +off+ are also accepted as aliases for +md5+, for compatibility with older server versions).
569
+ # Note that support for +scram-sha-256+ was introduced in PostgreSQL version 10, and will not work correctly with older server versions.
570
+ # If algorithm is omitted or +nil+, this function will query the server for the current value of the +password_encryption+ setting.
571
+ # That can block, and will fail if the current transaction is aborted, or if the connection is busy executing another query.
572
+ # If you wish to use the default algorithm for the server but want to avoid blocking, query +password_encryption+ yourself before calling #encrypt_password, and pass that value as the algorithm.
573
+ #
574
+ # Return value is the encrypted password.
575
+ # The caller can assume the string doesn't contain any special characters that would require escaping.
576
+ #
577
+ # Available since PostgreSQL-10.
578
+ # See also corresponding {libpq function}[https://www.postgresql.org/docs/current/libpq-misc.html#LIBPQ-PQENCRYPTPASSWORDCONN].
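+ #
+ # A minimal sketch, assuming a role named +joe+:
+ #   enc = conn.encrypt_password("new_password", "joe", "scram-sha-256")
+ #   conn.exec("ALTER USER joe PASSWORD '#{enc}'")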
579
+ def encrypt_password( password, username, algorithm=nil )
580
+ algorithm ||= exec("SHOW password_encryption").getvalue(0,0)
581
+ sync_encrypt_password(password, username, algorithm)
582
+ end
583
+ alias async_encrypt_password encrypt_password
584
+ end
585
+
586
+ # call-seq:
587
+ # conn.reset()
588
+ #
589
+ # Resets the backend connection. This method closes the
590
+ # backend connection and tries to re-connect.
591
+ def reset
592
+ # Use connection options from PG::Connection.new to reconnect with the same options but with renewed DNS resolution.
593
+ # Use conninfo_hash as a fallback when connect_start was used to create the connection object.
594
+ iopts = @iopts_for_reset || conninfo_hash.compact
595
+ if iopts[:host] && !iopts[:host].empty? && PG.library_version >= 100000
596
+ iopts = self.class.send(:resolve_hosts, iopts)
597
+ end
598
+ conninfo = self.class.parse_connect_args( iopts );
599
+ reset_start2(conninfo)
600
+ async_connect_or_reset(:reset_poll)
601
+ self
602
+ end
603
+ alias async_reset reset
604
+
605
+ if defined?(PG::CancelConnection)
606
+ # PostgreSQL-17+
607
+
608
+ def sync_cancel
609
+ cancon = PG::CancelConnection.new(self)
610
+ cancon.sync_cancel
611
+ rescue PG::Error => err
612
+ err.to_s
613
+ end
614
+
615
+ # call-seq:
616
+ # conn.cancel() -> String
617
+ #
618
+ # Requests cancellation of the command currently being
619
+ # processed.
620
+ #
621
+ # Returns +nil+ on success, or a string containing the
622
+ # error message if a failure occurs.
623
+ #
624
+ # On a PostgreSQL-17+ client library the class PG::CancelConnection is used.
625
+ # On older client libraries a pure Ruby implementation is used.
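+ #
+ # A minimal sketch of cancelling a query that was sent asynchronously:
+ #   conn.send_query("SELECT pg_sleep(60)")
+ #   conn.cancel            # => nil on success
+ #   conn.get_last_result   # raises PG::QueryCanceled once the server acts on the cancel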
626
+ def cancel
627
+ cancon = PG::CancelConnection.new(self)
628
+ cancon.async_connect_timeout = conninfo_hash[:connect_timeout]
629
+ cancon.async_cancel
630
+ rescue PG::Error => err
631
+ err.to_s
632
+ end
633
+
634
+ else
635
+
636
+ # PostgreSQL < 17
637
+
638
+ def cancel
639
+ be_pid = backend_pid
640
+ be_key = backend_key
641
+ cancel_request = [0x10, 1234, 5678, be_pid, be_key].pack("NnnNN")
642
+
643
+ if Fiber.respond_to?(:scheduler) && Fiber.scheduler && RUBY_PLATFORM =~ /mingw|mswin/
644
+ # Ruby's nonblocking IO is not really supported on Windows.
645
+ # We work around this by using threads and explicit calls to wait_readable/wait_writable.
646
+ cl = Thread.new(socket_io.remote_address) { |ra| ra.connect }.value
647
+ begin
648
+ cl.write_nonblock(cancel_request)
649
+ rescue IO::WaitReadable, Errno::EINTR
650
+ cl.wait_writable
651
+ retry
652
+ end
653
+ begin
654
+ cl.read_nonblock(1)
655
+ rescue IO::WaitReadable, Errno::EINTR
656
+ cl.wait_readable
657
+ retry
658
+ rescue EOFError
659
+ end
660
+ else
661
+ cl = socket_io.remote_address.connect
662
+ # Send CANCEL_REQUEST_CODE and parameters
663
+ cl.write(cancel_request)
664
+ # Wait for the postmaster to close the connection, which indicates that it's processed the request.
665
+ cl.read(1)
666
+ end
667
+
668
+ cl.close
669
+ nil
670
+ rescue SystemCallError => err
671
+ err.to_s
672
+ end
673
+ end
674
+ alias async_cancel cancel
675
+
676
+ module Pollable
677
+ # Track the progress of the connection, waiting for the socket to become readable/writable before polling it
678
+ private def polling_loop(poll_meth, connect_timeout)
679
+ if (timeo = connect_timeout.to_i) && timeo > 0
680
+ host_count = conninfo_hash[:host].to_s.count(",") + 1
681
+ stop_time = timeo * host_count + Process.clock_gettime(Process::CLOCK_MONOTONIC)
682
+ end
683
+
684
+ poll_status = PG::PGRES_POLLING_WRITING
685
+ until poll_status == PG::PGRES_POLLING_OK ||
686
+ poll_status == PG::PGRES_POLLING_FAILED
687
+
688
+ # Set single timeout to parameter "connect_timeout" but
689
+ # don't exceed total connection time of number-of-hosts * connect_timeout.
690
+ timeout = [timeo, stop_time - Process.clock_gettime(Process::CLOCK_MONOTONIC)].min if stop_time
691
+ event = if !timeout || timeout >= 0
692
+ # If the socket needs to read, wait 'til it becomes readable to poll again
693
+ case poll_status
694
+ when PG::PGRES_POLLING_READING
695
+ if defined?(IO::READABLE) # ruby-3.0+
696
+ socket_io.wait(IO::READABLE | IO::PRIORITY, timeout)
697
+ else
698
+ IO.select([socket_io], nil, [socket_io], timeout)
699
+ end
700
+
701
+ # ...and the same for when the socket needs to write
702
+ when PG::PGRES_POLLING_WRITING
703
+ if defined?(IO::WRITABLE) # ruby-3.0+
704
+ # Use wait instead of wait_writable, since connection errors are delivered as
705
+ # exceptional/priority events on Windows.
706
+ socket_io.wait(IO::WRITABLE | IO::PRIORITY, timeout)
707
+ else
708
+ # IO#wait on ruby-2.x doesn't wait for priority events, so fall back to IO.select
709
+ IO.select(nil, [socket_io], [socket_io], timeout)
710
+ end
711
+ end
712
+ end
713
+ # connection to server at "localhost" (127.0.0.1), port 5433 failed: timeout expired (PG::ConnectionBad)
714
+ # connection to server on socket "/var/run/postgresql/.s.PGSQL.5433" failed: No such file or directory
715
+ unless event
716
+ if self.class.send(:host_is_named_pipe?, host)
717
+ connhost = "on socket \"#{host}\""
718
+ elsif respond_to?(:hostaddr)
719
+ connhost = "at \"#{host}\" (#{hostaddr}), port #{port}"
720
+ else
721
+ connhost = "at \"#{host}\", port #{port}"
722
+ end
723
+ raise PG::ConnectionBad.new("connection to server #{connhost} failed: timeout expired", connection: self)
724
+ end
725
+
726
+ # Check to see if it's finished or failed yet
727
+ poll_status = send( poll_meth )
728
+ end
729
+
730
+ unless status == PG::CONNECTION_OK
731
+ msg = error_message
732
+ finish
733
+ raise PG::ConnectionBad.new(msg, connection: self)
734
+ end
735
+ end
736
+ end
737
+
738
+ include Pollable
739
+
740
+ private def async_connect_or_reset(poll_meth)
741
+ # Track the progress of the connection, waiting for the socket to become readable/writable before polling it
742
+ polling_loop(poll_meth, conninfo_hash[:connect_timeout])
743
+
744
+ # Set connection to nonblocking to handle all blocking states in ruby.
745
+ # That way a fiber scheduler is able to handle IO requests.
746
+ sync_setnonblocking(true)
747
+ self.flush_data = true
748
+ set_default_encoding
749
+ end
750
+
751
+ class << self
752
+ # call-seq:
753
+ # PG::Connection.new -> conn
754
+ # PG::Connection.new(connection_hash) -> conn
755
+ # PG::Connection.new(connection_string) -> conn
756
+ # PG::Connection.new(host, port, options, tty, dbname, user, password) -> conn
757
+ #
758
+ # Create a connection to the specified server.
759
+ #
760
+ # +connection_hash+ must be a ruby Hash with connection parameters.
761
+ # See the {list of valid parameters}[https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS] in the PostgreSQL documentation.
762
+ #
763
+ # There are two accepted formats for +connection_string+: plain <code>keyword = value</code> strings and URIs.
764
+ # See the documentation of {connection strings}[https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING].
765
+ #
766
+ # The positional parameter form has the same functionality except that the missing parameters will always take on default values. The parameters are:
767
+ # [+host+]
768
+ # server hostname
769
+ # [+port+]
770
+ # server port number
771
+ # [+options+]
772
+ # backend options
773
+ # [+tty+]
774
+ # (ignored in all versions of PostgreSQL)
775
+ # [+dbname+]
776
+ # connecting database name
777
+ # [+user+]
778
+ # login user name
779
+ # [+password+]
780
+ # login password
781
+ #
782
+ # Examples:
783
+ #
784
+ # # Connect using all defaults
785
+ # PG::Connection.new
786
+ #
787
+ # # As a Hash
788
+ # PG::Connection.new( dbname: 'test', port: 5432 )
789
+ #
790
+ # # As a String
791
+ # PG::Connection.new( "dbname=test port=5432" )
792
+ #
793
+ # # As an Array
794
+ # PG::Connection.new( nil, 5432, nil, nil, 'test', nil, nil )
795
+ #
796
+ # # As a URI
797
+ # PG::Connection.new( "postgresql://user:pass@pgsql.example.com:5432/testdb?sslmode=require" )
798
+ #
799
+ # If the Ruby default internal encoding is set (i.e., <code>Encoding.default_internal != nil</code>), the
800
+ # connection will have its +client_encoding+ set accordingly.
801
+ #
802
+ # Raises a PG::Error if the connection fails.
803
+ def new(*args)
804
+ conn = connect_to_hosts(*args)
805
+
806
+ if block_given?
807
+ begin
808
+ return yield conn
809
+ ensure
810
+ conn.finish
811
+ end
812
+ end
813
+ conn
814
+ end
815
+ alias async_connect new
816
+ alias connect new
817
+ alias open new
818
+ alias setdb new
819
+ alias setdblogin new
820
+
821
+ # Resolve DNS in Ruby to avoid blocking while connecting.
822
+ # Multiple comma-separated values are generated if the hostname resolves to both IPv4 and IPv6 addresses.
823
+ # This requires PostgreSQL-10+, so no DNS resolution is done on earlier versions.
824
+ private def resolve_hosts(iopts)
825
+ ihosts = iopts[:host].split(",", -1)
826
+ iports = iopts[:port].split(",", -1)
827
+ iports = [nil] if iports.size == 0
828
+ iports = iports * ihosts.size if iports.size == 1
829
+ raise PG::ConnectionBad, "could not match #{iports.size} port numbers to #{ihosts.size} hosts" if iports.size != ihosts.size
830
+
831
+ dests = ihosts.each_with_index.flat_map do |mhost, idx|
832
+ unless host_is_named_pipe?(mhost)
833
+ if Fiber.respond_to?(:scheduler) &&
834
+ Fiber.scheduler &&
835
+ RUBY_VERSION < '3.1.'
836
+
837
+ # Use a second thread to avoid blocking of the scheduler.
838
+ # `TCPSocket.gethostbyname` isn't fiber aware before ruby-3.1.
839
+ hostaddrs = Thread.new{ Addrinfo.getaddrinfo(mhost, nil, nil, :STREAM).map(&:ip_address) rescue [''] }.value
840
+ else
841
+ hostaddrs = Addrinfo.getaddrinfo(mhost, nil, nil, :STREAM).map(&:ip_address) rescue ['']
842
+ end
843
+ else
844
+ # No hostname to resolve (UnixSocket)
845
+ hostaddrs = [nil]
846
+ end
847
+ hostaddrs.map { |hostaddr| [hostaddr, mhost, iports[idx]] }
848
+ end
849
+ iopts.merge(
850
+ hostaddr: dests.map{|d| d[0] }.join(","),
851
+ host: dests.map{|d| d[1] }.join(","),
852
+ port: dests.map{|d| d[2] }.join(","))
853
+ end
854
+
855
+ private def connect_to_hosts(*args)
856
+ option_string = parse_connect_args(*args)
857
+ iopts = PG::Connection.conninfo_parse(option_string).each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] }
858
+ iopts = PG::Connection.conndefaults.each_with_object({}){|h, o| o[h[:keyword].to_sym] = h[:val] if h[:val] }.merge(iopts)
859
+
860
+ if PG::BUNDLED_LIBPQ_WITH_UNIXSOCKET && iopts[:host].to_s.empty?
861
+ # Many distros patch the hardcoded default UnixSocket path in libpq to /var/run/postgresql instead of /tmp .
862
+ # We simply try them all.
863
+ iopts[:host] = "/var/run/postgresql" + # Ubuntu, Debian, Fedora, Opensuse
864
+ ",/run/postgresql" + # Alpine, Archlinux, Gentoo
865
+ ",/tmp" # Stock PostgreSQL
866
+ end
867
+
868
+ iopts_for_reset = iopts
869
+ if iopts[:hostaddr]
870
+ # hostaddr is provided -> no need to resolve hostnames
871
+
872
+ elsif iopts[:host] && !iopts[:host].empty? && PG.library_version >= 100000
873
+ iopts = resolve_hosts(iopts)
874
+ else
875
+ # No host given
876
+ end
877
+ conn = self.connect_start(iopts) or
878
+ raise(PG::Error, "Unable to create a new connection")
879
+
880
+ raise PG::ConnectionBad, conn.error_message if conn.status == PG::CONNECTION_BAD
881
+
882
+ # save the connection options for conn.reset
883
+ conn.instance_variable_set(:@iopts_for_reset, iopts_for_reset)
884
+ conn.send(:async_connect_or_reset, :connect_poll)
885
+ conn
886
+ end
887
+
888
+ private def host_is_named_pipe?(host_string)
889
+ host_string.empty? || host_string.start_with?("/") || # it's UnixSocket?
890
+ host_string.start_with?("@") || # it's UnixSocket in the abstract namespace?
891
+ # it's a path on Windows?
892
+ (RUBY_PLATFORM =~ /mingw|mswin/ && host_string =~ /\A([\/\\]|\w:[\/\\])/)
893
+ end
894
+
895
+ # call-seq:
896
+ # PG::Connection.ping(connection_hash) -> Integer
897
+ # PG::Connection.ping(connection_string) -> Integer
898
+ # PG::Connection.ping(host, port, options, tty, dbname, login, password) -> Integer
899
+ #
900
+ # PQpingParams reports the status of the server.
901
+ #
902
+ # It accepts connection parameters identical to those of PG::Connection.new .
903
+ # It is not necessary to supply correct user name, password, or database name values to obtain the server status; however, if incorrect values are provided, the server will log a failed connection attempt.
904
+ #
905
+ # See PG::Connection.new for a description of the parameters.
906
+ #
907
+ # Returns one of:
908
+ # [+PQPING_OK+]
909
+ # server is accepting connections
910
+ # [+PQPING_REJECT+]
911
+ # server is alive but rejecting connections
912
+ # [+PQPING_NO_RESPONSE+]
913
+ # could not establish connection
914
+ # [+PQPING_NO_ATTEMPT+]
915
+ # connection not attempted (bad params)
916
+ #
917
+ # See also check_socket for a way to check the connection without doing any server communication.
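+ #
+ # A minimal sketch (connection parameters are examples):
+ #   if PG::Connection.ping(host: "localhost", port: 5432) == PG::PQPING_OK
+ #     puts "server is accepting connections"
+ #   end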
918
+ def ping(*args)
919
+ if Fiber.respond_to?(:scheduler) && Fiber.scheduler
920
+ # Run PQping in a second thread to avoid blocking of the scheduler.
921
+ # Unfortunately there's no nonblocking way to run ping.
922
+ Thread.new { sync_ping(*args) }.value
923
+ else
924
+ sync_ping(*args)
925
+ end
926
+ end
927
+ alias async_ping ping
928
+
929
+ REDIRECT_CLASS_METHODS = PG.make_shareable({
930
+ :new => [:async_connect, :sync_connect],
931
+ :connect => [:async_connect, :sync_connect],
932
+ :open => [:async_connect, :sync_connect],
933
+ :setdb => [:async_connect, :sync_connect],
934
+ :setdblogin => [:async_connect, :sync_connect],
935
+ :ping => [:async_ping, :sync_ping],
936
+ })
937
+ private_constant :REDIRECT_CLASS_METHODS
938
+
939
+ # These methods are affected by PQsetnonblocking
940
+ REDIRECT_SEND_METHODS = {
941
+ :isnonblocking => [:async_isnonblocking, :sync_isnonblocking],
942
+ :nonblocking? => [:async_isnonblocking, :sync_isnonblocking],
943
+ :put_copy_data => [:async_put_copy_data, :sync_put_copy_data],
944
+ :put_copy_end => [:async_put_copy_end, :sync_put_copy_end],
945
+ :flush => [:async_flush, :sync_flush],
946
+ }
947
+ private_constant :REDIRECT_SEND_METHODS
948
+ if PG::Connection.instance_methods.include? :sync_pipeline_sync
949
+ if PG::Connection.instance_methods.include? :send_pipeline_sync
950
+ # PostgreSQL-17+
951
+ REDIRECT_SEND_METHODS.merge!({
952
+ :pipeline_sync => [:async_pipeline_sync, :sync_pipeline_sync],
953
+ })
954
+ else
955
+ # PostgreSQL-14+
956
+ REDIRECT_SEND_METHODS.merge!({
957
+ :pipeline_sync => [:sync_pipeline_sync, :sync_pipeline_sync],
958
+ })
959
+ end
960
+ end
961
+ PG.make_shareable(REDIRECT_SEND_METHODS)
962
+
963
+ REDIRECT_METHODS = {
964
+ :exec => [:async_exec, :sync_exec],
965
+ :query => [:async_exec, :sync_exec],
966
+ :exec_params => [:async_exec_params, :sync_exec_params],
967
+ :prepare => [:async_prepare, :sync_prepare],
968
+ :exec_prepared => [:async_exec_prepared, :sync_exec_prepared],
969
+ :describe_portal => [:async_describe_portal, :sync_describe_portal],
970
+ :describe_prepared => [:async_describe_prepared, :sync_describe_prepared],
971
+ :setnonblocking => [:async_setnonblocking, :sync_setnonblocking],
972
+ :get_result => [:async_get_result, :sync_get_result],
973
+ :get_last_result => [:async_get_last_result, :sync_get_last_result],
974
+ :get_copy_data => [:async_get_copy_data, :sync_get_copy_data],
975
+ :reset => [:async_reset, :sync_reset],
976
+ :set_client_encoding => [:async_set_client_encoding, :sync_set_client_encoding],
977
+ :client_encoding= => [:async_set_client_encoding, :sync_set_client_encoding],
978
+ :cancel => [:async_cancel, :sync_cancel],
979
+ :encrypt_password => [:async_encrypt_password, :sync_encrypt_password],
980
+ }
981
+ private_constant :REDIRECT_METHODS
982
+ if PG::Connection.instance_methods.include? :async_close_prepared
983
+ REDIRECT_METHODS.merge!({
984
+ :close_prepared => [:async_close_prepared, :sync_close_prepared],
985
+ :close_portal => [:async_close_portal, :sync_close_portal],
986
+ })
987
+ end
988
+ PG.make_shareable(REDIRECT_METHODS)
989
+
990
+ def async_send_api=(enable)
991
+ REDIRECT_SEND_METHODS.each do |ali, (async, sync)|
992
+ undef_method(ali) if method_defined?(ali)
993
+ alias_method( ali, enable ? async : sync )
994
+ end
995
+ end
996
+
997
+ # Switch between sync and async libpq API.
998
+ #
999
+ # PG::Connection.async_api = true
1000
+ # this is the default.
1001
+ # It sets an alias from #exec to #async_exec, #reset to #async_reset and so on.
1002
+ #
1003
+ # PG::Connection.async_api = false
1004
+ # sets an alias from #exec to #sync_exec, #reset to #sync_reset and so on.
1005
+ #
1006
+ # pg-1.1.0+ defaults to libpq's async API for query related blocking methods.
1007
+ # pg-1.3.0+ defaults to libpq's async API for all possibly blocking methods.
1008
+ #
1009
+ # _PLEASE_ _NOTE_: This method is not part of the public API and is for debug and development use only.
1010
+ # Do not use this method in production code.
1011
+ # Any issues with the default setting of <tt>async_api=true</tt> should be reported to the maintainers instead.
1012
+ #
1013
+ def async_api=(enable)
1014
+ self.async_send_api = enable
1015
+ REDIRECT_METHODS.each do |ali, (async, sync)|
1016
+ remove_method(ali) if method_defined?(ali)
1017
+ alias_method( ali, enable ? async : sync )
1018
+ end
1019
+ REDIRECT_CLASS_METHODS.each do |ali, (async, sync)|
1020
+ singleton_class.remove_method(ali) if method_defined?(ali)
1021
+ singleton_class.alias_method(ali, enable ? async : sync )
1022
+ end
1023
+ end
1024
+ end
1025
+
1026
+ self.async_api = true
1027
+ end # class PG::Connection