pg 1.1.0.pre20180730144600-x64-mingw32 → 1.1.0.pre20180730171000-x64-mingw32
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data.tar.gz.sig +2 -1
- data/.gems +6 -0
- data/.hgignore +21 -0
- data/.hgsigs +29 -0
- data/.hgtags +36 -0
- data/.hoerc +2 -0
- data/.irbrc +23 -0
- data/.pryrc +23 -0
- data/.tm_properties +21 -0
- data/.travis.yml +41 -0
- data/Gemfile +2 -0
- data/Manifest.txt +2 -65
- data/Rakefile +1 -0
- data/appveyor.yml +50 -0
- data/certs/ged.pem +26 -0
- data/lib/2.0/pg_ext.so +0 -0
- data/lib/2.1/pg_ext.so +0 -0
- data/lib/2.2/pg_ext.so +0 -0
- data/lib/2.3/pg_ext.so +0 -0
- data/lib/2.4/pg_ext.so +0 -0
- data/lib/2.5/pg_ext.so +0 -0
- data/lib/libpq.dll +0 -0
- data/lib/pg.rb +1 -1
- data/lib/pg/binary_decoder.rb +22 -0
- data/lib/pg/tuple.rb +30 -0
- data/misc/openssl-pg-segfault.rb +31 -0
- data/misc/postgres/History.txt +9 -0
- data/misc/postgres/Manifest.txt +5 -0
- data/misc/postgres/README.txt +21 -0
- data/misc/postgres/Rakefile +21 -0
- data/misc/postgres/lib/postgres.rb +16 -0
- data/misc/ruby-pg/History.txt +9 -0
- data/misc/ruby-pg/Manifest.txt +5 -0
- data/misc/ruby-pg/README.txt +21 -0
- data/misc/ruby-pg/Rakefile +21 -0
- data/misc/ruby-pg/lib/ruby/pg.rb +16 -0
- data/pg.gemspec +61 -0
- data/sample/array_insert.rb +20 -0
- data/sample/async_api.rb +106 -0
- data/sample/async_copyto.rb +39 -0
- data/sample/async_mixed.rb +56 -0
- data/sample/check_conn.rb +21 -0
- data/sample/copydata.rb +71 -0
- data/sample/copyfrom.rb +81 -0
- data/sample/copyto.rb +19 -0
- data/sample/cursor.rb +21 -0
- data/sample/disk_usage_report.rb +177 -0
- data/sample/issue-119.rb +94 -0
- data/sample/losample.rb +69 -0
- data/sample/minimal-testcase.rb +17 -0
- data/sample/notify_wait.rb +72 -0
- data/sample/pg_statistics.rb +285 -0
- data/sample/replication_monitor.rb +222 -0
- data/sample/test_binary_values.rb +33 -0
- data/sample/wal_shipper.rb +434 -0
- data/sample/warehouse_partitions.rb +311 -0
- data/spec/pg/connection_sync_spec.rb +41 -0
- data/spec/pg/tuple_spec.rb +266 -0
- metadata +69 -25
- metadata.gz.sig +0 -0
- data/ChangeLog +0 -0
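Judging from the file list, the headline additions in this prerelease are the new PG::Tuple class (data/lib/pg/tuple.rb and its spec), new binary decoders (data/lib/pg/binary_decoder.rb), and prebuilt pg_ext.so extensions for Ruby 2.0 through 2.5 bundled with libpq.dll for the x64-mingw32 platform. A minimal post-install sanity check, sketched with standard pg API calls (the printed values are assumptions, not taken from this diff):

require 'pg'

# The binary gem picks the prebuilt lib/<major.minor>/pg_ext.so matching the
# running Ruby, so this should load without a compiler on Windows.
puts RUBY_VERSION        # selects which bundled pg_ext.so gets required
puts PG::VERSION         # gem version, e.g. the prerelease string above
puts PG.library_version  # integer version of the bundled libpq (PQlibVersion)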
data/sample/warehouse_partitions.rb
@@ -0,0 +1,311 @@
# -*- ruby -*-
# vim: set nosta noet ts=4 sw=4:
#
# Script to automatically move partitioned tables and their indexes
# to a separate area on disk.
#
# Mahlon E. Smith <mahlon@martini.nu>
#
# Example use case:
#
#   - You've got a heavy insert table, such as syslog data.
#   - This table has a partitioning trigger (or is manually partitioned)
#     by date, to separate incoming stuff from archival/report stuff.
#   - You have a tablespace on cheap or slower disk (maybe even
#     ZFS compressed, or some such!)
#
# The only assumption this script makes is that your tables are dated, and
# the tablespace they're moving into already exists.
#
# A full example, using the syslog idea from above, where each child
# table is date partitioned by a convention of "syslog_YEAR-WEEKOFYEAR":
#
#     syslog          # <--- parent
#     syslog_2012_06  # <--- inherited
#     syslog_2012_07  # <--- inherited
#     syslog_2012_08  # <--- inherited
#     ...
#
# You'd run this script like so:
#
#     ./warehouse_partitions.rb -F syslog_%Y_%U
#
# Assuming this was week 12 of the year, tables syslog_2012_06 through
# syslog_2012_11 would start sequentially migrating into the tablespace
# called 'warehouse'.
#


require 'date'
require 'ostruct'
require 'optparse'
require 'pathname'
require 'etc'
require 'pg'


### A tablespace migration class.
###
class PGWarehouse

    def initialize( opts )
        @opts = opts
        @db = PG.connect(
            :dbname   => opts.database,
            :host     => opts.host,
            :port     => opts.port,
            :user     => opts.user,
            :password => opts.pass,
            :sslmode  => 'prefer'
        )
        @db.exec "SET search_path TO %s" % [ opts.schema ] if opts.schema

        @relations = self.relations
    end

    attr_reader :db

    ######
    public
    ######

    ### Perform the tablespace moves.
    ###
    def migrate
        if @relations.empty?
            $stderr.puts 'No tables were found for warehousing.'
            return
        end

        $stderr.puts "Found %d relation%s to move." % [ relations.length, relations.length == 1 ? '' : 's' ]
        @relations.sort_by{|_,v| v[:name] }.each do |_, val|
            $stderr.print " - Moving table '%s' to '%s'... " % [
                val[:name], @opts.tablespace
            ]

            if @opts.dryrun
                $stderr.puts '(not really)'

            else
                age = self.timer do
                    db.exec "ALTER TABLE %s SET TABLESPACE %s;" % [
                        val[:name], @opts.tablespace
                    ]
                end
                puts age
            end

            val[ :indexes ].each do |idx|
                $stderr.print " - Moving index '%s' to '%s'... " % [
                    idx, @opts.tablespace
                ]
                if @opts.dryrun
                    $stderr.puts '(not really)'

                else
                    age = self.timer do
                        db.exec "ALTER INDEX %s SET TABLESPACE %s;" % [
                            idx, @opts.tablespace
                        ]
                    end
                    puts age
                end
            end
        end
    end


    #########
    protected
    #########

    ### Get OIDs and current tablespaces for everything under the
    ### specified schema.
    ###
    def relations
        return @relations if @relations
        relations = {}

        query = %q{
            SELECT c.oid AS oid,
                c.relname AS name,
                c.relkind AS kind,
                t.spcname AS tspace
            FROM pg_class AS c
            LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
            LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace
            WHERE c.relkind = 'r' }
        query << "AND n.nspname='#{@opts.schema}'" if @opts.schema

        # Get the relations list, along with each element's current tablespace.
        #
        self.db.exec( query ) do |res|
            res.each do |row|
                relations[ row['oid'] ] = {
                    :name       => row['name'],
                    :tablespace => row['tspace'],
                    :indexes    => [],
                    :parent     => nil
                }
            end
        end

        # Add table inheritence information.
        #
        db.exec 'SELECT inhrelid AS oid, inhparent AS parent FROM pg_inherits' do |res|
            res.each do |row|
                relations[ row['oid'] ][ :parent ] = row['parent']
            end
        end

        # Remove tables that don't qualify for warehousing.
        #
        #   - Tables that are not children of a parent
        #   - Tables that are already in the warehouse tablespace
        #   - The currently active child (it's likely being written to!)
        #   - Any table that can't be parsed into the specified format
        #
        relations.reject! do |oid, val|
            begin
                val[:parent].nil? ||
                    val[:tablespace] == @opts.tablespace ||
                    val[:name] == Time.now.strftime( @opts.format ) ||
                    ! DateTime.strptime( val[:name], @opts.format )
            rescue ArgumentError
                true
            end
        end

        query = %q{
            SELECT c.oid AS oid,
                i.indexname AS name
            FROM pg_class AS c
            INNER JOIN pg_indexes AS i
                ON i.tablename = c.relname }
        query << "AND i.schemaname='#{@opts.schema}'" if @opts.schema

        # Attach index names to tables.
        #
        db.exec( query ) do |res|
            res.each do |row|
                relations[ row['oid'] ][ :indexes ] << row['name'] if relations[ row['oid'] ]
            end
        end

        return relations
    end


    ### Wrap arbitrary commands in a human readable timer.
    ###
    def timer
        start = Time.now
        yield
        age = Time.now - start

        diff = age
        secs = diff % 60
        diff = ( diff - secs ) / 60
        mins = diff % 60
        diff = ( diff - mins ) / 60
        hour = diff % 24

        return "%02d:%02d:%02d" % [ hour, mins, secs ]
    end
end


### Parse command line arguments. Return a struct of global options.
###
def parse_args( args )
    options            = OpenStruct.new
    options.database   = Etc.getpwuid( Process.uid ).name
    options.host       = '127.0.0.1'
    options.port       = 5432
    options.user       = Etc.getpwuid( Process.uid ).name
    options.sslmode    = 'prefer'
    options.tablespace = 'warehouse'

    opts = OptionParser.new do |opts|
        opts.banner = "Usage: #{$0} [options]"

        opts.separator ''
        opts.separator 'Connection options:'

        opts.on( '-d', '--database DBNAME',
            "specify the database to connect to (default: \"#{options.database}\")" ) do |db|
            options.database = db
        end

        opts.on( '-h', '--host HOSTNAME', 'database server host' ) do |host|
            options.host = host
        end

        opts.on( '-p', '--port PORT', Integer,
            "database server port (default: \"#{options.port}\")" ) do |port|
            options.port = port
        end

        opts.on( '-n', '--schema SCHEMA', String,
            "operate on the named schema only (default: none)" ) do |schema|
            options.schema = schema
        end

        opts.on( '-T', '--tablespace SPACE', String,
            "move old tables to this tablespace (default: \"#{options.tablespace}\")" ) do |tb|
            options.tablespace = tb
        end

        opts.on( '-F', '--tableformat FORMAT', String,
            "The naming format (strftime) for the inherited tables (default: none)" ) do |format|
            options.format = format
        end

        opts.on( '-U', '--user NAME',
            "database user name (default: \"#{options.user}\")" ) do |user|
            options.user = user
        end

        opts.on( '-W', 'force password prompt' ) do |pw|
            print 'Password: '
            begin
                system 'stty -echo'
                options.pass = gets.chomp
            ensure
                system 'stty echo'
                puts
            end
        end

        opts.separator ''
        opts.separator 'Other options:'

        opts.on_tail( '--dry-run', "don't actually do anything" ) do
            options.dryrun = true
        end

        opts.on_tail( '--help', 'show this help, then exit' ) do
            $stderr.puts opts
            exit
        end

        opts.on_tail( '--version', 'output version information, then exit' ) do
            puts Stats::VERSION
            exit
        end
    end

    opts.parse!( args )
    return options
end


if __FILE__ == $0
    opts = parse_args( ARGV )
    raise ArgumentError, "A naming format (-F) is required." unless opts.format

    $stdout.sync = true
    PGWarehouse.new( opts ).migrate
end
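The sample above ultimately reduces to issuing ALTER TABLE ... SET TABLESPACE (and the matching ALTER INDEX) for each dated child partition it selects. A stripped-down sketch of that core step outside the PGWarehouse class, using a hypothetical child table and the 'warehouse' tablespace from the example comments:

require 'pg'

# Hypothetical names for illustration only; the real script derives the child
# tables from pg_inherits and the -F strftime format.
db        = PG.connect( dbname: 'logs' )
old_child = 'syslog_2012_06'

db.exec( "ALTER TABLE %s SET TABLESPACE warehouse;" % [ old_child ] )

# Move the table's indexes too, mirroring the script's inner loop.
db.exec_params( "SELECT indexname FROM pg_indexes WHERE tablename = $1", [ old_child ] ) do |res|
  res.each do |row|
    db.exec( "ALTER INDEX %s SET TABLESPACE warehouse;" % [ row['indexname'] ] )
  end
end

db.finish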
data/spec/pg/connection_sync_spec.rb
@@ -0,0 +1,41 @@
# -*- rspec -*-
#encoding: utf-8

require_relative '../helpers'

context "running with sync_* methods" do
    before :each do
        PG::Connection.async_api = false
    end

    after :each do
        PG::Connection.async_api = true
    end

    fname = File.expand_path("../connection_spec.rb", __FILE__)
    eval File.read(fname, encoding: __ENCODING__), binding, fname


    it "enables/disables async/sync methods by #async_api" do
        [true, false].each do |async|
            PG::Connection.async_api = async

            start = Time.now
            t = Thread.new do
                @conn.exec( 'select pg_sleep(1)' )
            end
            sleep 0.1

            t.kill
            t.join
            dt = Time.now - start

            if async
                expect( dt ).to be < 1.0
            else
                expect( dt ).to be >= 1.0
            end
        end
    end

end
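This spec reruns the whole connection_spec.rb suite under PG::Connection.async_api = false, which re-aliases exec and friends to their blocking sync_* variants instead of the async_* ones that pg 1.1 uses by default. A rough sketch of the switch, assuming a reachable test database:

require 'pg'

conn = PG.connect( dbname: 'test' )

# Default in pg 1.1: exec is routed through the non-blocking async_exec path,
# so other Ruby threads stay runnable while a query is in flight (that is what
# the timing assertion in the spec checks by killing the worker thread early).
PG::Connection.async_api = true
conn.exec( 'select pg_sleep(1)' )

# Restore the pre-1.1 behaviour: exec becomes the libpq-blocking sync_exec.
PG::Connection.async_api = false
conn.exec( 'select 1' )

conn.finish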
data/spec/pg/tuple_spec.rb
@@ -0,0 +1,266 @@
# -*- rspec -*-
# encoding: utf-8

require_relative '../helpers'
require 'pg'
require 'objspace'

describe PG::Tuple do
    let!(:typemap) { PG::BasicTypeMapForResults.new(@conn) }
    let!(:result2x2) { @conn.exec( "VALUES(1, 'a'), (2, 'b')" ) }
    let!(:result2x3cast) { @conn.exec( "SELECT * FROM (VALUES(1, TRUE, '3'), (2, FALSE, '4')) AS m (a, b, b)" ).map_types!(typemap) }
    let!(:tuple0) { result2x2.tuple(0) }
    let!(:tuple1) { result2x2.tuple(1) }
    let!(:tuple2) { result2x3cast.tuple(0) }
    let!(:tuple3) { str = Marshal.dump(result2x3cast.tuple(1)); Marshal.load(str) }
    let!(:tuple_empty) { PG::Tuple.new }

    describe "[]" do
        it "returns nil for invalid keys" do
            expect( tuple0["x"] ).to be_nil
            expect( tuple0[0.5] ).to be_nil
            expect( tuple0[2] ).to be_nil
            expect( tuple0[-3] ).to be_nil
            expect{ tuple_empty[0] }.to raise_error(TypeError)
        end

        it "supports array like access" do
            expect( tuple0[0] ).to eq( "1" )
            expect( tuple0[1] ).to eq( "a" )
            expect( tuple1[0] ).to eq( "2" )
            expect( tuple1[1] ).to eq( "b" )
            expect( tuple2[0] ).to eq( 1 )
            expect( tuple2[1] ).to eq( true )
            expect( tuple2[2] ).to eq( "3" )
            expect( tuple3[0] ).to eq( 2 )
            expect( tuple3[1] ).to eq( false )
            expect( tuple3[2] ).to eq( "4" )
        end

        it "supports negative indices" do
            expect( tuple0[-2] ).to eq( "1" )
            expect( tuple0[-1] ).to eq( "a" )
        end

        it "supports hash like access" do
            expect( tuple0["column1"] ).to eq( "1" )
            expect( tuple0["column2"] ).to eq( "a" )
            expect( tuple2["a"] ).to eq( 1 )
            expect( tuple2["b"] ).to eq( "3" )
            expect( tuple0["x"] ).to be_nil
        end

        it "casts lazy and caches result" do
            a = []
            deco = Class.new(PG::SimpleDecoder) do
                define_method(:decode) do |*args|
                    a << args
                    args.last
                end
            end.new

            result2x2.map_types!(PG::TypeMapByColumn.new([deco, deco]))
            t = result2x2.tuple(1)

            # cast and cache at first call to [0]
            a.clear
            expect( t[0] ).to eq( 0 )
            expect( a ).to eq([["2", 1, 0]])

            # use cache at second call to [0]
            a.clear
            expect( t[0] ).to eq( 0 )
            expect( a ).to eq([])

            # cast and cache at first call to [1]
            a.clear
            expect( t[1] ).to eq( 1 )
            expect( a ).to eq([["b", 1, 1]])
        end
    end

    describe "fetch" do
        it "raises proper errors for invalid keys" do
            expect{ tuple0.fetch("x") }.to raise_error(KeyError)
            expect{ tuple0.fetch(0.5) }.to raise_error(KeyError)
            expect{ tuple0.fetch(2) }.to raise_error(IndexError)
            expect{ tuple0.fetch(-3) }.to raise_error(IndexError)
            expect{ tuple0.fetch(-3) }.to raise_error(IndexError)
            expect{ tuple_empty[0] }.to raise_error(TypeError)
        end

        it "supports array like access" do
            expect( tuple0.fetch(0) ).to eq( "1" )
            expect( tuple0.fetch(1) ).to eq( "a" )
        end

        it "supports default value for indices" do
            expect( tuple0.fetch(2, 42) ).to eq( 42 )
            expect( tuple0.fetch(2){43} ).to eq( 43 )
        end

        it "supports negative indices" do
            expect( tuple0.fetch(-2) ).to eq( "1" )
            expect( tuple0.fetch(-1) ).to eq( "a" )
        end

        it "supports hash like access" do
            expect( tuple0.fetch("column1") ).to eq( "1" )
            expect( tuple0.fetch("column2") ).to eq( "a" )
            expect( tuple2.fetch("a") ).to eq( 1 )
            expect( tuple2.fetch("b") ).to eq( "3" )
        end

        it "supports default value for name keys" do
            expect( tuple0.fetch("x", "defa") ).to eq("defa")
            expect( tuple0.fetch("x"){"defa"} ).to eq("defa")
        end
    end

    describe "each" do
        it "can be used as an enumerator" do
            expect( tuple0.each ).to be_kind_of(Enumerator)
            expect( tuple0.each.to_a ).to eq( [["column1", "1"], ["column2", "a"]] )
            expect( tuple1.each.to_a ).to eq( [["column1", "2"], ["column2", "b"]] )
            expect( tuple2.each.to_a ).to eq( [["a", 1], ["b", true], ["b", "3"]] )
            expect( tuple3.each.to_a ).to eq( [["a", 2], ["b", false], ["b", "4"]] )
            expect{ tuple_empty.each }.to raise_error(TypeError)
        end

        it "can be used with block" do
            a = []
            tuple0.each do |*v|
                a << v
            end
            expect( a ).to eq( [["column1", "1"], ["column2", "a"]] )
        end
    end

    describe "each_value" do
        it "can be used as an enumerator" do
            expect( tuple0.each_value ).to be_kind_of(Enumerator)
            expect( tuple0.each_value.to_a ).to eq( ["1", "a"] )
            expect( tuple1.each_value.to_a ).to eq( ["2", "b"] )
            expect( tuple2.each_value.to_a ).to eq( [1, true, "3"] )
            expect( tuple3.each_value.to_a ).to eq( [2, false, "4"] )
            expect{ tuple_empty.each_value }.to raise_error(TypeError)
        end

        it "can be used with block" do
            a = []
            tuple0.each_value do |v|
                a << v
            end
            expect( a ).to eq( ["1", "a"] )
        end
    end

    it "responds to values" do
        expect( tuple0.values ).to eq( ["1", "a"] )
        expect( tuple3.values ).to eq( [2, false, "4"] )
        expect{ tuple_empty.values }.to raise_error(TypeError)
    end

    it "responds to key?" do
        expect( tuple1.key?("column1") ).to eq( true )
        expect( tuple1.key?("other") ).to eq( false )
        expect( tuple1.has_key?("column1") ).to eq( true )
        expect( tuple1.has_key?("other") ).to eq( false )
    end

    it "responds to keys" do
        expect( tuple0.keys ).to eq( ["column1", "column2"] )
        expect( tuple2.keys ).to eq( ["a", "b", "b"] )
    end

    describe "each_key" do
        it "can be used as an enumerator" do
            expect( tuple0.each_key ).to be_kind_of(Enumerator)
            expect( tuple0.each_key.to_a ).to eq( ["column1", "column2"] )
            expect( tuple2.each_key.to_a ).to eq( ["a", "b", "b"] )
        end

        it "can be used with block" do
            a = []
            tuple0.each_key do |v|
                a << v
            end
            expect( a ).to eq( ["column1", "column2"] )
        end
    end

    it "responds to length" do
        expect( tuple0.length ).to eq( 2 )
        expect( tuple0.size ).to eq( 2 )
        expect( tuple2.size ).to eq( 3 )
    end

    it "responds to index" do
        expect( tuple0.index("column1") ).to eq( 0 )
        expect( tuple0.index("column2") ).to eq( 1 )
        expect( tuple0.index("x") ).to eq( nil )
        expect( tuple2.index("a") ).to eq( 0 )
        expect( tuple2.index("b") ).to eq( 2 )
    end

    it "can be used as Enumerable" do
        expect( tuple0.to_a ).to eq( [["column1", "1"], ["column2", "a"]] )
        expect( tuple1.to_a ).to eq( [["column1", "2"], ["column2", "b"]] )
        expect( tuple2.to_a ).to eq( [["a", 1], ["b", true], ["b", "3"]] )
        expect( tuple3.to_a ).to eq( [["a", 2], ["b", false], ["b", "4"]] )
    end

    it "can be marshaled" do
        [tuple0, tuple1, tuple2, tuple3].each do |t1|
            str = Marshal.dump(t1)
            t2 = Marshal.load(str)

            expect( t2 ).to be_kind_of(t1.class)
            expect( t2 ).not_to equal(t1)
            expect( t2.to_a ).to eq(t1.to_a)
        end
    end

    it "passes instance variables when marshaled" do
        t1 = tuple0
        t1.instance_variable_set("@a", 4711)
        str = Marshal.dump(t1)
        t2 = Marshal.load(str)

        expect( t2.instance_variable_get("@a") ).to eq( 4711 )
    end

    it "can't be marshaled when empty" do
        expect{ Marshal.dump(tuple_empty) }.to raise_error(TypeError)
    end

    it "should give account about memory usage" do
        expect( ObjectSpace.memsize_of(tuple0) ).to be > 40
        expect( ObjectSpace.memsize_of(tuple_empty) ).to be > 0
    end

    it "should override #inspect" do
        expect( tuple1.inspect ).to eq('#<PG::Tuple column1: "2", column2: "b">')
        expect( tuple2.inspect ).to eq('#<PG::Tuple a: 1, b: true, b: "3">')
        expect{ tuple_empty.inspect }.to raise_error(TypeError)
    end

    context "with cleared result" do
        it "should raise an error when materialized fields are used" do
            r = result2x2
            t = r.tuple(0)
            t[0] # materialize first field only
            r.clear
            expect{ t[1] }.to raise_error(PG::Error)
            expect{ t.fetch(1) }.to raise_error(PG::Error)
            expect{ t.fetch("column2") }.to raise_error(PG::Error)
            expect{ t.values }.to raise_error(PG::Error)

            expect( t[0] ).to eq( "1" )
            expect( t.fetch(0) ).to eq( "1" )
            expect( t.fetch("column1") ).to eq( "1" )

            expect{ t.values }.to raise_error(PG::Error)
        end
    end
end
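For context on what this spec exercises: PG::Result#tuple returns a lightweight PG::Tuple whose fields are read from the underlying result and typecast lazily, on first access, then cached. A small usage sketch, assuming a reachable test database:

require 'pg'

conn = PG.connect( dbname: 'test' )
res  = conn.exec( "VALUES(1, 'a'), (2, 'b')" )

row = res.tuple(0)           # PG::Tuple; nothing materialized yet
row[0]                       # => "1"   (index access, cast on first use)
row['column2']               # => "a"   (access by column name)
row.fetch('missing', 'n/a')  # => "n/a" (fetch supports defaults)
row.keys                     # => ["column1", "column2"]
row.each { |name, value| puts "#{name}=#{value}" }

conn.finish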