ydbd-pg 0.5.1
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/ChangeLog +3703 -0
- data/LICENSE +25 -0
- data/lib/dbd/Pg.rb +188 -0
- data/lib/dbd/pg/database.rb +516 -0
- data/lib/dbd/pg/exec.rb +47 -0
- data/lib/dbd/pg/statement.rb +160 -0
- data/lib/dbd/pg/tuples.rb +121 -0
- data/lib/dbd/pg/type.rb +209 -0
- data/readme.md +274 -0
- data/test/DBD_TESTS +50 -0
- data/test/dbd/general/test_database.rb +206 -0
- data/test/dbd/general/test_statement.rb +326 -0
- data/test/dbd/general/test_types.rb +296 -0
- data/test/dbd/postgresql/base.rb +31 -0
- data/test/dbd/postgresql/down.sql +31 -0
- data/test/dbd/postgresql/test_arrays.rb +179 -0
- data/test/dbd/postgresql/test_async.rb +121 -0
- data/test/dbd/postgresql/test_blob.rb +36 -0
- data/test/dbd/postgresql/test_bytea.rb +87 -0
- data/test/dbd/postgresql/test_ping.rb +10 -0
- data/test/dbd/postgresql/test_timestamp.rb +77 -0
- data/test/dbd/postgresql/test_transactions.rb +58 -0
- data/test/dbd/postgresql/testdbipg.rb +307 -0
- data/test/dbd/postgresql/up.sql +60 -0
- data/test/ts_dbd.rb +131 -0
- metadata +100 -0
data/LICENSE
ADDED
@@ -0,0 +1,25 @@
+(C) 2008 Erik Hollensbe <erik@hollensbe.org>. All rights reserved.
+
+Please see "README" for earlier copyrights.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+   derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
data/lib/dbd/Pg.rb
ADDED
@@ -0,0 +1,188 @@
+#--
+# DBD::Pg
+#
+# Copyright (c) 2001, 2002, 2003 Jim Weirich, Michael Neumann <mneumann@ntecs.de>
+# Copyright (c) 2008 Erik Hollensbe, Christopher Maujean
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+# THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#++
+
+begin
+    require 'rubygems'
+    gem 'pg'
+    gem 'ydbi'
+rescue Exception => e
+end
+
+require 'dbi'
+require 'pg'
+
+module DBI
+    module DBD
+        #
+        # DBD::Pg - Database Driver for the PostgreSQL database system.
+        #
+        # Requires DBI and the 'pg' gem or package to work.
+        #
+        # Only things that extend DBI's results are documented.
+        #
+        module Pg
+            VERSION = "0.5.1"
+            DESCRIPTION = "PostgreSQL DBI DBD"
+
+            #
+            # returns 'Pg'
+            #
+            # See DBI::TypeUtil#convert for more information.
+            #
+            def self.driver_name
+                "Pg"
+            end
+
+            #
+            # This method takes a ruby Array and converts it to PostgreSQL array syntax.
+            #
+            def self.generate_array(obj)
+                # XXX yarr, there be recursion here, and it's probably not a good idea.
+                output = "{"
+                obj.each do |item|
+                    case item
+                    when ::Array
+                        output += generate_array(item)
+                    else
+                        generated = DBI::TypeUtil.convert(driver_name, item)
+                        generated = case item
+                                    when String
+                                        # in strings, escapes are doubled and the quotes are different.
+                                        # this gets *really* ugly and needs to be well-tested
+                                        "\"#{generated.gsub(/\\/) { "\\\\" }}\""
+                                    when Fixnum
+                                        generated.to_s
+                                    end
+                        output += generated
+                    end
+                    output += "," # FIXME technically, delimiters are variable
+                end
+
+                output.sub(/,$/, '}')
+            end
+
+            #
+            # A quote helper, this uses the new syntax in PostgreSQL 8.2 and up.
+            #
+            def self.quote(value)
+                "E'#{ value.gsub(/\\/){ '\\\\' }.gsub(/'/){ '\\\'' } }'"
+            end
+
+            #
+            # Parse a postgresql type. Returns a hash with these fields (as Symbol)
+            #
+            # * ftype: the full type, as passed in to this method.
+            # * type: the type stripped of all attribute information.
+            # * size: the LHS of the attribute information, typically the precision.
+            # * decimal: the RHS of the attribute information, typically the scale.
+            # * array: true if this type is actually an array of that type.
+            #
+            def self.parse_type(ftype)
+                type = ftype
+                pos = ftype.index('(')
+                decimal = nil
+                size = nil
+                array_of_type = nil
+
+                if pos != nil
+                    type = ftype[0..pos-1]
+                    size = ftype[pos+1..-2]
+                    pos = size.index(',')
+                    if pos != nil
+                        size, decimal = size.split(',', 2)
+                        size = size.to_i
+                        decimal = decimal.to_i
+                    else
+                        size = size.to_i
+                    end
+                end
+
+                if type =~ /\[\]$/
+                    type.sub!(/\[\]$/, '')
+                    array_of_type = true
+                end
+
+                return {
+                    :ftype => ftype.dup,
+                    :type => type,
+                    :size => size,
+                    :decimal => decimal,
+                    :array => array_of_type
+                }
+            end
+
+            #
+            # See DBI::BaseDriver.
+            #
+            class Driver < DBI::BaseDriver
+                def initialize
+                    super("0.4.0")
+                end
+
+                ## List of datasources for this database.
+                def data_sources
+                    []
+                end
+
+                ## Connect to a database.
+                def connect(dbname, user, auth, attr)
+                    Database.new(dbname, user, auth, attr)
+                end
+            end
+        end # module Pg
+    end # module DBD
+end # module DBI
+
+require 'dbd/pg/type'
+require 'dbd/pg/database'
+require 'dbd/pg/statement'
+require 'dbd/pg/tuples'
+require 'dbd/pg/exec'
+
+pg = DBI::DBD::Pg
+
+DBI::TypeUtil.register_conversion(pg.driver_name) do |obj|
+    newobj = case obj
+             when ::DateTime
+                 obj.strftime("%Y-%m-%dT%H:%M:%S.%N")
+             when ::Time
+                 ::DateTime.parse(obj.to_s).strftime("%H:%M:%S.%N")
+             when ::Date
+                 obj.strftime("%Y-%m-%d")
+             when ::Array
+                 pg.generate_array(obj)
+             when DBI::DBD::Pg::Type::ByteA
+                 obj.escaped
+             else
+                 obj
+             end
+    [newobj, false]
+end
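The module-level helpers added above (driver_name, generate_array, parse_type, quote) are small enough to exercise directly. The snippet below is illustrative only and is not part of the package diff; it assumes the ydbi and ydbd-pg gems are installed and a Ruby version of the era this gem targets (generate_array still matches on Fixnum).

    require 'dbi'
    require 'dbd/Pg'

    # Ruby Array -> PostgreSQL array literal (the delimiter is hard-coded to ',')
    DBI::DBD::Pg.generate_array([1, 2, 3])              # => "{1,2,3}"

    # Decompose a formatted PostgreSQL type name
    DBI::DBD::Pg.parse_type("character varying(255)")
    # => { :ftype => "character varying(255)", :type => "character varying",
    #      :size => 255, :decimal => nil, :array => nil }
    DBI::DBD::Pg.parse_type("integer[]")[:array]         # => true

    # E'' string quoting (PostgreSQL 8.2+); yields the SQL literal E'O\'Reilly'
    DBI::DBD::Pg.quote("O'Reilly")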
data/lib/dbd/pg/database.rb
ADDED
@@ -0,0 +1,516 @@
+#
+# See DBI::BaseDatabase.
+#
+class DBI::DBD::Pg::Database < DBI::BaseDatabase
+
+    # type map
+    POSTGRESQL_to_XOPEN = {
+        "boolean" => [DBI::SQL_CHAR, 1, nil],
+        "character" => [DBI::SQL_CHAR, 1, nil],
+        "char" => [DBI::SQL_CHAR, 1, nil],
+        "real" => [DBI::SQL_REAL, 4, 6],
+        "double precision" => [DBI::SQL_DOUBLE, 8, 15],
+        "smallint" => [DBI::SQL_SMALLINT, 2],
+        "integer" => [DBI::SQL_INTEGER, 4],
+        "bigint" => [DBI::SQL_BIGINT, 8],
+        "numeric" => [DBI::SQL_NUMERIC, nil, nil],
+        "time with time zone" => [DBI::SQL_TIME, nil, nil],
+        "timestamp with time zone" => [DBI::SQL_TIMESTAMP, nil, nil],
+        "bit varying" => [DBI::SQL_BINARY, nil, nil], #huh??
+        "character varying" => [DBI::SQL_VARCHAR, nil, nil],
+        "bit" => [DBI::SQL_TINYINT, nil, nil],
+        "text" => [DBI::SQL_VARCHAR, nil, nil],
+        nil => [DBI::SQL_OTHER, nil, nil]
+    }
+
+    attr_reader :type_map
+
+    #
+    # See DBI::BaseDatabase#new. These attributes are also supported:
+    #
+    # * pg_async: boolean or strings 'true' or 'false'. Indicates if we're to
+    #   use PostgreSQL's asyncrohonous support. 'NonBlocking' is a synonym for
+    #   this.
+    # * AutoCommit: 'unchained' mode in PostgreSQL. Commits after each
+    #   statement execution.
+    # * pg_client_encoding: set the encoding for the client.
+    # * pg_native_binding: Boolean. Indicates whether to use libpq native
+    #   binding or DBI's inline binding. Defaults to true.
+    #
+    def initialize(dbname, user, auth, attr)
+        hash = DBI::Utils.parse_params(dbname)
+
+        if hash['dbname'].nil? and hash['database'].nil?
+            raise DBI::InterfaceError, "must specify database"
+        end
+
+        hash['options'] ||= nil
+        hash['tty'] ||= ''
+        hash['port'] = hash['port'].to_i unless hash['port'].nil?
+
+        @connection = PGconn.new(hash['host'], hash['port'], hash['options'], hash['tty'],
+                                 hash['dbname'] || hash['database'], user, auth)
+
+        @exec_method = :exec
+        @in_transaction = false
+
+        # set attribute defaults, and look for pg_* attrs in the DSN
+        @attr = { 'AutoCommit' => true, 'pg_async' => false }
+        hash.each do |key, value|
+            @attr[key] = value if key =~ /^pg_./
+        end
+        @attr.merge!(attr || {})
+        if @attr['pg_async'].is_a?(String)
+            case @attr['pg_async'].downcase
+            when 'true'
+                @attr['pg_async'] = true
+            when 'false'
+                @attr['pg_async'] = false
+            else
+                raise InterfaceError, %q{'pg_async' must be 'true' or 'false'}
+            end
+        end
+
+        @attr.each { |k,v| self[k] = v}
+        @attr["pg_native_binding"] = true unless @attr.has_key? "pg_native_binding"
+
+        load_type_map
+
+        self['AutoCommit'] = true # Postgres starts in unchained mode (AutoCommit=on) by default
+
+    rescue PGError => err
+        raise DBI::OperationalError.new(err.message)
+    end
+
+    def disconnect
+        if not @attr['AutoCommit'] and @in_transaction
+            _exec("ROLLBACK") # rollback outstanding transactions
+        end
+        @connection.close
+    end
+
+    def ping
+        answer = _exec("SELECT 1")
+        if answer
+            return answer.num_tuples == 1
+        else
+            return false
+        end
+    rescue PGError
+        return false
+    ensure
+        answer.clear if answer
+    end
+
+    def database_name
+        @connection.db
+    end
+
+    def tables
+        stmt = execute("SELECT c.relname FROM pg_catalog.pg_class c WHERE c.relkind IN ('r','v') and pg_catalog.pg_table_is_visible(c.oid)")
+        res = stmt.fetch_all.collect {|row| row[0]}
+        stmt.finish
+        res
+    end
+
+    #
+    # See DBI::BaseDatabase.
+    #
+    # These additional attributes are also supported:
+    #
+    # * nullable: true if NULL values are allowed in this column.
+    # * indexed: true if this column is a part of an index.
+    # * primary: true if this column is a part of a primary key.
+    # * unique: true if this column is a part of a unique key.
+    # * default: what will be insert if this column is left out of an insert query.
+    # * array_of_type: true if this is actually an array of this type.
+    #   +dbi_type+ will be the type authority if this is the case.
+    #
+    def columns(table)
+        sql1 = %[
+            select a.attname, i.indisprimary, i.indisunique
+            from pg_class bc inner join pg_index i
+                on bc.oid = i.indrelid
+                inner join pg_class c
+                    on c.oid = i.indexrelid
+                inner join pg_attribute a
+                    on c.oid = a.attrelid
+            where bc.relname = ?
+                and bc.relkind in ('r', 'v')
+                and pg_catalog.pg_table_is_visible(bc.oid);
+        ]
+
+        sql2 = %[
+            SELECT a.attname, a.atttypid, a.attnotnull, a.attlen, format_type(a.atttypid, a.atttypmod)
+            FROM pg_catalog.pg_class c, pg_attribute a, pg_type t
+            WHERE a.attnum > 0 AND a.attrelid = c.oid AND a.atttypid = t.oid AND c.relname = ?
+                AND c.relkind IN ('r','v')
+                AND pg_catalog.pg_table_is_visible(c.oid)
+        ]
+
+        # by Michael Neumann (get default value)
+        # corrected by Joseph McDonald
+        sql3 = %[
+            SELECT pg_attrdef.adsrc, pg_attribute.attname
+            FROM pg_attribute, pg_attrdef, pg_catalog.pg_class
+            WHERE pg_catalog.pg_class.relname = ? AND
+                pg_attribute.attrelid = pg_catalog.pg_class.oid AND
+                pg_attrdef.adrelid = pg_catalog.pg_class.oid AND
+                pg_attrdef.adnum = pg_attribute.attnum
+                AND pg_catalog.pg_class.relkind IN ('r','v')
+                AND pg_catalog.pg_table_is_visible(pg_catalog.pg_class.oid)
+        ]
+
+        dbh = DBI::DatabaseHandle.new(self)
+        dbh.driver_name = DBI::DBD::Pg.driver_name
+        indices = {}
+        default_values = {}
+
+        dbh.select_all(sql3, table) do |default, name|
+            default_values[name] = default
+        end
+
+        dbh.select_all(sql1, table) do |name, primary, unique|
+            indices[name] = [primary, unique]
+        end
+
+        ##########
+
+        ret = []
+        dbh.execute(sql2, table) do |sth|
+            ret = sth.collect do |row|
+                name, pg_type, notnullable, len, ftype = row
+                #name = row[2]
+                indexed = false
+                primary = nil
+                unique = nil
+                if indices.has_key?(name)
+                    indexed = true
+                    primary, unique = indices[name]
+                end
+
+                typeinfo = DBI::DBD::Pg.parse_type(ftype)
+                typeinfo[:size] ||= len
+
+                if POSTGRESQL_to_XOPEN.has_key?(typeinfo[:type])
+                    sql_type = POSTGRESQL_to_XOPEN[typeinfo[:type]][0]
+                else
+                    sql_type = POSTGRESQL_to_XOPEN[nil][0]
+                end
+
+                row = {}
+                row['name'] = name
+                row['sql_type'] = sql_type
+                row['type_name'] = typeinfo[:type]
+                row['nullable'] = ! notnullable
+                row['indexed'] = indexed
+                row['primary'] = primary
+                row['unique'] = unique
+                row['precision'] = typeinfo[:size]
+                row['scale'] = typeinfo[:decimal]
+                row['default'] = default_values[name]
+                row['array_of_type'] = typeinfo[:array]
+
+                if typeinfo[:array]
+                    row['dbi_type'] =
+                        DBI::DBD::Pg::Type::Array.new(
+                            DBI::TypeUtil.type_name_to_module(typeinfo[:type])
+                        )
+                end
+                row
+            end # collect
+        end # execute
+
+        return ret
+    end
+
+    def prepare(statement)
+        DBI::DBD::Pg::Statement.new(self, statement)
+    end
+
+    def [](attr)
+        case attr
+        when 'pg_client_encoding'
+            @connection.client_encoding
+        when 'NonBlocking'
+            @attr['pg_async']
+        else
+            @attr[attr]
+        end
+    end
+
+    def []=(attr, value)
+        case attr
+        when 'AutoCommit'
+            if @attr['AutoCommit'] != value then
+                if value # turn AutoCommit ON
+                    if @in_transaction
+                        # TODO: commit outstanding transactions?
+                        _exec("COMMIT")
+                        @in_transaction = false
+                    end
+                else # turn AutoCommit OFF
+                    @in_transaction = false
+                end
+            end
+            # value is assigned below
+        when 'NonBlocking', 'pg_async'
+            # booleanize input
+            value = value ? true : false
+            @pgexec = (value ? DBI::DBD::Pg::PgExecutorAsync : DBI::DBD::Pg::PgExecutor).new(@connection)
+            # value is assigned to @attr below
+        when 'pg_client_encoding'
+            @connection.set_client_encoding(value)
+        when 'pg_native_binding'
+            @attr[attr] = value
+        else
+            if attr =~ /^pg_/ or attr != /_/
+                raise DBI::NotSupportedError, "Option '#{attr}' not supported"
+            else # option for some other driver - quitly ignore
+                return
+            end
+        end
+        @attr[attr] = value
+    end
+
+    def commit
+        if @in_transaction
+            _exec("COMMIT")
+            @in_transaction = false
+        else
+            # TODO: Warn?
+        end
+    end
+
+    def rollback
+        if @in_transaction
+            _exec("ROLLBACK")
+            @in_transaction = false
+        else
+            # TODO: Warn?
+        end
+    end
+
+    #
+    # Are we in an transaction?
+    #
+    def in_transaction?
+        @in_transaction
+    end
+
+    #
+    # Forcibly initializes a new transaction.
+    #
+    def start_transaction
+        _exec("BEGIN")
+        @in_transaction = true
+    end
+
+    def _exec(sql, *parameters)
+        @pgexec.exec(sql, parameters)
+    end
+
+    def _exec_prepared(stmt_name, *parameters)
+        @pgexec.exec_prepared(stmt_name, parameters)
+    end
+
+    def _prepare(stmt_name, sql)
+        @pgexec.prepare(stmt_name, sql)
+    end
+
+    private
+
+    # special quoting if value is element of an array
+    def quote_array_elements( value )
+        # XXX is this method still being used?
+        case value
+        when Array
+            '{'+ value.collect{|v| quote_array_elements(v) }.join(',') + '}'
+        when String
+            '"' + value.gsub(/\\/){ '\\\\' }.gsub(/"/){ '\\"' } + '"'
+        else
+            quote( value ).sub(/^'/,'').sub(/'$/,'')
+        end
+    end
+
+    def parse_type_name(type_name)
+        case type_name
+        when 'bool' then DBI::Type::Boolean
+        when 'int8', 'int4', 'int2' then DBI::Type::Integer
+        when 'varchar' then DBI::Type::Varchar
+        when 'float4','float8' then DBI::Type::Float
+        when 'time', 'timetz' then DBI::Type::Timestamp
+        when 'timestamp', 'timestamptz' then DBI::Type::Timestamp
+        when 'date' then DBI::Type::Timestamp
+        when 'decimal', 'numeric' then DBI::Type::Decimal
+        when 'bytea' then DBI::DBD::Pg::Type::ByteA
+        when 'enum' then DBI::Type::Varchar
+        end
+    end
+
+    #
+    # Gathers the types from the postgres database and attempts to
+    # locate matching DBI::Type objects for them.
+    #
+    def load_type_map
+        @type_map = Hash.new
+
+        res = _exec("SELECT oid, typname, typelem FROM pg_type WHERE typtype IN ('b', 'e')")
+
+        res.each do |row|
+            rowtype = parse_type_name(row["typname"])
+            @type_map[row["oid"].to_i] =
+                {
+                    "type_name" => row["typname"],
+                    "dbi_type" =>
+                        if rowtype
+                            rowtype
+                        elsif row["typname"] =~ /^_/ and row["typelem"].to_i > 0 then
+                            # arrays are special and have a subtype, as an
+                            # oid held in the "typelem" field.
+                            # Since we may not have a mapping for the
+                            # subtype yet, defer by storing the typelem
+                            # integer as a base type in a constructed
+                            # Type::Array object. dirty, i know.
+                            #
+                            # These array objects will be reconstructed
+                            # after all rows are processed and therefore
+                            # the oid -> type mapping is complete.
+                            #
+                            DBI::DBD::Pg::Type::Array.new(row["typelem"].to_i)
+                        else
+                            DBI::Type::Varchar
+                        end
+                }
+        end
+        # additional conversions
+        @type_map[705] ||= DBI::Type::Varchar # select 'hallo'
+        @type_map[1114] ||= DBI::Type::Timestamp # TIMESTAMP WITHOUT TIME ZONE
+
+        # remap array subtypes
+        @type_map.each_key do |key|
+            if @type_map[key]["dbi_type"].class == DBI::DBD::Pg::Type::Array
+                oid = @type_map[key]["dbi_type"].base_type
+                if @type_map[oid]
+                    @type_map[key]["dbi_type"] = DBI::DBD::Pg::Type::Array.new(@type_map[oid]["dbi_type"])
+                else
+                    # punt
+                    @type_map[key] = DBI::DBD::Pg::Type::Array.new(DBI::Type::Varchar)
+                end
+            end
+        end
+    end
+
+    public
+
+    # return the postgresql types for this session. returns an oid -> type name mapping.
+    def __types(force=nil)
+        load_type_map if (!@type_map or force)
+        @type_map
+    end
+
+    # deprecated.
+    def __types_old
+        h = { }
+
+        _exec('select oid, typname from pg_type').each do |row|
+            h[row["oid"].to_i] = row["typname"]
+        end
+
+        return h
+    end
+
+    #
+    # Import a BLOB from a file.
+    #
+    def __blob_import(file)
+        start_transaction unless @in_transaction
+        @connection.lo_import(file)
+    rescue PGError => err
+        raise DBI::DatabaseError.new(err.message)
+    end
+
+    #
+    # Export a BLOB to a file.
+    #
+    def __blob_export(oid, file)
+        start_transaction unless @in_transaction
+        @connection.lo_export(oid.to_i, file)
+    rescue PGError => err
+        raise DBI::DatabaseError.new(err.message)
+    end
+
+    #
+    # Create a BLOB.
+    #
+    def __blob_create(mode=PGconn::INV_READ)
+        start_transaction unless @in_transaction
+        @connection.lo_creat(mode)
+    rescue PGError => err
+        raise DBI::DatabaseError.new(err.message)
+    end
+
+    #
+    # Open a BLOB.
+    #
+    def __blob_open(oid, mode=PGconn::INV_READ)
+        start_transaction unless @in_transaction
+        @connection.lo_open(oid.to_i, mode)
+    rescue PGError => err
+        raise DBI::DatabaseError.new(err.message)
+    end
+
+    #
+    # Remove a BLOB.
+    #
+    def __blob_unlink(oid)
+        start_transaction unless @in_transaction
+        @connection.lo_unlink(oid.to_i)
+    rescue PGError => err
+        raise DBI::DatabaseError.new(err.message)
+    end
+
+    #
+    # Read a BLOB and return the data.
+    #
+    def __blob_read(oid, length)
+        blob = @connection.lo_open(oid.to_i, PGconn::INV_READ)
+
+        if length.nil?
+            data = @connection.lo_read(blob)
+        else
+            data = @connection.lo_read(blob, length)
+        end
+
+        # FIXME it doesn't like to close here either.
+        # @connection.lo_close(blob)
+        data
+    rescue PGError => err
+        raise DBI::DatabaseError.new(err.message)
+    end
+
+    #
+    # Write the value to the BLOB.
+    #
+    def __blob_write(oid, value)
+        start_transaction unless @in_transaction
+        blob = @connection.lo_open(oid.to_i, PGconn::INV_WRITE)
+        res = @connection.lo_write(blob, value)
+        # FIXME not sure why PG doesn't like to close here -- seems to be
+        # working but we should make sure it's not eating file descriptors
+        # up before release.
+        # @connection.lo_close(blob)
+        return res
+    rescue PGError => err
+        raise DBI::DatabaseError.new(err.message)
+    end
+
+    #
+    # FIXME DOCUMENT
+    #
+    def __set_notice_processor(proc)
+        @connection.set_notice_processor proc
+    rescue PGError => err
+        raise DBI::DatabaseError.new(err.message)
+    end
+end # Database
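For context, a typical connection through this driver looks roughly like the sketch below. It is not part of the diff; the DSN, credentials, and table name are placeholders, and the attribute names (AutoCommit, pg_async, pg_client_encoding, pg_native_binding) are the ones documented on Database#initialize above.

    require 'dbi'

    # host/port/dbname in the DSN are parsed by DBI::Utils.parse_params;
    # pg_* keys may be given either in the DSN or in the attribute hash.
    dbh = DBI.connect("DBI:Pg:dbname=mydb;host=localhost;port=5432",
                      "username", "password",
                      "AutoCommit" => false,   # chained mode: explicit commit/rollback
                      "pg_async"   => true)    # use the asynchronous executor

    dbh["pg_client_encoding"] = "UTF8"         # routed through Database#[]=

    dbh.do("INSERT INTO some_table (name) VALUES (?)", "example")
    dbh.commit                                 # Database#commit issues COMMIT
    dbh.disconnect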