dbd-pg 0.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
#
# See DBI::BaseDatabase.
#
class DBI::DBD::Pg::Database < DBI::BaseDatabase

    # type map
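    # Each entry appears to map a PostgreSQL type name to a triple of
    # [X/Open SQL type constant, size, decimal digits]; nil marks slots that
    # do not apply or are unknown.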
    POSTGRESQL_to_XOPEN = {
        "boolean"                  => [DBI::SQL_CHAR, 1, nil],
        "character"                => [DBI::SQL_CHAR, 1, nil],
        "char"                     => [DBI::SQL_CHAR, 1, nil],
        "real"                     => [DBI::SQL_REAL, 4, 6],
        "double precision"         => [DBI::SQL_DOUBLE, 8, 15],
        "smallint"                 => [DBI::SQL_SMALLINT, 2],
        "integer"                  => [DBI::SQL_INTEGER, 4],
        "bigint"                   => [DBI::SQL_BIGINT, 8],
        "numeric"                  => [DBI::SQL_NUMERIC, nil, nil],
        "time with time zone"      => [DBI::SQL_TIME, nil, nil],
        "timestamp with time zone" => [DBI::SQL_TIMESTAMP, nil, nil],
        "bit varying"              => [DBI::SQL_BINARY, nil, nil], #huh??
        "character varying"        => [DBI::SQL_VARCHAR, nil, nil],
        "bit"                      => [DBI::SQL_TINYINT, nil, nil],
        "text"                     => [DBI::SQL_VARCHAR, nil, nil],
        nil                        => [DBI::SQL_OTHER, nil, nil]
    }

    attr_reader :type_map

    #
    # See DBI::BaseDatabase#new. These attributes are also supported:
    #
    # * pg_async: boolean or strings 'true' or 'false'. Indicates if we're to
    #   use PostgreSQL's asynchronous support. 'NonBlocking' is a synonym for
    #   this.
    # * AutoCommit: 'unchained' mode in PostgreSQL. Commits after each
    #   statement execution.
    # * pg_client_encoding: set the encoding for the client.
    # * pg_native_binding: Boolean. Indicates whether to use libpq native
    #   binding or DBI's inline binding. Defaults to true.
    #
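    # A hypothetical usage sketch (database name, host and credentials are
    # placeholders); these attributes are normally passed through DBI.connect:
    #
    #   require 'dbi'
    #   dbh = DBI.connect("dbi:Pg:dbname=mydb;host=localhost;port=5432",
    #                     "user", "secret", 'pg_async' => true)
    #   dbh['pg_client_encoding'] = 'UTF8'
    #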
    def initialize(dbname, user, auth, attr)
        hash = DBI::Utils.parse_params(dbname)

        if hash['dbname'].nil? and hash['database'].nil?
            raise DBI::InterfaceError, "must specify database"
        end

        hash['options'] ||= nil
        hash['tty'] ||= ''
        hash['port'] = hash['port'].to_i unless hash['port'].nil?

        @connection = PGconn.new(hash['host'], hash['port'], hash['options'], hash['tty'],
                                 hash['dbname'] || hash['database'], user, auth)

        @exec_method = :exec
        @in_transaction = false

        # set attribute defaults, and look for pg_* attrs in the DSN
        @attr = { 'AutoCommit' => true, 'pg_async' => false }
        hash.each do |key, value|
            @attr[key] = value if key =~ /^pg_./
        end
        @attr.merge!(attr || {})
        if @attr['pg_async'].is_a?(String)
            case @attr['pg_async'].downcase
            when 'true'
                @attr['pg_async'] = true
            when 'false'
                @attr['pg_async'] = false
            else
                raise DBI::InterfaceError, %q{'pg_async' must be 'true' or 'false'}
            end
        end

        @attr.each { |k, v| self[k] = v }
        @attr["pg_native_binding"] = true unless @attr.has_key? "pg_native_binding"

        @type_map = __types

        self['AutoCommit'] = true # Postgres starts in unchained mode (AutoCommit=on) by default

    rescue PGError => err
        raise DBI::OperationalError.new(err.message)
    end

    def disconnect
        if not @attr['AutoCommit'] and @in_transaction
            _exec("ROLLBACK") # rollback outstanding transactions
        end
        @connection.close
    end

    def ping
        answer = _exec("SELECT 1")
        if answer
            return answer.num_tuples == 1
        else
            return false
        end
    rescue PGError
        return false
    ensure
        answer.clear if answer
    end

    def tables
        stmt = execute("SELECT c.relname FROM pg_catalog.pg_class c WHERE c.relkind IN ('r','v') and pg_catalog.pg_table_is_visible(c.oid)")
        res = stmt.fetch_all.collect { |row| row[0] }
        stmt.finish
        res
    end

    #
    # See DBI::BaseDatabase.
    #
    # These additional attributes are also supported:
    #
    # * nullable: true if NULL values are allowed in this column.
    # * indexed: true if this column is a part of an index.
    # * primary: true if this column is a part of a primary key.
    # * unique: true if this column is a part of a unique key.
    # * default: what will be inserted if this column is left out of an insert query.
    # * array_of_type: true if this is actually an array of this type.
    #   +dbi_type+ will be the type authority if this is the case.
    #
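    # A hypothetical example (the table name is a placeholder), called through
    # a DBI database handle:
    #
    #   dbh.columns("users").each do |col|
    #       puts "#{col['name']} #{col['type_name']} nullable=#{col['nullable']}"
    #   end
    #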
    def columns(table)
        sql1 = %[
            select a.attname, i.indisprimary, i.indisunique
            from pg_class bc inner join pg_index i
                on bc.oid = i.indrelid
                inner join pg_class c
                    on c.oid = i.indexrelid
                inner join pg_attribute a
                    on c.oid = a.attrelid
            where bc.relname = ?
                and bc.relkind in ('r', 'v')
                and pg_catalog.pg_table_is_visible(bc.oid);
        ]

        sql2 = %[
            SELECT a.attname, a.atttypid, a.attnotnull, a.attlen, format_type(a.atttypid, a.atttypmod)
            FROM pg_catalog.pg_class c, pg_attribute a, pg_type t
            WHERE a.attnum > 0 AND a.attrelid = c.oid AND a.atttypid = t.oid AND c.relname = ?
                AND c.relkind IN ('r','v')
                AND pg_catalog.pg_table_is_visible(c.oid)
        ]

        # by Michael Neumann (get default value)
        # corrected by Joseph McDonald
        sql3 = %[
            SELECT pg_attrdef.adsrc, pg_attribute.attname
            FROM pg_attribute, pg_attrdef, pg_catalog.pg_class
            WHERE pg_catalog.pg_class.relname = ? AND
                pg_attribute.attrelid = pg_catalog.pg_class.oid AND
                pg_attrdef.adrelid = pg_catalog.pg_class.oid AND
                pg_attrdef.adnum = pg_attribute.attnum
                AND pg_catalog.pg_class.relkind IN ('r','v')
                AND pg_catalog.pg_table_is_visible(pg_catalog.pg_class.oid)
        ]

        dbh = DBI::DatabaseHandle.new(self)
        dbh.driver_name = DBI::DBD::Pg.driver_name
        indices = {}
        default_values = {}

        dbh.select_all(sql3, table) do |default, name|
            default_values[name] = default
        end

        dbh.select_all(sql1, table) do |name, primary, unique|
            indices[name] = [primary, unique]
        end

        ##########

        ret = []
        dbh.execute(sql2, table) do |sth|
            ret = sth.collect do |row|
                name, pg_type, notnullable, len, ftype = row
                #name = row[2]
                indexed = false
                primary = nil
                unique = nil
                if indices.has_key?(name)
                    indexed = true
                    primary, unique = indices[name]
                end

                typeinfo = DBI::DBD::Pg.parse_type(ftype)
                typeinfo[:size] ||= len

                if POSTGRESQL_to_XOPEN.has_key?(typeinfo[:type])
                    sql_type = POSTGRESQL_to_XOPEN[typeinfo[:type]][0]
                else
                    sql_type = POSTGRESQL_to_XOPEN[nil][0]
                end

                row = {}
                row['name'] = name
                row['sql_type'] = sql_type
                row['type_name'] = typeinfo[:type]
                row['nullable'] = ! notnullable
                row['indexed'] = indexed
                row['primary'] = primary
                row['unique'] = unique
                row['precision'] = typeinfo[:size]
                row['scale'] = typeinfo[:decimal]
                row['default'] = default_values[name]
                row['array_of_type'] = typeinfo[:array]

                if typeinfo[:array]
                    row['dbi_type'] =
                        DBI::DBD::Pg::Type::Array.new(
                            DBI::TypeUtil.type_name_to_module(typeinfo[:type])
                        )
                end
                row
            end # collect
        end # execute

        return ret
    end

    def prepare(statement)
        DBI::DBD::Pg::Statement.new(self, statement)
    end

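    #
    # Attribute reader. 'pg_client_encoding' is read from the live connection,
    # 'NonBlocking' is answered from 'pg_async', and anything else comes from
    # the attribute hash, e.g. (hypothetically):
    #
    #   dbh['pg_client_encoding']   # => "UTF8"
    #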
    def [](attr)
        case attr
        when 'pg_client_encoding'
            @connection.client_encoding
        when 'NonBlocking'
            @attr['pg_async']
        else
            @attr[attr]
        end
    end

    def []=(attr, value)
        case attr
        when 'AutoCommit'
            if @attr['AutoCommit'] != value then
                if value # turn AutoCommit ON
                    if @in_transaction
                        # TODO: commit outstanding transactions?
                        _exec("COMMIT")
                        @in_transaction = false
                    end
                else # turn AutoCommit OFF
                    @in_transaction = false
                end
            end
            # value is assigned below
        when 'NonBlocking', 'pg_async'
            # booleanize input
            value = value ? true : false
            @pgexec = (value ? DBI::DBD::Pg::PgExecutorAsync : DBI::DBD::Pg::PgExecutor).new(@connection)
            # value is assigned to @attr below
        when 'pg_client_encoding'
            @connection.set_client_encoding(value)
        when 'pg_native_binding'
            @attr[attr] = value
        else
            if attr =~ /^pg_/ or attr !~ /_/
                raise DBI::NotSupportedError, "Option '#{attr}' not supported"
            else # option for some other driver - quietly ignore
                return
            end
        end
        @attr[attr] = value
    end

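    # Hypothetical sketch of manual transaction handling through DBI (table
    # and column names are placeholders):
    #
    #   dbh['AutoCommit'] = false
    #   begin
    #       dbh.do("INSERT INTO logs (msg) VALUES (?)", "hello")
    #       dbh.commit
    #   rescue DBI::DatabaseError
    #       dbh.rollback
    #   end
    #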
    def commit
        if @in_transaction
            _exec("COMMIT")
            @in_transaction = false
        else
            # TODO: Warn?
        end
    end

    def rollback
        if @in_transaction
            _exec("ROLLBACK")
            @in_transaction = false
        else
            # TODO: Warn?
        end
    end

    #
    # Are we in a transaction?
    #
    def in_transaction?
        @in_transaction
    end

    #
    # Forcibly initializes a new transaction.
    #
    def start_transaction
        _exec("BEGIN")
        @in_transaction = true
    end

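    # The _exec/_exec_prepared/_prepare helpers below delegate to the
    # PgExecutor (or PgExecutorAsync) selected via the 'pg_async' attribute.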
    def _exec(sql, *parameters)
        @pgexec.exec(sql, parameters)
    end

    def _exec_prepared(stmt_name, *parameters)
        @pgexec.exec_prepared(stmt_name, parameters)
    end

    def _prepare(stmt_name, sql)
        @pgexec.prepare(stmt_name, sql)
    end

    private

    # special quoting if value is element of an array
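    # e.g., hypothetically: quote_array_elements(%w[red green blue])
    # yields '{"red","green","blue"}'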
    def quote_array_elements(value)
        # XXX is this method still being used?
        case value
        when Array
            '{' + value.collect { |v| quote_array_elements(v) }.join(',') + '}'
        when String
            '"' + value.gsub(/\\/){ '\\\\' }.gsub(/"/){ '\\"' } + '"'
        else
            quote(value).sub(/^'/, '').sub(/'$/, '')
        end
    end

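    # Maps a PostgreSQL base type name (pg_type.typname) to a DBI::Type class;
    # returns nil for names it does not recognize.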
    def parse_type_name(type_name)
        case type_name
        when 'bool'                     then DBI::Type::Boolean
        when 'int8', 'int4', 'int2'     then DBI::Type::Integer
        when 'varchar'                  then DBI::Type::Varchar
        when 'float4', 'float8'         then DBI::Type::Float
        when 'time', 'timetz'           then DBI::Type::Timestamp
        when 'timestamp', 'timestamptz' then DBI::Type::Timestamp
        when 'date'                     then DBI::Type::Timestamp
        when 'bytea'                    then DBI::DBD::Pg::Type::ByteA
        end
    end

    #
    # Gathers the types from the postgres database and attempts to
    # locate matching DBI::Type objects for them.
    #
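    # Each resulting entry is keyed by type oid and holds the type name plus a
    # DBI type class; hypothetically, on a stock server:
    #
    #   @type_map[23]   # => { "type_name" => "int4", "dbi_type" => DBI::Type::Integer }
    #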
    def load_type_map
        @type_map = Hash.new

        res = _exec("SELECT oid, typname, typelem FROM pg_type WHERE typtype = 'b'")

        res.each do |row|
            rowtype = parse_type_name(row["typname"])
            @type_map[row["oid"].to_i] =
                {
                    "type_name" => row["typname"],
                    "dbi_type" =>
                        if rowtype
                            rowtype
                        elsif row["typname"] =~ /^_/ and row["typelem"].to_i > 0 then
                            # arrays are special and have a subtype, as an
                            # oid held in the "typelem" field.
                            # Since we may not have a mapping for the
                            # subtype yet, defer by storing the typelem
                            # integer as a base type in a constructed
                            # Type::Array object. dirty, i know.
                            #
                            # These array objects will be reconstructed
                            # after all rows are processed and therefore
                            # the oid -> type mapping is complete.
                            #
                            DBI::DBD::Pg::Type::Array.new(row["typelem"].to_i)
                        else
                            DBI::Type::Varchar
                        end
                }
        end
        # additional conversions
        @type_map[705]  ||= DBI::Type::Varchar   # select 'hallo'
        @type_map[1114] ||= DBI::Type::Timestamp # TIMESTAMP WITHOUT TIME ZONE

        # remap array subtypes
        @type_map.each_key do |key|
            if @type_map[key]["dbi_type"].class == DBI::DBD::Pg::Type::Array
                oid = @type_map[key]["dbi_type"].base_type
                if @type_map[oid]
                    @type_map[key]["dbi_type"] = DBI::DBD::Pg::Type::Array.new(@type_map[oid]["dbi_type"])
                else
                    # punt
                    @type_map[key] = DBI::DBD::Pg::Type::Array.new(DBI::Type::Varchar)
                end
            end
        end
    end

    public

    # Return the PostgreSQL types for this session as a mapping of type oid to
    # type information (type name and DBI type).
    def __types(force=nil)
        load_type_map if (!@type_map or force)
        @type_map
    end

    # deprecated.
    def __types_old
        h = { }

        _exec('select oid, typname from pg_type').each do |row|
            h[row["oid"].to_i] = row["typname"]
        end

        return h
    end

    #
    # Import a BLOB from a file.
    #
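    # Hypothetical usage of the BLOB helpers through DBI's driver-function
    # interface (file paths are placeholders):
    #
    #   oid = dbh.func(:blob_import, "/tmp/avatar.png")
    #   dbh.func(:blob_export, oid, "/tmp/avatar_copy.png")
    #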
    def __blob_import(file)
        start_transaction unless @in_transaction
        @connection.lo_import(file)
    rescue PGError => err
        raise DBI::DatabaseError.new(err.message)
    end

    #
    # Export a BLOB to a file.
    #
    def __blob_export(oid, file)
        start_transaction unless @in_transaction
        @connection.lo_export(oid.to_i, file)
    rescue PGError => err
        raise DBI::DatabaseError.new(err.message)
    end

    #
    # Create a BLOB.
    #
    def __blob_create(mode=PGconn::INV_READ)
        start_transaction unless @in_transaction
        @connection.lo_creat(mode)
    rescue PGError => err
        raise DBI::DatabaseError.new(err.message)
    end

    #
    # Open a BLOB.
    #
    def __blob_open(oid, mode=PGconn::INV_READ)
        start_transaction unless @in_transaction
        @connection.lo_open(oid.to_i, mode)
    rescue PGError => err
        raise DBI::DatabaseError.new(err.message)
    end

    #
    # Remove a BLOB.
    #
    def __blob_unlink(oid)
        start_transaction unless @in_transaction
        @connection.lo_unlink(oid.to_i)
    rescue PGError => err
        raise DBI::DatabaseError.new(err.message)
    end

    #
    # Read a BLOB and return the data.
    #
    def __blob_read(oid, length)
        blob = @connection.lo_open(oid.to_i, PGconn::INV_READ)

        if length.nil?
            data = @connection.lo_read(blob)
        else
            data = @connection.lo_read(blob, length)
        end

        # FIXME it doesn't like to close here either.
        # @connection.lo_close(blob)
        data
    rescue PGError => err
        raise DBI::DatabaseError.new(err.message)
    end

    #
    # Write the value to the BLOB.
    #
    def __blob_write(oid, value)
        start_transaction unless @in_transaction
        blob = @connection.lo_open(oid.to_i, PGconn::INV_WRITE)
        res = @connection.lo_write(blob, value)
        # FIXME not sure why PG doesn't like to close here -- seems to be
        # working but we should make sure it's not eating file descriptors
        # up before release.
        # @connection.lo_close(blob)
        return res
    rescue PGError => err
        raise DBI::DatabaseError.new(err.message)
    end

    #
    # Set a notice processor +proc+ on the underlying connection (passed
    # through to PGconn#set_notice_processor).
    #
    def __set_notice_processor(proc)
        @connection.set_notice_processor proc
    rescue PGError => err
        raise DBI::DatabaseError.new(err.message)
    end
end # Database