nbadw-util 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LICENSE +3 -0
- data/README +3 -0
- data/Rakefile +48 -0
- data/lib/nbadw/util/copy_database_task.rb +311 -0
- data/lib/nbadw/util/progress_bar.rb +236 -0
- data/lib/sequel/adapters/jdbc/access.rb +44 -0
- data/lib/sequel/adapters/shared/access.rb +416 -0
- data/lib/sequel/jdbc_access_adapter.rb +8 -0
- metadata +72 -0
data/LICENSE
ADDED
data/README
ADDED
data/Rakefile
ADDED
@@ -0,0 +1,48 @@
+# encoding: utf-8
+require 'rubygems'
+
+begin
+  require 'jeweler'
+  Jeweler::Tasks.new do |s|
+    s.name = "nbadw-util"
+    s.summary = %Q{NB Aquatic Data Warehouse - Models and Database Utilities}
+    s.email = "casey.colin@gmail.com"
+    s.homepage = "http://github.com/colincasey/nbadw"
+    s.description = "Database models, migrations, and utilities for the New Brunswick Aquatic Data Warehouse"
+    s.authors = ["Colin Casey"]
+    s.add_dependency 'sequel', '>= 3.5.0'
+    s.rubygems_version = '1.3.1'
+    s.files = FileList['lib/**/*.rb'] + ['README.rdoc', 'LICENSE', 'VERSION.yml', 'Rakefile']
+    s.files.exclude('main.rb')
+  end
+rescue LoadError => e
+  if e.message =~ /jeweler/
+    puts "Jeweler not available. Install it with: sudo gem install technicalpickles-jeweler -s http://gems.github.com"
+  else
+    puts e.message + ' -- while loading jeweler.'
+  end
+end
+
+require 'rake/rdoctask'
+Rake::RDocTask.new do |rdoc|
+  rdoc.rdoc_dir = 'rdoc'
+  rdoc.title = 'NB Aquatic Data Warehouse'
+  rdoc.options << '--line-numbers' << '--inline-source'
+  rdoc.rdoc_files.include('README*')
+  rdoc.rdoc_files.include('lib/**/*.rb')
+end
+
+begin
+  require 'rcov/rcovtask'
+  Rcov::RcovTask.new do |t|
+    t.libs << 'spec'
+    t.test_files = FileList['spec/*_spec.rb']
+    t.verbose = true
+  end
+rescue LoadError
+  if RUBY_PLATFORM =~ /java/
+    puts "RCov is not available. In order to run rcov, you must: sudo gem install jruby-rcov"
+  else
+    puts "RCov is not available. In order to run rcov, you must: sudo gem install spicycode-rcov"
+  end
+end
data/lib/nbadw/util/copy_database_task.rb
ADDED
@@ -0,0 +1,311 @@
+require 'sequel'
+require 'sequel/extensions/schema_dumper'
+require 'sequel/extensions/migration'
+require 'nbadw/util/progress_bar'
+require 'sequel/jdbc_access_adapter'
+
+module NBADW
+  module Util
+    class CopyDatabaseTask
+      attr_reader :source, :destination, :page_size, :except
+
+      def initialize(src, dest, options = {})
+        @source = Sequel.connect(src, :single_threaded => true)
+        @destination = Sequel.connect(dest, :single_threaded => true)
+        @page_size = options[:page_size] || :unlimited
+        @verify_data = !!options[:verify_data]
+        @except = options[:except] || []
+      end
+
+      def self.start(src, dest, options = {})
+        print "Initializing copy operation"
+        task = new(src, dest, options)
+        begin
+          task.copy
+        rescue Exception => e
+          puts "...fail!!!"
+          puts "Reason: #{e.message}"
+          puts e.backtrace.join("\n")
+        end
+      end
+
+      def copy
+        puts "..."
+        puts "#{source.tables.length} tables, #{format_number(total_records(source))} records"
+        copy_schema
+        copy_data
+        copy_indexes
+        verify_data if verify_data?
+        puts "...copy completed"
+      end
+
+      def copy_schema
+        begin
+          run_callback :before_copy_schema
+
+          tables = source.tables
+          progress = ProgressBar.new("Schema copy", tables.length)
+
+          tables.each do |t|
+            next if except.include?(t.to_s)
+            args = { :table => t, :schema => source.dump_table_schema(t.to_sym, :indexes => false) }
+            run_callback :before_create_table, args
+            migration = "Class.new(Sequel::Migration) do \n def up \n #{args[:schema]} \n end \n end"
+            eval(migration).apply(destination, :up)
+            run_callback :after_create_table, args
+            progress.inc(1)
+          end
+
+          run_callback :after_copy_schema
+        ensure
+          progress.finish if progress
+        end
+      end
+
+      def copy_data
+        run_callback :before_copy_data
+
+        progress = ProgressBar.new("Data copy", source.tables.size)
+        begin
+          source.tables.each do |table_name|
+            next if except.include?(table_name.to_s)
+            src_table = source[table_name.to_sym]
+            dst_table = destination[table_name.to_sym]
+            args = { :table => table_name }
+            page_size == :unlimited ? copy_table_without_limit(src_table, dst_table, args) : copy_table_with_limit(src_table, dst_table, args)
+            progress.inc(1)
+          end
+        ensure
+          progress.finish
+        end
+
+        run_callback :after_copy_data
+      end
+
+      def copy_table_without_limit(src_table, dst_table, args = {})
+        src_table.each do |row|
+          args.merge!({ :row => row })
+          run_callback :before_copy_row, args
+          dst_table.insert(row)
+          run_callback :after_copy_row, args
+        end
+      end
+
+      def copy_table_with_limit(src_table, dst_table, args = {})
+        count = src_table.count
+        offset = 0
+        while(offset < count) do
+          rows = src_table.limit(page_size, offset).all
+          rows.each_with_index do |row, i|
+            args.merge!({ :row => row, :index => i, :offset => offset })
+            run_callback :before_copy_row, args
+            dst_table.insert(row)
+            run_callback :after_copy_row, args
+          end
+          offset += rows.size
+        end
+      end
+
+      def copy_indexes
+        begin
+          run_callback :before_copy_indexes
+
+          tables = source.tables
+          progress = ProgressBar.new("Index copy", tables.length)
+
+          tables.each do |t|
+            next if except.include?(t.to_s)
+            args = { :table => t, :indexes => source.send(:dump_table_indexes, t.to_sym, :add_index) }
+            run_callback :before_add_indexes, args
+            migration = "Class.new(Sequel::Migration) do \n def up \n #{args[:indexes]} \n end \n end"
+            eval(migration).apply(destination, :up)
+            run_callback :after_add_indexes, args
+            progress.inc(1)
+          end
+
+          run_callback :after_copy_indexes
+        ensure
+          progress.finish if progress
+        end
+      end
+
+      def verify_data
+        tables = source.tables
+        progress = ProgressBar.new("Verify data", tables.length)
+        begin
+          tables.each do |table_name|
+            next if except.include?(table_name.to_s)
+            src_table = source[table_name.to_sym]
+            dst_table = destination[table_name.to_sym]
+            page_size == :unlimited ? verify_table_without_limit(table_name, src_table, dst_table) : verify_table_with_limit(table_name, src_table, dst_table)
+            progress.inc(1)
+          end
+        ensure
+          progress.finish if progress
+        end
+      end
+
+      def verify_table_without_limit(table_name, src_table, dst_table)
+        src_table.each do |row|
+          row_found = dst_table.filter(row).first
+          raise "no matching row found in #{table_name} for #{row.inspect}" unless row_found
+          verify_row(table_name, row, row_found)
+        end
+      end
+
+      def verify_table_with_limit(table_name, src_table, dst_table)
+        count = src_table.count
+        offset = 0
+        while(offset < count) do
+          rows = src_table.limit(page_size, offset).all
+          rows.each do |row|
+            row_found = dst_table.filter(row).first
+            raise "no matching row found in #{table_name} for #{row.inspect}" unless row_found
+            verify_row(table_name, row, row_found)
+          end
+          offset += rows.length
+        end
+      end
+
+      def verify_row(table_name, row1, row2)
+        diff = {}
+        row1.each do |col, val|
+          eql = case val
+            when Time then (val - row2[col]).abs < 1 # time fields are sometimes off by very miniscule fractions
+            else val == row2[col]
+          end
+          diff[col] = "#{val}, #{row2[col]}" unless eql
+        end
+        raise "row does not match exactly - expected #{row1.inspect}, but was #{row2.inspect} - in table #{table_name}, diff #{diff.inspect}" unless diff.empty?
+      end
+
+      def verify_data?
+        @verify_data
+      end
+
+      def total_records(db)
+        db.tables.inject(0) { |total, table_name| total += db[table_name.to_sym].count }
+      end
+
+      def format_number(num)
+        num.to_s.gsub(/(\d)(?=(\d\d\d)+(?!\d))/, "\\1,")
+      end
+
+      # the following is a callback system that helps to handle slight
+      # differences when copying between database types
+      class << self
+        def callbacks
+          @callbacks ||= []
+        end
+
+        def before(callback, opts = {}, &block)
+          add_callback(:before, callback, opts, &block)
+        end
+
+        def after(callback, opts = {}, &block)
+          add_callback(:after, callback, opts, &block)
+        end
+
+        def add_callback(type, callback, opts, &block)
+          callback_config = {
+            :type => type,
+            :callback => callback,
+            :adapter => opts[:adapter] || :all,
+            :for => opts[:for],
+            :logic => block
+          }
+          callbacks << callback_config
+        end
+      end
+
+      # prevent MySQL from changing '0' values on insert since we'd like an exact copy
+      before :copy_schema, :adapter => :mysql, :for => :destination do |src, dst, args|
+        dst.run("SET sql_mode = 'NO_AUTO_VALUE_ON_ZERO';")
+      end
+
+      # fix to catch schema dumps for PostgreSQL which set an invalid boolean default
+      before :create_table, :adapter => :postgres, :for => :destination do |src, dst, args|
+        schema = args[:schema]
+        schema = schema.split("\n").collect do |line|
+          if line.match(/TrueClass/)
+            line = line.sub(/:default=>(\d)/) { |match| ":default=>#{$1 == '0' ? 'true' : 'false'}" }
+          end
+          line
+        end.join("\n")
+        args[:schema] = schema
+      end
+
+      # this fixes strings used as primary keys
+      before :create_table, :adapter => :access, :for => :source do |src, dst, args|
+        table = args[:table].to_s
+        pks = src.schema(args[:table]).collect do |col_schema|
+          col, opts = col_schema
+          opts[:primary_key] ? col_schema : nil
+        end.compact
+
+        if pks.size == 1 && pks[0][1][:type] == :string
+          col, opts = pks[0]
+          schema = args[:schema]
+          schema = schema.split("\n").collect do |line|
+            line = "  String :#{col}, :size=>#{opts[:column_size]}, :null=>false" if line.match(/primary_key/)
+            line = "  primary_key [:#{col}]\nend" if line.match(/^end/)
+            line
+          end.join("\n")
+          args[:schema] = schema
+        end
+      end
+
+      # When copying from access, convert all BigDecimal columns to Float or lose precision!
+      before :create_table, :adapter => :access, :for => :source do |src, dst, args|
+        args[:schema] = args[:schema].gsub(/BigDecimal/, 'Float')
+      end
+
+      STRING_TO_INT_FIXES = [
+        { :table => "auxuserdbselectedsites", :column => "aquaticsiteuseid" },
+        { :table => "auxuserdbselectedsiteuse", :column => "aquaticsiteuseid" },
+        { :table => "cdtranslation - dfo stock mating", :column => "mating code" },
+        { :table => "del-missing age class in tblfishmeasurement", :column => "fishsampleid" },
+        { :table => "del-missing age class in tblfishmeasurement-robin", :column => "fishsampleid" },
+        { :table => "selections", :column => "selectionid" },
+        { :table => "tblelectrofishingmethoddetail", :column => "aquaticactivitydetailid" },
+        { :table => "tbloldhabitatsurvey", :column => "habitatsurveyid" }
+      ]
+      # not sure what's up here...
+      before :create_table, :adapter => :postgres, :for => :destination do |src, dst, args|
+        table = args[:table].to_s.downcase
+        if fix = STRING_TO_INT_FIXES.detect { |f| f[:table] == table }
+          schema = args[:schema]
+          schema = schema.split("\n").collect do |line|
+            line = "  Integer :\"#{fix[:column]}\"" if line.match(/#{fix[:column]}/)
+            line
+          end.join("\n")
+          args[:schema] = schema
+        end
+      end
+
+      # determines which callbacks to run (is this needlessly complex?)
+      def run_callback(full_callback, args = {})
+        full_callback.to_s.match(/(before|after)_(.*)/)
+        type, callback = $1.to_sym, $2.to_sym
+        CopyDatabaseTask.callbacks.each do |callback_config|
+          if callback_config[:type] == type && callback_config[:callback] == callback # callback matches
+            # which adapters should we check against?
+            adapters = [:all] # always check for all...
+            if callback_config[:for] == :destination # only destination?
+              adapters << destination.database_type.to_sym
+            elsif callback_config[:for] == :source # only source?
+              adapters << source.database_type.to_sym
+            else # or both?
+              adapters << destination.database_type.to_sym
+              adapters << source.database_type.to_sym
+            end
+            # if the adapter matches, run the callback
+            if adapters.include?(callback_config[:adapter])
+              callback_config[:logic].call(source, destination, args)
+            end
+          end
+        end
+      end
+    end # CopyDatabaseTask
+  end # Util
+end # NBADW
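The copy task above is driven entirely through CopyDatabaseTask.start and the class-level before/after hooks. A minimal usage sketch (the connection URLs, page size, and skipped table name are hypothetical, not taken from the gem):

    require 'nbadw/util/copy_database_task'

    # Copy an Access database into PostgreSQL in pages of 1000 rows,
    # then re-read every destination row to verify the copy.
    NBADW::Util::CopyDatabaseTask.start(
      'jdbc:access:/data/nbadw.mdb',                # hypothetical source URL
      'postgres://user:password@localhost/nbadw',   # hypothetical destination URL
      :page_size   => 1000,
      :verify_data => true,
      :except      => ['scratch_table']             # hypothetical table to skip
    )

Site-specific fix-ups register exactly like the built-in ones, e.g. CopyDatabaseTask.before(:create_table, :adapter => :postgres, :for => :destination) { |src, dst, args| ... }.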
data/lib/nbadw/util/progress_bar.rb
ADDED
@@ -0,0 +1,236 @@
+#
+# Ruby/ProgressBar - a text progress bar library
+#
+# Copyright (C) 2001-2005 Satoru Takabayashi <satoru@namazu.org>
+# All rights reserved.
+# This is free software with ABSOLUTELY NO WARRANTY.
+#
+# You can redistribute it and/or modify it under the terms
+# of Ruby's license.
+#
+
+class ProgressBar
+  VERSION = "0.9"
+
+  def initialize (title, total, out = STDERR)
+    @title = title
+    @total = total
+    @out = out
+    @terminal_width = 80
+    @bar_mark = "="
+    @current = 0
+    @previous = 0
+    @finished_p = false
+    @start_time = Time.now
+    @previous_time = @start_time
+    @title_width = 14
+    @format = "%-#{@title_width}s %3d%% %s %s"
+    @format_arguments = [:title, :percentage, :bar, :stat]
+    clear
+    show
+  end
+  attr_reader :title
+  attr_reader :current
+  attr_reader :total
+  attr_accessor :start_time
+
+  private
+  def fmt_bar
+    bar_width = do_percentage * @terminal_width / 100
+    sprintf("|%s%s|",
+            @bar_mark * bar_width,
+            " " * (@terminal_width - bar_width))
+  end
+
+  def fmt_percentage
+    do_percentage
+  end
+
+  def fmt_stat
+    if @finished_p then elapsed else eta end
+  end
+
+  def fmt_stat_for_file_transfer
+    if @finished_p then
+      sprintf("%s %s %s", bytes, transfer_rate, elapsed)
+    else
+      sprintf("%s %s %s", bytes, transfer_rate, eta)
+    end
+  end
+
+  def fmt_title
+    @title[0,(@title_width - 1)] + ":"
+  end
+
+  def convert_bytes (bytes)
+    if bytes < 1024
+      sprintf("%6dB", bytes)
+    elsif bytes < 1024 * 1000 # 1000kb
+      sprintf("%5.1fKB", bytes.to_f / 1024)
+    elsif bytes < 1024 * 1024 * 1000 # 1000mb
+      sprintf("%5.1fMB", bytes.to_f / 1024 / 1024)
+    else
+      sprintf("%5.1fGB", bytes.to_f / 1024 / 1024 / 1024)
+    end
+  end
+
+  def transfer_rate
+    bytes_per_second = @current.to_f / (Time.now - @start_time)
+    sprintf("%s/s", convert_bytes(bytes_per_second))
+  end
+
+  def bytes
+    convert_bytes(@current)
+  end
+
+  def format_time (t)
+    t = t.to_i
+    sec = t % 60
+    min = (t / 60) % 60
+    hour = t / 3600
+    sprintf("%02d:%02d:%02d", hour, min, sec);
+  end
+
+  # ETA stands for Estimated Time of Arrival.
+  def eta
+    if @current == 0
+      "ETA: --:--:--"
+    else
+      elapsed = Time.now - @start_time
+      eta = elapsed * @total / @current - elapsed;
+      sprintf("ETA: %s", format_time(eta))
+    end
+  end
+
+  def elapsed
+    elapsed = Time.now - @start_time
+    sprintf("Time: %s", format_time(elapsed))
+  end
+
+  def eol
+    if @finished_p then "\n" else "\r" end
+  end
+
+  def do_percentage
+    if @total.zero?
+      100
+    else
+      @current * 100 / @total
+    end
+  end
+
+  def get_width
+    # FIXME: I don't know how portable it is.
+    default_width = 80
+    begin
+      tiocgwinsz = 0x5413
+      data = [0, 0, 0, 0].pack("SSSS")
+      if @out.ioctl(tiocgwinsz, data) >= 0 then
+        rows, cols, xpixels, ypixels = data.unpack("SSSS")
+        if cols > 0 then cols else default_width end
+      else
+        default_width
+      end
+    rescue Exception
+      default_width
+    end
+  end
+
+  def show
+    arguments = @format_arguments.map {|method|
+      method = sprintf("fmt_%s", method)
+      send(method)
+    }
+    line = sprintf(@format, *arguments)
+
+    width = get_width
+    if line.length == width - 1
+      @out.print(line + eol)
+      @out.flush
+    elsif line.length >= width
+      @terminal_width = [@terminal_width - (line.length - width + 1), 0].max
+      if @terminal_width == 0 then @out.print(line + eol) else show end
+    else # line.length < width - 1
+      @terminal_width += width - line.length + 1
+      show
+    end
+    @previous_time = Time.now
+  end
+
+  def show_if_needed
+    if @total.zero?
+      cur_percentage = 100
+      prev_percentage = 0
+    else
+      cur_percentage = (@current * 100 / @total).to_i
+      prev_percentage = (@previous * 100 / @total).to_i
+    end
+
+    # Use "!=" instead of ">" to support negative changes
+    if cur_percentage != prev_percentage ||
+        Time.now - @previous_time >= 1 || @finished_p
+      show
+    end
+  end
+
+  public
+  def clear
+    @out.print "\r"
+    @out.print(" " * (get_width - 1))
+    @out.print "\r"
+  end
+
+  def finish
+    @current = @total
+    @finished_p = true
+    show
+  end
+
+  def finished?
+    @finished_p
+  end
+
+  def file_transfer_mode
+    @format_arguments = [:title, :percentage, :bar, :stat_for_file_transfer]
+  end
+
+  def format= (format)
+    @format = format
+  end
+
+  def format_arguments= (arguments)
+    @format_arguments = arguments
+  end
+
+  def halt
+    @finished_p = true
+    show
+  end
+
+  def inc (step = 1)
+    @current += step
+    @current = @total if @current > @total
+    show_if_needed
+    @previous = @current
+  end
+
+  def set (count)
+    if count < 0 || count > @total
+      raise "invalid count: #{count} (total: #{@total})"
+    end
+    @current = count
+    show_if_needed
+    @previous = @current
+  end
+
+  def inspect
+    "#<ProgressBar:#{@current}/#{@total}>"
+  end
+end
+
+class ReversedProgressBar < ProgressBar
+  def do_percentage
+    100 - super
+  end
+end
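ProgressBar is self-contained and can be exercised outside the copy task; a short illustrative sketch:

    require 'nbadw/util/progress_bar'

    bar = ProgressBar.new("Data copy", 50)  # title, total
    50.times do
      sleep 0.05                            # stand-in for real work
      bar.inc                               # advance by 1, clamped at total
    end
    bar.finish                              # jump to 100% and emit a final newline

For byte-oriented work, bar.file_transfer_mode swaps the stat column to bytes copied, transfer rate, and elapsed time or ETA.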
data/lib/sequel/adapters/jdbc/access.rb
ADDED
@@ -0,0 +1,44 @@
+require 'sequel/adapters/shared/access'
+
+module Sequel
+  module JDBC
+    class Database
+      # Alias the generic JDBC version so it can be called directly later
+      alias jdbc_schema_parse_table schema_parse_table
+    end
+
+    # Database and Dataset instance methods for MS Access specific
+    # support via JDBC.
+    module Access
+      # Database instance methods for Access databases accessed via JDBC.
+      module DatabaseMethods
+        PRIMARY_KEY_INDEX_RE = /\Apk__/i.freeze
+
+        include Sequel::Access::DatabaseMethods
+
+        # Return instance of Sequel::JDBC::Access::Dataset with the given opts.
+        def dataset(opts=nil)
+          Sequel::JDBC::Access::Dataset.new(self, opts)
+        end
+
+        private
+
+        # Call the generic JDBC version instead of the shared adapter version,
+        # since the JDBC version handles primary keys.
+        def schema_parse_table(table, opts={})
+          jdbc_schema_parse_table(table, opts)
+        end
+
+        # Primary key indexes appear to start with pk__ on MSSQL
+        def primary_key_index_re
+          PRIMARY_KEY_INDEX_RE
+        end
+      end
+
+      # Dataset class for Access datasets accessed via JDBC.
+      class Dataset < JDBC::Dataset
+        include Sequel::Access::DatasetMethods
+      end
+    end
+  end
+end
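Once these modules are loaded, a JDBC connection that resolves to the Access adapter picks up the shared DatabaseMethods and this Dataset subclass. A connection sketch (the URL format is an assumption for the HXTT Access driver, whose jar must be on the JRuby classpath):

    require 'sequel'
    require 'sequel/jdbc_access_adapter'

    DB = Sequel.connect('jdbc:access:/data/nbadw.mdb')  # hypothetical path and URL scheme
    DB.database_type  # => :access, via Sequel::Access::DatabaseMethods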
data/lib/sequel/adapters/shared/access.rb
ADDED
@@ -0,0 +1,416 @@
+module Sequel
+  module Access
+    module DatabaseMethods
+      AUTO_INCREMENT = 'COUNTER(1,1)'.freeze
+      SERVER_VERSION_RE = /^(\d+)\.(\d+)\.(\d+)/.freeze
+      SQL_BEGIN = "BEGIN TRANSACTION".freeze
+      SQL_COMMIT = "COMMIT TRANSACTION".freeze
+      SQL_ROLLBACK = "ROLLBACK TRANSACTION".freeze
+      SQL_ROLLBACK_TO_SAVEPOINT = 'ROLLBACK TRANSACTION autopoint_%d'.freeze
+      SQL_SAVEPOINT = 'SAVE TRANSACTION autopoint_%d'.freeze
+      TEMPORARY = "#".freeze
+
+      def database_type
+        :access
+      end
+
+      def supports_savepoints?
+        false
+      end
+
+      private
+
+      # Access uses the COUNTER(1,1) type for autoincrementing columns.
+      def auto_increment_sql
+        AUTO_INCREMENT
+      end
+
+      # MSSQL specific syntax for altering tables.
+      def alter_table_sql(table, op)
+        case op[:op]
+        when :add_column
+          "ALTER TABLE #{quote_schema_table(table)} ADD #{column_definition_sql(op)}"
+        when :rename_column
+          "SP_RENAME #{literal("#{quote_schema_table(table)}.#{quote_identifier(op[:name])}")}, #{literal(op[:new_name].to_s)}, 'COLUMN'"
+        when :set_column_type
+          "ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} #{type_literal(op)}"
+        when :set_column_null
+          sch = schema(table).find{|k,v| k.to_s == op[:name].to_s}.last
+          type = {:type=>sch[:db_type]}
+          type[:size] = sch[:max_chars] if sch[:max_chars]
+          "ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} #{type_literal(type)} #{'NOT ' unless op[:null]}NULL"
+        when :set_column_default
+          "ALTER TABLE #{quote_schema_table(table)} ADD CONSTRAINT #{quote_identifier("sequel_#{table}_#{op[:name]}_def")} DEFAULT #{literal(op[:default])} FOR #{quote_identifier(op[:name])}"
+        else
+          super(table, op)
+        end
+      end
+
+      # SQL to start a new savepoint
+      def begin_savepoint_sql(depth)
+        SQL_SAVEPOINT % depth
+      end
+
+      # SQL to BEGIN a transaction.
+      def begin_transaction_sql
+        SQL_BEGIN
+      end
+
+      # Commit the active transaction on the connection, does not commit/release
+      # savepoints.
+      def commit_transaction(conn)
+        log_connection_execute(conn, commit_transaction_sql) unless Thread.current[:sequel_transaction_depth] > 1
+      end
+
+      # SQL to COMMIT a transaction.
+      def commit_transaction_sql
+        SQL_COMMIT
+      end
+
+      # The SQL to drop an index for the table.
+      def drop_index_sql(table, op)
+        "DROP INDEX #{quote_identifier(op[:name] || default_index_name(table, op[:columns]))} ON #{quote_schema_table(table)}"
+      end
+
+      # Always quote identifiers in the metadata_dataset, so schema parsing works.
+      def metadata_dataset
+        ds = super
+        ds.quote_identifiers = true
+        ds
+      end
+
+      # SQL to rollback to a savepoint
+      def rollback_savepoint_sql(depth)
+        SQL_ROLLBACK_TO_SAVEPOINT % depth
+      end
+
+      # SQL to ROLLBACK a transaction.
+      def rollback_transaction_sql
+        SQL_ROLLBACK
+      end
+
+      # MSSQL uses the INFORMATION_SCHEMA to hold column information. This method does
+      # not support the parsing of primary key information.
+      def schema_parse_table(table_name, opts)
+        m = output_identifier_meth
+        m2 = input_identifier_meth
+        ds = metadata_dataset.from(:information_schema__tables___t).
+          join(:information_schema__columns___c, :table_catalog=>:table_catalog,
+               :table_schema => :table_schema, :table_name => :table_name).
+          select(:column_name___column, :data_type___db_type, :character_maximum_length___max_chars, :column_default___default, :is_nullable___allow_null).
+          filter(:c__table_name=>m2.call(table_name.to_s))
+        if schema = opts[:schema] || default_schema
+          ds.filter!(:table_schema=>schema)
+        end
+        ds.map do |row|
+          row[:allow_null] = row[:allow_null] == 'YES' ? true : false
+          row[:default] = nil if blank_object?(row[:default])
+          row[:type] = schema_column_type(row[:db_type])
+          [m.call(row.delete(:column)), row]
+        end
+      end
+
+      # SQL fragment for marking a table as temporary
+      def temporary_table_sql
+        TEMPORARY
+      end
+
+      # MSSQL has both datetime and timestamp classes, most people are going
+      # to want datetime
+      def type_literal_generic_datetime(column)
+        :datetime
+      end
+
+      # MSSQL has both datetime and timestamp classes, most people are going
+      # to want datetime
+      def type_literal_generic_time(column)
+        column[:only_time] ? :time : :datetime
+      end
+
+      # MSSQL doesn't have a true boolean class, so it uses bit
+      def type_literal_generic_trueclass(column)
+        :bit
+      end
+
+      # MSSQL uses image type for blobs
+      def type_literal_generic_file(column)
+        :image
+      end
+    end
+
+    module DatasetMethods
+      BOOL_TRUE = '1'.freeze
+      BOOL_FALSE = '0'.freeze
+      COMMA_SEPARATOR = ', '.freeze
+      DELETE_CLAUSE_METHODS = Dataset.clause_methods(:delete, %w'with from output from2 where')
+      INSERT_CLAUSE_METHODS = Dataset.clause_methods(:insert, %w'with into columns output values')
+      SELECT_CLAUSE_METHODS = Dataset.clause_methods(:select, %w'with limit distinct columns from table_options join where group order having compounds')
+      UPDATE_CLAUSE_METHODS = Dataset.clause_methods(:update, %w'with table set output from where')
+      WILDCARD = LiteralString.new('*').freeze
+      CONSTANT_MAP = {:CURRENT_DATE=>'CAST(CURRENT_TIMESTAMP AS DATE)'.freeze, :CURRENT_TIME=>'CAST(CURRENT_TIMESTAMP AS TIME)'.freeze}
+
+      # Split out from fetch rows to allow processing of JDBC result sets
+      # that don't come from issuing an SQL string.
+      def process_result_set(result)
+        # get column names
+        meta = result.getMetaData
+        cols = []
+        i = 0
+        meta.getColumnCount.times{cols << [output_identifier(meta.getColumnLabel(i+=1)), i]}
+        @columns = cols.map{|c| c.at(0)}
+        row = {}
+        blk = if @convert_types
+          lambda{ |n, i|
+            begin
+              row[n] = convert_type(result.getObject(i))
+            rescue
+              # XXX: this is because HXTT driver throws an error here
+              if n == :column_def && row[:type_name] == 'TIMESTAMP'
+                row[:column_def] = nil
+              end
+            end
+          }
+        else
+          lambda{|n, i| row[n] = result.getObject(i)}
+        end
+        # get rows
+        rsmd = result.get_meta_data
+        num_cols = rsmd.get_column_count
+
+        while result.next
+          row = {}
+          cols.each(&blk)
+          yield row
+        end
+      end
+
+      # MSSQL uses + for string concatenation
+      def complex_expression_sql(op, args)
+        case op
+        when :'||'
+          super(:+, args)
+        else
+          super(op, args)
+        end
+      end
+
+      # MSSQL doesn't support the SQL standard CURRENT_DATE or CURRENT_TIME
+      def constant_sql(constant)
+        CONSTANT_MAP[constant] || super
+      end
+
+      # When returning all rows, if an offset is used, delete the row_number column
+      # before yielding the row.
+      def fetch_rows(sql, &block)
+        @opts[:offset] ? super(sql) {|r| r.delete(:"recno()"); yield r} : super(sql, &block)
+      end
+
+      # MSSQL uses the CONTAINS keyword for full text search
+      def full_text_search(cols, terms, opts = {})
+        filter("CONTAINS (#{literal(cols)}, #{literal(terms)})")
+      end
+
+      # MSSQL uses a UNION ALL statement to insert multiple values at once.
+      def multi_insert_sql(columns, values)
+        [insert_sql(columns, LiteralString.new(values.map {|r| "SELECT #{expression_list(r)}" }.join(" UNION ALL ")))]
+      end
+
+      # Allows you to do .nolock on a query
+      def nolock
+        clone(:table_options => "(NOLOCK)")
+      end
+
+      # Include an OUTPUT clause in the eventual INSERT, UPDATE, or DELETE query.
+      #
+      # The first argument is the table to output into, and the second argument
+      # is either an Array of column values to select, or a Hash which maps output
+      # column names to selected values, in the style of #insert or #update.
+      #
+      # Output into a returned result set is not currently supported.
+      #
+      # Examples:
+      #
+      #   dataset.output(:output_table, [:deleted__id, :deleted__name])
+      #   dataset.output(:output_table, :id => :inserted__id, :name => :inserted__name)
+      def output(into, values)
+        output = {}
+        case values
+        when Hash
+          output[:column_list], output[:select_list] = values.keys, values.values
+        when Array
+          output[:select_list] = values
+        end
+        output[:into] = into
+        clone({:output => output})
+      end
+
+      # An output method that modifies the receiver.
+      def output!(into, values)
+        mutation_method(:output, into, values)
+      end
+
+      # MSSQL uses [] to quote identifiers
+      def quoted_identifier(name)
+        "[#{name}]"
+      end
+
+      # Pagination queries (i.e., limit with offset) are supported by HXTT
+      # with the help of the recno() function, which returns the
+      # row number of each record.
+      def select_sql
+        return super unless offset = @opts[:offset]
+        if @opts[:select]
+          @opts[:select] << :recno.sql_function
+        else
+          @opts[:select] = [WILDCARD, :recno.sql_function]
+        end
+        s = unlimited.where("BETWEEN (recno(), #{@opts[:offset] + 1}, #{@opts[:limit] + @opts[:offset]})")
+        s.select_sql
+      end
+      # def select_sql
+      #   return super unless offset = @opts[:offset]
+      #   raise(Error, 'Access requires an order be provided if using an offset') unless order = @opts[:order]
+      #
+      #   total_rows = unlimited.count
+      #   if @opts[:limit] + @opts[:offset] > total_rows
+      #     correction = @opts[:limit] + @opts[:offset] - total_rows
+      #     @opts[:limit] = @opts[:limit] - correction
+      #   end
+      #
+      #   s0 = unlimited.limit(@opts[:limit] + @opts[:offset]).order(order)
+      #   s1 = unlimited.from(s0.as('s1')).limit(@opts[:limit]).reverse_order(order)
+      #   s2 = unlimited.from(s1.as('s2')).order(order)
+      #   s2.select_sql
+      # end
+
+      # The version of the database server.
+      def server_version
+        db.server_version(@opts[:server])
+      end
+
+      # Microsoft SQL Server does not support INTERSECT or EXCEPT
+      def supports_intersect_except?
+        false
+      end
+
+      # MSSQL does not support IS TRUE
+      def supports_is_true?
+        false
+      end
+
+      # MSSQL 2005+ supports window functions
+      def supports_window_functions?
+        true
+      end
+
+      private
+
+      # MSSQL can modify joined datasets
+      def check_modification_allowed!
+        raise(InvalidOperation, "Grouped datasets cannot be modified") if opts[:group]
+      end
+
+      # MSSQL supports the OUTPUT clause for DELETE statements.
+      # It also allows prepending a WITH clause.
+      def delete_clause_methods
+        DELETE_CLAUSE_METHODS
+      end
+
+      # Handle the with clause for delete, insert, and update statements
+      # to be the same as the insert statement.
+      def delete_with_sql(sql)
+        select_with_sql(sql)
+      end
+      alias insert_with_sql delete_with_sql
+      alias update_with_sql delete_with_sql
+
+      # MSSQL raises an error if you try to provide more than 3 decimal places
+      # for a fractional timestamp. This probably doesn't work for smalldatetime
+      # fields.
+      def format_timestamp_usec(usec)
+        sprintf(".%03d", usec/1000)
+      end
+
+      # MSSQL supports FROM clauses in DELETE and UPDATE statements.
+      def from_sql(sql)
+        if (opts[:from].is_a?(Array) && opts[:from].size > 1) || opts[:join]
+          select_from_sql(sql)
+          select_join_sql(sql)
+        end
+      end
+      alias delete_from2_sql from_sql
+      alias update_from_sql from_sql
+
+      # MSSQL supports the OUTPUT clause for INSERT statements.
+      # It also allows prepending a WITH clause.
+      def insert_clause_methods
+        INSERT_CLAUSE_METHODS
+      end
+
+      # MSSQL uses a literal hexadecimal number for blob strings
+      def literal_blob(v)
+        blob = '0x'
+        v.each_byte{|x| blob << sprintf('%02x', x)}
+        blob
+      end
+
+      # Use unicode string syntax for all strings
+      def literal_string(v)
+        "N#{super}"
+      end
+
+      # Use 0 for false on MSSQL
+      def literal_false
+        BOOL_FALSE
+      end
+
+      # Use 1 for true on MSSQL
+      def literal_true
+        BOOL_TRUE
+      end
+
+      # The alias to use for the row_number column when emulating OFFSET
+      def row_number_column
+        :x_sequel_row_number_x
+      end
+
+      # MSSQL adds the limit before the columns
+      def select_clause_methods
+        SELECT_CLAUSE_METHODS
+      end
+
+      # MSSQL uses TOP for limit
+      def select_limit_sql(sql)
+        sql << " TOP #{@opts[:limit]}" if @opts[:limit]
+      end
+
+      # MSSQL uses the WITH statement to lock tables
+      def select_table_options_sql(sql)
+        sql << " WITH #{@opts[:table_options]}" if @opts[:table_options]
+      end
+
+      # SQL fragment for MSSQL's OUTPUT clause.
+      def output_sql(sql)
+        return unless output = @opts[:output]
+        sql << " OUTPUT #{column_list(output[:select_list])}"
+        if into = output[:into]
+          sql << " INTO #{table_ref(into)}"
+          if column_list = output[:column_list]
+            cl = []
+            column_list.each { |k, v| cl << literal(String === k ? k.to_sym : k) }
+            sql << " (#{cl.join(COMMA_SEPARATOR)})"
+          end
+        end
+      end
+      alias delete_output_sql output_sql
+      alias update_output_sql output_sql
+      alias insert_output_sql output_sql
+
+      # MSSQL supports the OUTPUT clause for UPDATE statements.
+      # It also allows prepending a WITH clause.
+      def update_clause_methods
+        UPDATE_CLAUSE_METHODS
+      end
+    end
+  end
+end
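A few of the Dataset behaviours above, shown against a hypothetical sites table (a sketch of the generated SQL, not captured output):

    DB[:sites].limit(10).sql
    # => "SELECT TOP 10 * FROM [sites]"  (TOP via select_limit_sql, [] quoting)

    DB[:sites].nolock.sql
    # => "SELECT * FROM [sites] WITH (NOLOCK)"  (table options via nolock)

    DB[:sites].limit(10, 20).all
    # OFFSET is emulated by selecting recno() and filtering on row numbers

Booleans literalize to 1/0 and strings to N'...' via the literal_* methods.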
metadata
ADDED
@@ -0,0 +1,72 @@
+--- !ruby/object:Gem::Specification
+name: nbadw-util
+version: !ruby/object:Gem::Version
+  version: 0.1.0
+platform: ruby
+authors:
+- Colin Casey
+autorequire:
+bindir: bin
+cert_chain: []
+
+date: 2009-10-22 00:00:00 -03:00
+default_executable:
+dependencies:
+- !ruby/object:Gem::Dependency
+  name: sequel
+  type: :runtime
+  version_requirement:
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 3.5.0
+    version:
+description: Database models, migrations, and utilities for the New Brunswick Aquatic Data Warehouse
+email: casey.colin@gmail.com
+executables: []
+
+extensions: []
+
+extra_rdoc_files:
+- LICENSE
+- README
+files:
+- LICENSE
+- Rakefile
+- lib/nbadw/util/copy_database_task.rb
+- lib/nbadw/util/progress_bar.rb
+- lib/sequel/adapters/jdbc/access.rb
+- lib/sequel/adapters/shared/access.rb
+- lib/sequel/jdbc_access_adapter.rb
+- README
+has_rdoc: true
+homepage: http://github.com/colincasey/nbadw
+licenses: []
+
+post_install_message:
+rdoc_options:
+- --charset=UTF-8
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: "0"
+  version:
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: "0"
+  version:
+requirements: []
+
+rubyforge_project:
+rubygems_version: 1.3.5
+signing_key:
+specification_version: 3
+summary: NB Aquatic Data Warehouse - Models and Database Utilities
+test_files: []