wyrm 0.2.0 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: c432ee798bf7c5208a16696daa5741d64351721d
- data.tar.gz: 51e22092611ef48f16d4757eb8c327f96e6847f5
+ metadata.gz: 6c0bb0fe99a301ead2da2ce8a64dc1eb20c925b0
+ data.tar.gz: 031b66ab01f20c5ebad94dbfa3c50338dbd15cba
  SHA512:
- metadata.gz: b907a9adbd5b47ac9847a0aeaa95e5318eff307735947f8e4dfd4aa35e819f8ff873cbf53053202a4a944f2961fe6a3254f33bd4816753386580e2049c13e186
- data.tar.gz: 65e512436c2991f2b9786c8e9e2587854b227e95bac2dcc1ec2f87b3d0840e786ac4e13a454197cdeb38a43eae948c9fbaa743b3c1f3b21d02714b398fd56894
+ metadata.gz: 5feadc5c19a9df8417414cb91270ae55b8d114cf94fc9daa33b01b2f23292e858a109b0d703cfc7474952b963c26876d7e732f12c0802146cd5ef7838e803629
+ data.tar.gz: a73a7c30e43a430fb05d22552f5b0f10c81cdcd99ec818ff8f838f6ff1a6f59f3c9d71a9d8eacdd26ff4066f1140e82e0236db0e13c2421fba8b1800b7d35710
data/Gemfile CHANGED
@@ -1,7 +1,26 @@
- source 'https://rubygems.org'
- # source 'file:///var/cache/rubygems'
+ def from_gemrc
+ # auto-load from ~/.gemrc
+ home_gemrc = Pathname('~/.gemrc').expand_path

- gem 'sequel', ~> '4.0.0'
+ if home_gemrc.exist?
+ require 'yaml'
+ # use all the sources specified in .gemrc
+ YAML.load_file(home_gemrc)[:sources]
+ end
+ end
+
+ # Use the gemrc source if defined, unless CANON is set,
+ # otherwise just use the default.
+ def preferred_sources
+ rv = from_gemrc unless eval(ENV['CANON']||'')
+ rv ||= []
+ rv << 'http://rubygems.org' if rv.empty?
+ rv
+ end
+
+ preferred_sources.each{|src| source src}
+
+ gem 'sequel'
  gem 'fastandand'

  # Specify your gem's dependencies in wyrm.gemspec
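For reference, ~/.gemrc is a YAML file with a :sources: key (the format maintained by `gem sources --add`), and setting CANON to any truthy Ruby expression bypasses it. A standalone sketch of the same lookup the Gemfile performs, printing whichever sources bundler would end up using:

    require 'yaml'
    require 'pathname'

    # mirror the Gemfile's logic: gemrc sources unless CANON evaluates truthy
    home_gemrc = Pathname('~/.gemrc').expand_path
    sources = YAML.load_file(home_gemrc)[:sources] if home_gemrc.exist? && !eval(ENV['CANON'] || '')
    sources = ['http://rubygems.org'] if sources.nil? || sources.empty?
    puts sources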
data/README.md CHANGED
@@ -3,16 +3,23 @@
  Transfer data from one database to another. Has been used to dump > 100M dbs,
  and one 850G db. Should theoretically work for any dbs supported by Sequel.

+ Dumps are compressed with bz2, using pbzip2. Fast *and* small :-D For example:
+ mysqldump | bzip2 for a certain 850G db comes to 127G. With wyrm it
+ comes to 134G.
+
  Currently transfers tables and views only. Does not attempt to transfer
  stored procs, permissions, triggers etc.

- Works best for tables that have single numeric primary keys, but should also
- handle compound primary keys and tables without primary keys.
+ Handles tables with a single numeric key, single non-numeric key, and no
+ primary key. Haven't tried with compound primary key.
+
+ Depending on table keys will use different strategies to keep memory usage small.
+ Will use result set streaming if available.

  Wyrm because:

  - I like dragons
- - I can (eventually) have a Wyrm::Hole to transfer data through :-D
+ - I can (eventually) have a Wyrm::Hole to transfer data through ;-)

  ## Dependencies

@@ -37,7 +44,7 @@ Or install it yourself as:

  Make sure you install the db gems, typically

- $ gem install pg sequel_pg mysql2
+ $ gem install pg sequel_pg mysql2 sqlite3

  ## Usage

@@ -77,7 +84,7 @@ require 'wyrm/db_pump'

  db = Sequel.connect 'postgres://postgres@localhost/other_db'
  dbp = DbPump.new db, :things
- dbp.open_bz2 '/mnt/disk/wyrm/things.dbp.bz2'
+ dbp.io = IO.popen 'pbzip2 -d -c /mnt/disk/wyrm/things.dbp.bz2'
  dbp.each_row do |row|
  puts row.inspect
  end
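The dump side of that snippet is normally driven by DumpSchema, but a single table can also be written by hand through a pbzip2 pipe. A rough sketch, not the library's own write_through_bz2 helper; the paths and table name are the same placeholders, and note that the 0.2.1 constructor takes keyword arguments:

    require 'sequel'
    require 'wyrm/db_pump'

    db  = Sequel.connect 'postgres://postgres@localhost/other_db'
    dbp = DbPump.new db: db, table_name: :things

    # pipe the encoded rows through pbzip2 and let the shell redirect to a file
    IO.popen 'pbzip2 -z -c > /mnt/disk/wyrm/things.dbp.bz2', 'w' do |zio|
      dbp.io = zio
      dbp.dump
    end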
@@ -1,20 +1,16 @@
  require 'sequel'
  require 'yaml'
- require 'ostruct'
  require 'logger'
- require 'fastandand'

  Sequel.extension :migration

- # TODO possibly use Gem::Package::TarWriter to write tar files
  # TODO when restoring, could use a SizeQueue to make sure the db is kept busy
-
  # TODO need to version the dumps, or something like that.
- # TODO This really should be Wyrm::Hole. Or maybe Wyrm::Hole should
- # be the codec that connects two DbPumps, for direct transfer?
+ # TODO looks like io should belong to codec. Hmm. Not sure.
+ # TODO table_name table_dataset need some thinking about. Dataset would encapsulate both. But couldn't change db then, and primary_keys would be hard.
  class DbPump
  # some codecs might ignore io, eg if a dbpump is talking to another dbpump
- def initialize( db, table_name, io: STDOUT, codec: :marshal, page_size: 10000, dry_run: false )
+ def initialize( db: nil, table_name: nil, io: STDOUT, codec: :marshal, page_size: 10000, dry_run: false )
  self.codec = codec
  self.db = db
  self.table_name = table_name
@@ -42,14 +38,24 @@ class DbPump

  def db=( other_db )
  invalidate_cached_members
+
  @db = other_db
+ return unless other_db
+
+ # add extensions
  @db.extension :pagination
+
+ # turn on postgres streaming if available
+ if defined?( Sequel::Postgres ) && Sequel::Postgres.supports_streaming?
+ logger.info "Turn streaming on for postgres"
+ @db.extension :pg_streaming
+ end
  end

  # return an object that responds to ===
  # which returns true if ==='s parameter
  # responds to all the methods
- def quacks_like( *methods )
+ def self.quacks_like( *methods )
  @quacks_like ||= {}
  @quacks_like[methods] ||= Object.new.tap do |obj|
  obj.define_singleton_method(:===) do |instance|
@@ -58,6 +64,10 @@ class DbPump
  end
  end

+ def quacks_like( *methods )
+ self.class.quacks_like( *methods )
+ end
+
  def codec=( codec_thing )
  @codec =
  case codec_thing
@@ -68,7 +78,7 @@ class DbPump
  when quacks_like( :encode, :decode )
  codec_thing
  else
- raise "unknown codec #{codec_thing}"
+ raise "unknown codec #{codec_thing.inspect}"
  end
  end

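quacks_like builds a matcher whose === succeeds when the tested object responds to every listed method, which is what lets codec= above accept any duck-typed codec. A minimal sketch with a hypothetical codec class:

    # FakeCodec is purely illustrative
    class FakeCodec
      def encode( obj, io )
        io.write Marshal.dump( obj )
      end

      def decode( io )
        Marshal.load io
      end
    end

    duck = DbPump.quacks_like( :encode, :decode )
    duck === FakeCodec.new   # => true, responds to both methods
    duck === Object.new      # => false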
@@ -110,44 +120,75 @@ class DbPump
  @table_dataset ||= db[table_name.to_sym]
  end

- # TODO possibly use select from outer / inner join to
- # http://www.numerati.com/2012/06/26/reading-large-result-sets-with-hibernate-and-mysql/
- # because mysql is useless
- def paginated_dump
+ # Use limit / offset. Last fallback if there are no keys (or a compound primary key?).
+ def paginated_dump( &encode_block )
  table_dataset.order(*primary_keys).each_page(page_size) do |page|
  logger.info page.sql
- page.each do |row|
- unless dry_run?
- codec.encode row.values, io
- end
- end
+ page.each &encode_block
  end
  end

- # have to use this for non-integer pks
+ # Use limit / offset, but not for all fields.
  # The idea is that large offsets are expensive in the db because the db server has to read
- # through the data set to reach the required offset. So make that only ids, and then
- # do the main select from the limited id list.
+ # through the data set to reach the required offset. So make that only ids need to be read,
+ # and then do the main select from the limited id list.
  # TODO could speed this up by have a query thread which runs the next page-query while
  # the current one is being written/compressed.
  # select * from massive as full
  # inner join (select id from massive order by whatever limit m, n) limit
  # on full.id = limit.id
  # order by full.whatever
- def inner_dump
+ # http://www.numerati.com/2012/06/26/reading-large-result-sets-with-hibernate-and-mysql/
+ def inner_dump( &encode_block )
  # could possibly overrride Dataset#paginate(page_no, page_size, record_count=nil)
  0.step(table_dataset.count, page_size).each do |offset|
  limit_dataset = table_dataset.select( *primary_keys ).limit( page_size, offset ).order( *primary_keys )
  page = table_dataset.join( limit_dataset, Hash[ primary_keys.map{|f| [f,f]} ] ).order( *primary_keys ).qualify(table_name)
  logger.info page.sql
- page.each do |row|
- unless dry_run?
- codec.encode row.values, io
+ page.each &encode_block
+ end
+ end
+
+ # Selects pages by a range of ids, using >= and <.
+ # Use this for integer pks
+ def min_max_dump( &encode_block )
+ # select max(id), min(id) from table
+ # and then split that up into 10000 size chunks.
+ # Not really important if there aren't exactly 10000
+ min, max = table_dataset.select{[min(id), max(id)]}.first.values
+ return unless min && max
+
+ # will always include the last item because page_size will be
+ # bigger than max for the last page
+ (min..max).step(page_size).each do |offset|
+ page = table_dataset.where( id: offset...(offset + page_size) )
+ logger.info page.sql
+ page.each &encode_block
+ end
+ end
+
+ def stream_dump( &encode_block )
+ logger.info "using result set streaming"
+
+ # I want to output progress every page_size records,
+ # without doing a records_count % page_size every iteration.
+ # So define an external enumerator
+ # TODO should really performance test the options here.
+ records_count = 0
+ enum = table_dataset.stream.enum_for
+ loop do
+ begin
+ page_size.times do
+ encode_block.call enum.next
+ records_count += 1
  end
+ ensure
+ logger.info "#{records_count} from #{table_dataset.sql}"
  end
  end
  end

+ # Dump the serialization of the table to the specified io.
  # TODO need to also dump a first row containing useful stuff:
  # - source table name
  # - number of rows
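The id ranges min_max_dump pages through are easiest to see with small numbers; a worked sketch assuming ids 1..25000 and the default page_size of 10000:

    # each where( id: range ) page covers page_size ids; the last range overshoots
    # max, which is harmless because the ids simply stop at 25000
    min, max, page_size = 1, 25_000, 10_000
    (min..max).step(page_size).map{|offset| offset...(offset + page_size)}
    # => [1...10001, 10001...20001, 20001...30001]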
@@ -155,50 +196,50 @@ class DbPump
  # - permissions?
  # These should all be in one object that can be Marshall.load-ed easily.
  def dump
+ _dump do |row|
+ codec.encode( row.values, io ) unless dry_run?
+ end
+ ensure
+ io.flush
+ end
+
+ # decide which kind of paged iteration will be best for this table.
+ # Return an iterator, or yield row hashes to the block
+ def _dump( &encode_block )
+ return enum_for(__method__) unless block_given?
  case
+ when table_dataset.respond_to?( :stream )
+ stream_dump &encode_block
  when primary_keys.empty?
- paginated_dump
+ paginated_dump &encode_block
  when primary_keys.all?{|i| i == :id }
- min_max_dump
+ min_max_dump &encode_block
  else
- inner_dump
+ inner_dump &encode_block
  end
- io.flush
  end

- # could use this for integer pks
- def min_max_dump
- # select max(id), min(id) from patents
- # and then split that up into 10000 size chunks. Not really important if there aren't exactly 10000
- min, max = table_dataset.select{[min(id), max(id)]}.first.values
- return unless min && max
- # could possibly overrride Dataset#paginate(page_no, page_size, record_count=nil)
- # TODO definitely need to refactor this
-
- # will always include the last item because
- (min..max).step(page_size).each do |offset|
- page = table_dataset.where( id: offset...(offset + page_size) )
- logger.info page.sql
- page.each do |row|
- unless dry_run?
- codec.encode row.values, io
- end
- end
- end
+ def dump_matches_columns?( row_enum, columns )
+ raise "schema mismatch" unless row_enum.peek.size == columns.size
+ true
+ rescue StopIteration
+ # peek threw a StopIteration, so there's no data
+ false
  end

- # TODO lazy evaluation / streaming
+ # TODO don't generate the full insert, ie leave out the fields
+ # because we've already checked that the columns and the table
+ # match.
+ # TODO generate column names in insert, they might still work
+ # if columns have been added to the db, but not the dump.
  # start_row is zero-based
  def restore( start_row: 0, filename: 'io' )
  columns = table_dataset.columns
- logger.info{ "inserting to #{table_name} #{columns.inspect}" }
-
- # get the Enumerator
  row_enum = each_row

- # check that columns match
- raise "schema mismatch" if row_enum.peek.size != columns.size
+ return unless dump_matches_columns?( row_enum, columns )

+ logger.info{ "inserting to #{table_name} #{columns.inspect}" }
  rows_restored = 0

  if start_row != 0
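Called without a block, _dump returns an Enumerator of row hashes via enum_for, so dump stays a thin wrapper that applies the codec, and a table can be peeked at interactively. A small sketch, where db stands in for a real Sequel connection and :things for a real table:

    pump = DbPump.new db: db, table_name: :things

    # no block: an Enumerator of row hashes, pulled lazily from whichever
    # strategy _dump picks for this table
    pump._dump.first(3)

    # with a block: rows are yielded as they are paged or streamed out of the db
    pump._dump{|row| puts row[:id]}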
@@ -217,7 +258,10 @@ class DbPump
  db.transaction do
  begin
  page_size.times do
- # This skips all the checks in the Sequel code
+ # This skips all the checks in the Sequel code. Basically we want
+ # to generate the
+ # insert into (field1,field2) values (value1,value2)
+ # statement as quickly as possible.
  sql = table_dataset.clone( columns: columns, values: row_enum.next ).send( :clause_sql, :insert )
  db.execute sql unless dry_run?
  rows_restored += 1
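For comparison, a similar statement can be built through the public Dataset#insert_sql; the private clause_sql call above bypasses the per-row argument handling that makes that path slower when generating millions of inserts. A rough sketch, not the library's code path:

    # builds a comparable INSERT, but re-checks the arguments on every row
    sql = table_dataset.insert_sql( columns, row_enum.next )
    # eg INSERT INTO things (id, name) VALUES (1, 'first')  -- illustrative only
    db.execute sql unless dry_run?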
@@ -235,18 +279,14 @@ class DbPump
  rows_restored
  end

- # this doesn't really belong here, but it will do for now.
- def open_bz2( filename )
- io.andand.close if io != STDOUT && !io.andand.closed?
- self.io = IO.popen( "pbzip2 -d -c #{filename}" )
- end
-
- # enumerate through the given io at its current position
+ # Enumerate through the given io at its current position
+ # TODO don't check for io.eof here, leave that to the codec
  def each_row
  return enum_for(__method__) unless block_given?
  yield codec.decode( io ) until io.eof?
  end

+ # Enumerate sql insert statements from the dump
  def insert_sql_each
  return enum_for(__method__) unless block_given?
  each_row do |row|
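With open_bz2 gone from DbPump, reading a dump means handing the pump an io explicitly; each_row yields the decoded row value arrays and insert_sql_each yields ready-to-run statements. A small sketch with a placeholder connection string and path:

    pump = DbPump.new db: Sequel.connect('sqlite://other.db'), table_name: :things
    pump.io = IO.popen 'pbzip2 -d -c /mnt/disk/wyrm/things.dbp.bz2'

    pump.each_row{|row| p row}             # decoded row value arrays
    # or, starting from a freshly opened io:
    pump.insert_sql_each{|sql| puts sql}   # insert statements built from each row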
@@ -6,6 +6,7 @@ require 'wyrm/pump_maker'
  # ds = DumpSchema.new src_db, Pathname('/var/data/lots')
  # ds.dump_schema
  # ds.dump_tables
+ # TODO possibly use Gem::Package::TarWriter to write tar files
  class DumpSchema
  include PumpMaker

@@ -31,27 +32,6 @@ class DumpSchema
  @fk_migration ||= src_db.dump_foreign_key_migration(:same_db => same_db)
  end

- def restore_migration
- <<-EOF
- require 'restore_migration'
- Sequel.migration do
- def db_pump
- end
-
- up do
- restore_tables
- end
-
- down do
- # from each table clear table
- each_table do |table_name|
- db_pump.restore table_name, io: io, db: db
- end
- end
- end
- EOF
- end
-
  def same_db
  false
  end
@@ -61,24 +41,22 @@ class DumpSchema
  end

  def dump_schema
- (container + '001_schema.rb').open('w') do |io|
- io.write schema_migration
- end
+ numbering = '000'

- (container + '002_populate_tables.rb').open('w') do |io|
- io.write restore_migration
+ (container + "#{numbering.next!}_schema.rb").open('w') do |io|
+ io.write schema_migration
  end

- (container + '003_indexes.rb').open('w') do |io|
+ (container + "#{numbering.next!}_indexes.rb").open('w') do |io|
  io.write index_migration
  end

- (container + '004_foreign_keys.rb').open('w') do |io|
+ (container + "#{numbering.next!}_foreign_keys.rb").open('w') do |io|
  io.write fk_migration
  end
  end

- def open_bz2( pathname )
+ def write_through_bz2( pathname )
  fio = pathname.open('w')
  # open subprocess in read-write mode
  zio = IO.popen( "pbzip2 -z", 'r+' )
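The numbering trick above leans on String#next! bumping the trailing digits in place, so each migration file gets the next prefix without hard-coded names:

    numbering = '000'
    numbering.next!   # => "001"  used for the *_schema.rb file
    numbering.next!   # => "002"  used for *_indexes.rb
    numbering.next!   # => "003"  used for *_foreign_keys.rb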
@@ -96,7 +74,8 @@ class DumpSchema
  # signal the copier thread to stop
  zio.close_write
  logger.debug 'finished dumping'
- # wait for copier thread to
+
+ # wait for copier thread to finish
  copier.join
  logger.debug 'stream copy thread finished'
  ensure
@@ -104,7 +83,7 @@ class DumpSchema
  fio.close unless fio.closed?
  end

- def dump_table( table_name )
+ def dump_table( table_name, &io_block )
  pump.table_name = table_name
  if pump.table_dataset.empty?
  logger.info "No records in #{table_name}"
@@ -114,7 +93,7 @@ class DumpSchema
  filename = container + "#{table_name}.dbp.bz2"
  logger.info "dumping #{table_name} to #{filename}"

- open_bz2 filename do |zio|
+ write_through_bz2 filename do |zio|
  # generate the dump
  pump.io = zio
  pump.dump
@@ -1,6 +1,6 @@
  require 'wyrm/db_pump'

- class Object
+ module PumpMaker
  def call_or_self( maybe_callable )
  if maybe_callable.respond_to? :call
  maybe_callable.call( self )
@@ -8,11 +8,9 @@ class Object
  maybe_callable
  end
  end
- end

- module PumpMaker
  def make_pump( db, pump_thing )
- call_or_self(pump_thing) || DbPump.new( db, nil )
+ call_or_self(pump_thing) || DbPump.new( db: db )
  end

  def maybe_deebe( db_or_string )
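Moving call_or_self from a monkey-patched Object into PumpMaker keeps its behaviour: callables are invoked with the receiver, anything else passes straight through, which is how make_pump accepts either a lambda or a ready-made pump. A minimal sketch with a hypothetical includer:

    # Holder is hypothetical, just to show the mixin in isolation
    class Holder
      include PumpMaker
    end

    holder = Holder.new
    holder.call_or_self ->(h){ h.class.name }   # => "Holder" (lambda called with holder)
    holder.call_or_self :already_built          # => :already_built (passed through)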
@@ -2,9 +2,9 @@ require 'logger'
  require 'wyrm/pump_maker'

  # Load a schema from a set of dump files (from DumpSchema)
- # and restore the table data
+ # and restore the table data.
  # dst_db = Sequel.connect "postgres://localhost:5454/lots"
- # rs = RestoreSchema.new dst_db, Pathname('/var/data/lots')
+ # rs = RestoreSchema.new dst_db, '/var/data/lots'
  # rs.create
  # rs.restore_tables
  class RestoreSchema
@@ -14,23 +14,39 @@ class RestoreSchema
  @container = Pathname.new container
  @dst_db = maybe_deebe dst_db
  @pump = make_pump( @dst_db, pump )
-
- load_migrations
  end

  attr_reader :pump
  attr_reader :dst_db
  attr_reader :container
- attr_reader :schema_migration, :index_migration, :fk_migration

- def logger
- @logger ||= Logger.new STDERR
+ # sequel wants migrations numbered, but it's a bit of an annoyance for this.
+ def find_single( glob )
+ candidates =Pathname.glob container + glob
+ raise "too many #{candidates.inspect} for #{glob}" unless candidates.size == 1
+ candidates.first
+ end
+
+ def schema_migration
+ @schema_migration ||= find_single( '*schema.rb' ).read
+ end
+
+ def index_migration
+ @index_migration ||= find_single( '*indexes.rb' ).read
+ end
+
+ def fk_migration
+ @fk_migration ||= find_single( '*foreign_keys.rb' ).read
  end

- def load_migrations
- @schema_migration = (container + '001_schema.rb').read
- @index_migration = (container + '003_indexes.rb').read
- @fk_migration = (container + '004_foreign_keys.rb').read
+ def reload_migrations
+ @fk_migration = nil
+ @index_migration = nil
+ @schema_migration = nil
+ end
+
+ def logger
+ @logger ||= Logger.new STDERR
  end

  # create indexes and foreign keys, and reset sequences
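With find_single the restore no longer cares which numeric prefixes DumpSchema used: each migration is located by glob and memoised on first access. A small sketch with a placeholder connection string and dump directory:

    rs = RestoreSchema.new 'postgres://localhost/lots', '/var/data/lots'

    rs.schema_migration    # reads the single file matching *schema.rb, eg 001_schema.rb
    rs.index_migration     # likewise *indexes.rb
    rs.fk_migration        # likewise *foreign_keys.rb

    rs.reload_migrations   # drop the memoised copies so they are re-read next time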
@@ -53,20 +69,34 @@ class RestoreSchema
  eval( schema_migration ).apply dst_db, :up
  end

- # assume the table name is the base name of table_file
+ # assume the table name is the base name of table_file pathname
  def restore_table( table_file )
  logger.info "restoring from #{table_file}"
  pump.table_name = table_file.basename.sub_ext('').sub_ext('').to_s.to_sym
  # TODO check if table has been restored already, and has the correct rows,
- # otherwise pass in a start row.
- IO.popen( "pbzip2 -d -c #{table_file}" ) do |io|
+ open_bz2 table_file do |io|
  pump.io = io
  pump.restore
  end
  end

+ # open a dbp.bz2 file and either yield or return an io of the uncompressed contents
+ def open_bz2( table_name, &block )
+ table_file =
+ case table_name
+ when Symbol
+ container + "#{table_name}.dbp.bz2"
+ when Pathname
+ table_name
+ else
+ raise "Don't know what to do with #{table_name.inspect}"
+ end
+
+ IO.popen "pbzip2 -d -c #{table_file}", &block
+ end
+
  def restore_tables
- table_files = Pathname.glob Pathname(container) + '*dbp.bz2'
+ table_files = Pathname.glob container + '*.dbp.bz2'
  table_files.sort_by{|tf| tf.stat.size}.each{|table_file| restore_table table_file}
  end
  end
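RestoreSchema#open_bz2 accepts either a table name or an explicit dump file. A rough sketch of both call styles, continuing the placeholder rs from the sketch above:

    # by table name: the path is built from the container directory
    rs.open_bz2 :things do |io|
      rs.pump.table_name = :things
      rs.pump.io = io
      rs.pump.restore
    end

    # by pathname: this is what restore_table does for each *.dbp.bz2 it finds
    rs.open_bz2 Pathname('/var/data/lots/things.dbp.bz2') do |io|
      # same restore as above
    end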
@@ -1,3 +1,3 @@
  module Wyrm
- VERSION = "0.2.0"
+ VERSION = "0.2.1"
  end
@@ -3,8 +3,6 @@ require 'sqlite3'
  require 'pathname'
  require 'wyrm/dump_schema.rb'

- db = Sequel.connect 'sqlite:/home/panic/.qtstalker/new-trading.sqlite3'
-
  # pump = DbPump.new db, :positions, codec: :yaml
  dumper = DumpSchema.new db, '/tmp/test', pump: lambda{|_| DbPump.new db, nil, codec: :yaml}
  dumper = DumpSchema.new db, '/tmp/test', pump: ->(dump_schema){ DbPump.new dump_schema.src_db, nil, codec: :yaml}
@@ -18,7 +18,7 @@ Gem::Specification.new do |spec|
  spec.test_files = spec.files.grep(%r{^(test|spec|features)/})
  spec.require_paths = ["lib"]

- spec.add_runtime_dependency 'sequel', '~> 4.0.0'
+ spec.add_runtime_dependency 'sequel'
  spec.add_runtime_dependency "fastandand"

  spec.add_development_dependency "bundler", "~> 1.3"
metadata CHANGED
@@ -1,29 +1,29 @@
  --- !ruby/object:Gem::Specification
  name: wyrm
  version: !ruby/object:Gem::Version
- version: 0.2.0
+ version: 0.2.1
  platform: ruby
  authors:
  - John Anderson
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2013-08-03 00:00:00.000000000 Z
+ date: 2013-08-07 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: sequel
  requirement: !ruby/object:Gem::Requirement
  requirements:
- - - ~>
+ - - '>='
  - !ruby/object:Gem::Version
- version: 4.0.0
+ version: '0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
- - - ~>
+ - - '>='
  - !ruby/object:Gem::Version
- version: 4.0.0
+ version: '0'
  - !ruby/object:Gem::Dependency
  name: fastandand
  requirement: !ruby/object:Gem::Requirement