pg_shrink 0.0.5 → 0.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 4d4bd5c0b682c29ae81072befc0f5c962319809a
4
- data.tar.gz: d9d4aa0ceae5e3c816dfd8cea704a0f5b3f2a404
3
+ metadata.gz: 1f5d381b33119abf32ee3e51c3d9efe5997194ff
4
+ data.tar.gz: 255bcaed0c21c079370b483ac5941a5c7a320c1f
5
5
  SHA512:
6
- metadata.gz: 9183c4d2b6226d8e0c7b610066333de8f7fa1d731663e5f67241722601336f7749d285b42ef3c84b044c0039b0ba671e0a1085ea622af1d9e086999d1ec92db0
7
- data.tar.gz: 0e55a008ff71f575854f2acc1add7dd7a7f7459385eac3fdf63346d94f469ea0c46e86c50b0b627a01249b8d2595c8c3ff375af1d9579d9e4b26acf8e7995b98
6
+ metadata.gz: 8c6a6fc900c387196c7dfb973337123867b010bb6e7e9ea85e543e69ff23c0d69a1cfab472133b406d4ee421f8a9caf2e650671ca7cc9fee45b35016dffb8783
7
+ data.tar.gz: 477d58878e5e00c8bf2f73d2818e4e672d5172eff9533c44d97025fdf1013637a380bda218d0e970575c846606471845538efdd8d62734b343738d8a22115ea2
data/Shrinkfile.example CHANGED
@@ -1,12 +1,16 @@
1
1
  filter_table :users do |f|
2
2
 
3
- # filter_by takes a block and yields the fields of each record (as a hash)
4
- # the block should return true to keep the record, false if not. For
5
- # ease of use and extensibility, we allow multiple filter_by blocks
6
- # rather than forcing all logic into one block.
7
- f.filter_by do |u|
8
- u[:id] % 1000 == 0
9
- end
3
+ # filter_by takes an SQL condition for the records you would like to keep.
4
+ # This condition can be given as a hash or a string.
5
+ f.filter_by("id % 1000 == 0")
6
+
7
+ # NOTE: It can also take a block that expects the fields of each record (as
8
+ # a hash). The block should return true to keep the record, false if not.
9
+ # However, this approach is substantially slower than the conditions hash
10
+ # based approach.
11
+ #f.filter_by do |u|
12
+ # u[:id] % 1000 == 0
13
+ #end
10
14
 
11
15
  # lock takes a block and yields the fields of each record (as a hash of
12
16
  # fieldname => value) If the block returns true this record is immune to all
data/bin/pg_shrink CHANGED
@@ -37,6 +37,10 @@ Please make sure you have a Shrinkfile or specify one using -c
37
37
  options[:force] = true
38
38
  end
39
39
 
40
+ verbose_desc = 'run in verbose mode'
41
+ opts.on('-v', '--verbose', verbose_desc) do
42
+ options[:log] = true
43
+ end
40
44
 
41
45
  opts.on('-h', '--help', 'Show this message and exit') do |h|
42
46
  puts opts
@@ -27,6 +27,15 @@ module PgShrink
27
27
  end
28
28
  end
29
29
 
30
+ def database_name
31
+ if @opts[:postgres_url]
32
+ @opts[:postgres_url] =~ /.*\/([^\/]+)$/
33
+ return $1
34
+ else
35
+ @opts[:database]
36
+ end
37
+ end
38
+
30
39
  def batch_size
31
40
  @opts[:batch_size]
32
41
  end
@@ -46,13 +55,18 @@ module PgShrink
46
55
  end
47
56
  max_id = self.connection["select max(#{primary_key}) from #{table_name}"].
48
57
  first[:max]
49
- i = 1;
58
+ i = 0;
50
59
  while i < max_id do
51
60
  sql = "select * from #{table_name} where " +
52
- "#{primary_key} >= #{i} and #{primary_key} < #{i + batch_size}"
53
- batch = self.connection[sql].all
61
+ "#{primary_key} > #{i} limit #{batch_size}"
62
+ batch = self.connection[sql].all.compact
63
+
54
64
  yield(batch)
55
- i = i + batch_size
65
+ if batch.any?
66
+ i = batch.last[primary_key]
67
+ else
68
+ break
69
+ end
56
70
  end
57
71
  end
58
72
 
@@ -87,8 +101,67 @@ module PgShrink
87
101
  self.connection.from(table_name).where(opts).all
88
102
  end
89
103
 
90
- def delete_records(table_name, condition_to_delete)
91
- self.connection.from(table_name).where(condition_to_delete).delete
104
+ def delete_records(table_name, conditions, exclude_conditions = [])
105
+ query = connection.from(table_name)
106
+ Array.wrap(conditions).compact.each do |cond|
107
+ query = query.where(cond)
108
+ end
109
+ Array.wrap(exclude_conditions).compact.each do |exclude_cond|
110
+ query = query.exclude(exclude_cond)
111
+ end
112
+ query.delete
113
+ end
114
+
115
+ def propagate_delete(opts)
116
+ # What we conceptually want to do is delete the left outer join where id is null.
117
+ # That's not working in Postgres, so we instead use WHERE NOT EXISTS. Docs
118
+ # indicate using WHERE NOT EXISTS with SELECT 1 in this case.
119
+ # See:
120
+ # http://www.postgresql.org/docs/current/interactive/functions-subquery.html#FUNCTIONS-SUBQUERY-EXISTS
121
+ query = "DELETE FROM #{opts[:child_table]} WHERE NOT EXISTS (" +
122
+ "SELECT 1 from #{opts[:parent_table]} where " +
123
+ "#{opts[:child_table]}.#{opts[:child_key]} = " +
124
+ "#{opts[:parent_table]}.#{opts[:parent_key]}" +
125
+ ")"
126
+
127
+
128
+ # Outside of the join statements, we want to maintain the ease of hash-based
129
+ # conditions. Do this by using a query builder but then swapping in delete SQL
130
+ # in the end.
131
+ query_builder = connection.from(opts[:child_table])
132
+ Array.wrap(opts[:conditions]).compact.each do |cond|
133
+ query_builder = query_builder.where(cond)
134
+ end
135
+ Array.wrap(opts[:exclude]).compact.each do |exclude_cond|
136
+ query_builder = query_builder.exclude(exclude_cond)
137
+ end
138
+ sql = query_builder.sql.gsub("WHERE", "AND").
139
+ gsub("SELECT * FROM \"#{opts[:child_table]}\"",
140
+ query)
141
+
142
+ connection[sql].delete
143
+ end
144
+
145
+ def vacuum_and_reindex!(table_name)
146
+ self.log("Beginning vacuum on #{table_name}")
147
+ connection["vacuum full #{table_name}"].first
148
+ self.log("Beginning reindex on #{table_name}")
149
+ connection["reindex table #{table_name}"].first
150
+ self.log("done reindexing #{table_name}")
151
+ end
152
+
153
+ def vacuum_and_reindex_all!
154
+ self.log("Beginning full database vacuum")
155
+ connection["vacuum full"].first
156
+ self.log("beginning full database reindex")
157
+ connection["reindex database #{database_name}"].first
158
+ self.log("done reindexing full database")
159
+ end
160
+
161
+ def shrink!
162
+ filter!
163
+ vacuum_and_reindex_all!
164
+ sanitize!
92
165
  end
93
166
  end
94
167
  end
@@ -41,8 +41,21 @@ module PgShrink
41
41
  raise "implement in subclass"
42
42
  end
43
43
 
44
- # The delete_records method takes a table name and a condition to delete on.
45
- def delete_records(table_name, condition)
44
+ # The delete_records method takes a table name, a condition to delete on,
45
+ # and a condition to prevent deletion on. This can be used to combine
46
+ # a targeted deletion with exclusions, or to delete an entire table except
47
+ # for some exclusions by passing no conditions but some exclude conditions.
48
+ def delete_records(table_name, conditions, exclude_conditions = [])
49
+ raise "implement in subclass"
50
+ end
51
+
52
+ # Vacuum and reindex are Postgres-specific; do nothing in other cases.
53
+ def vacuum_and_reindex!(table_name)
54
+ end
55
+
56
+ # This is kind of a leaky abstraction because I'm not sure how this would work
57
+ # outside of SQL.
58
+ def propagate_delete(opts)
46
59
  raise "implement in subclass"
47
60
  end
48
61
 
@@ -58,5 +71,15 @@ module PgShrink
58
71
  filter!
59
72
  sanitize!
60
73
  end
74
+
75
+ def initialize(opts = {})
76
+ @opts = opts
77
+ end
78
+
79
+ def log(message)
80
+ if @opts[:log]
81
+ puts "#{Time.now}: #{message}"
82
+ end
83
+ end
61
84
  end
62
85
  end
@@ -1,6 +1,30 @@
1
1
  module PgShrink
2
2
  class SubTableFilter < SubTableOperator
3
3
 
4
+ def propagate_table!
5
+ primary_key = @opts[:primary_key]
6
+ foreign_key = @opts[:foreign_key]
7
+ additional_conditions = {}
8
+ if @opts[:type_key] && @opts[:type]
9
+ additional_conditions[@opts[:type_key]] = @opts[:type]
10
+ end
11
+ self.database.log("Beginning subtable propagation from " +
12
+ "#{self.parent.table_name} to #{self.table.table_name}")
13
+ self.database.propagate_delete(:parent_table => self.parent.table_name,
14
+ :child_table => self.table.table_name,
15
+ :parent_key => primary_key,
16
+ :child_key => foreign_key,
17
+ :conditions => additional_conditions,
18
+ :exclude => self.table.lock_opts)
19
+
20
+ self.database.log("Done with subtable propagation from " +
21
+ "#{self.parent.table_name} to #{self.table.table_name}")
22
+ if self.table.subtable_filters.any?
23
+ self.database.vacuum_and_reindex!(self.table.table_name)
24
+ self.table.subtable_filters.each(&:propagate_table!)
25
+ end
26
+ end
27
+
4
28
  def propagate!(old_parent_data, new_parent_data)
5
29
  return if (old_parent_data.empty? && new_parent_data.empty?)
6
30
  old_batch_keys = old_parent_data.map {|record| record[@opts[:primary_key]]}
@@ -38,7 +38,6 @@ module PgShrink
38
38
  raise "Implement in subclass"
39
39
  end
40
40
 
41
-
42
41
  end
43
42
  end
44
43
 
@@ -3,7 +3,7 @@ module PgShrink
3
3
  attr_accessor :table_name
4
4
  attr_accessor :database
5
5
  attr_accessor :opts
6
- attr_reader :filters, :sanitizers, :subtable_filters, :subtable_sanitizers
6
+ attr_reader :filters, :sanitizers, :subtable_filters, :subtable_sanitizers, :lock_opts
7
7
  # TODO: Figure out, do we need to be able to support tables with no
8
8
  # keys? If so, how should we handle that?
9
9
  def initialize(database, table_name, opts = {})
@@ -31,12 +31,27 @@ module PgShrink
31
31
  end
32
32
 
33
33
  def lock(opts = {}, &block)
34
- @lock = block
34
+ @lock_opts = opts
35
+ if block_given?
36
+ puts "WARNING: Block-based lock on #{self.table_name} will make things SLOW"
37
+ @lock_block = block
38
+ end
39
+ end
40
+
41
+ def has_lock?
42
+ (@lock_opts && @lock_opts.any?) || @lock_block
43
+ end
44
+
45
+ def lock_condition_ok?
46
+ !@lock_block
35
47
  end
36
48
 
37
49
  def locked?(record)
38
- if @lock
39
- @lock.call(record)
50
+ if @lock_block
51
+ @lock_block.call(record)
52
+ elsif @lock_opts && @lock_opts.any?
53
+ raise "Unimplemented: Condition-based locks with block-based " +
54
+ "filter on table #{self.table_name}"
40
55
  end
41
56
  end
42
57
 
@@ -101,6 +116,17 @@ module PgShrink
101
116
  end
102
117
  end
103
118
 
119
+ def condition_filter(filter)
120
+ self.database.log("Beginning filter on #{table_name}")
121
+ self.database.delete_records(self.table_name, {}, [filter.opts, lock_opts].compact)
122
+ self.database.log("Done filtering on #{table_name}")
123
+ # If there aren't any subtables, there isn't much benefit to vacuuming in
124
+ # the middle, and we'll wait until we're done with all filters
125
+ if self.subtable_filters.any?
126
+ self.database.vacuum_and_reindex!(self.table_name)
127
+ end
128
+ end
129
+
104
130
  def filter_batch(batch, &filter_block)
105
131
  new_set = batch.select do |record|
106
132
  locked?(record) || filter_block.call(record.dup)
@@ -111,11 +137,7 @@ module PgShrink
111
137
 
112
138
  def sanitize_batch(batch, &sanitize_block)
113
139
  new_set = batch.map do |record|
114
- if locked?(record)
115
- record.dup
116
- else
117
- sanitize_block.call(record.dup)
118
- end
140
+ sanitize_block.call(record.dup)
119
141
  end
120
142
  update_records(batch, new_set)
121
143
  sanitize_subtables(batch, new_set)
@@ -126,9 +148,14 @@ module PgShrink
126
148
  remove!
127
149
  else
128
150
  self.filters.each do |filter|
129
- self.records_in_batches do |batch|
130
- self.filter_batch(batch) do |record|
131
- filter.apply(record)
151
+ if filter.conditions? && self.lock_condition_ok?
152
+ self.condition_filter(filter)
153
+ self.subtable_filters.each(&:propagate_table!)
154
+ else
155
+ self.records_in_batches do |batch|
156
+ self.filter_batch(batch) do |record|
157
+ filter.apply(record)
158
+ end
132
159
  end
133
160
  end
134
161
  end
@@ -146,7 +173,7 @@ module PgShrink
146
173
  end
147
174
 
148
175
  def can_just_remove?
149
- self.subtable_filters.empty? && self.subtable_sanitizers.empty? && !@lock
176
+ self.subtable_filters.empty? && self.subtable_sanitizers.empty? && !has_lock?
150
177
  end
151
178
 
152
179
  # Mark @remove and add filter so that if we're in the simple case we can
@@ -1,14 +1,36 @@
1
1
  module PgShrink
2
2
  class TableFilter
3
- attr_accessor :table
4
- def initialize(table, opts, &block)
3
+ attr_accessor :table, :opts
4
+ def initialize(table, opts = nil, &block)
5
5
  self.table = table
6
- @opts = opts # Currently not used, but who knows
7
- @block = block
6
+ @opts = opts
7
+ @block = block if block_given?
8
+ end
9
+
10
+ def conditions?
11
+ # Conditions are present whenever no block was given; note we accept string
12
+ !@block
8
13
  end
9
14
 
10
15
  def apply(hash)
11
- @block.call(hash)
16
+ if @block
17
+ @block.call(hash)
18
+ # If we have a straightforward conditions hash, we can just do in-place comparisons.
19
+ elsif @opts.is_a?(Hash)
20
+ @opts.each do |k, v|
21
+ if [Array, Range].include?(v.class)
22
+ return false unless v.include?(hash[k])
23
+ elsif [String, Integer, Float].include?(v.class)
24
+ return false unless hash[k] == v
25
+ else
26
+ raise "Unsupported condition type for mixing with block locks: #{v.class}"
27
+ end
28
+ end
29
+ return true
30
+ #TODO: Figure out if this case matters and whether we want to support it.
31
+ elsif @opts.is_a?(String)
32
+ raise "Unsupported: Mixing string conditions with block locks"
33
+ end
12
34
  end
13
35
  end
14
36
  end
@@ -1,3 +1,3 @@
1
1
  module PgShrink
2
- VERSION = "0.0.5"
2
+ VERSION = "0.0.6"
3
3
  end
data/lib/pg_shrink.rb CHANGED
@@ -19,7 +19,8 @@ module PgShrink
19
19
  url: nil,
20
20
  config: 'Shrinkfile',
21
21
  force: false,
22
- batch_size: 10000
22
+ batch_size: 10000,
23
+ log: false,
23
24
  }
24
25
  end
25
26
 
@@ -60,7 +61,8 @@ module PgShrink
60
61
  end
61
62
 
62
63
  database = Database::Postgres.new(:postgres_url => options[:url],
63
- :batch_size => batch_size)
64
+ :batch_size => batch_size,
65
+ :log => options[:log])
64
66
 
65
67
  database.instance_eval(File.read(options[:config]), options[:config], 1)
66
68
 
@@ -53,6 +53,135 @@ describe PgShrink do
53
53
  end
54
54
  end
55
55
  end
56
+ describe "with a conditions string" do
57
+ before(:each) do
58
+ database.filter_table(:users) do |f|
59
+ f.filter_by("name like '%test 1%'")
60
+ end
61
+ end
62
+
63
+ it "Should not call records in batches" do
64
+ expect(database).not_to receive(:records_in_batches)
65
+ database.shrink!
66
+ end
67
+
68
+ it "Should call delete_records once" do
69
+ expect(database).to receive(:delete_records).once
70
+ database.shrink!
71
+ end
72
+
73
+ it "Should result in the appropriate records being deleted" do
74
+ database.shrink!
75
+ remaining_users = database.connection.from(:users).all
76
+ # 1 and 10-19
77
+ expect(remaining_users.size).to eq(11)
78
+ end
79
+ end
80
+
81
+ describe "when filtering just with a conditions hash" do
82
+ before(:each) do
83
+ database.filter_table(:users) do |f|
84
+ f.filter_by({
85
+ :name => ["test 1", "test 2", "test 3", "test 4", "test 5",
86
+ "test 6", "test 7", "test 8", "test 9", "test 10"]
87
+ })
88
+ end
89
+ end
90
+
91
+ it "Should not call records in batches" do
92
+ expect(database).not_to receive(:records_in_batches)
93
+ database.shrink!
94
+ end
95
+
96
+ it "Should call delete_records once" do
97
+ expect(database).to receive(:delete_records).once
98
+ database.shrink!
99
+ end
100
+
101
+ it "Should result in the appropriate records being deleted" do
102
+ database.shrink!
103
+ remaining_users = database.connection.from(:users).all
104
+ expect(remaining_users.size).to eq(10)
105
+ end
106
+
107
+ describe "and a condition hash lock" do
108
+ before(:each) do
109
+ database.filter_table(:users) do |f|
110
+ f.lock(name: "test 11")
111
+ end
112
+ end
113
+ it "Should not call records in batches" do
114
+ expect(database).not_to receive(:records_in_batches)
115
+ database.shrink!
116
+ end
117
+
118
+ it "Should call delete_records once" do
119
+ expect(database).to receive(:delete_records).once
120
+ database.shrink!
121
+ end
122
+ it "Still results in the appropriate records being deleted" do
123
+ database.shrink!
124
+ remaining_users = database.connection.from(:users).all
125
+ expect(remaining_users.size).to eq(11)
126
+ end
127
+ end
128
+
129
+ describe "and a condition string lock" do
130
+ before(:each) do
131
+ database.filter_table(:users) do |f|
132
+ f.lock("name = 'test 11'")
133
+ end
134
+ end
135
+ it "Should not call records in batches" do
136
+ expect(database).not_to receive(:records_in_batches)
137
+ database.shrink!
138
+ end
139
+
140
+ it "Should call delete_records once" do
141
+ expect(database).to receive(:delete_records).once
142
+ database.shrink!
143
+ end
144
+ it "Still results in the appropriate records being deleted" do
145
+ database.shrink!
146
+ remaining_users = database.connection.from(:users).all
147
+ expect(remaining_users.size).to eq(11)
148
+ end
149
+ end
150
+
151
+ describe "and a block-based lock" do
152
+ before(:each) do
153
+ database.filter_table(:users) do |f|
154
+ f.lock do |u|
155
+ u[:name] == "test 11"
156
+ end
157
+ end
158
+ end
159
+ it "falls back to results in batches" do
160
+ expect(database).to receive(:records_in_batches)
161
+ database.shrink!
162
+ end
163
+ it "Still results in the appropriate records being deleted" do
164
+ database.shrink!
165
+ remaining_users = database.connection.from(:users).all
166
+ expect(remaining_users.size).to eq(11)
167
+ end
168
+
169
+ end
170
+
171
+ describe "with a subtable_filter" do
172
+ before(:each) do
173
+ database.filter_table(:users) do |f|
174
+ f.filter_subtable(:user_preferences, :foreign_key => :user_id)
175
+ end
176
+ end
177
+
178
+ it "should remove the appropriate subtable records" do
179
+ database.shrink!
180
+ remaining_prefs = database.connection.from(:user_preferences).all
181
+ expect(remaining_prefs.size).to eq(30)
182
+ end
183
+ end
184
+ end
56
185
 
57
186
  it "Should not run delete if there is nothing filtered" do
58
187
  database.filter_table(:users) do |f|
@@ -312,6 +441,90 @@ describe PgShrink do
312
441
  end
313
442
  end
314
443
 
444
+ describe "with condition filters" do
445
+ describe "simple cascade" do
446
+ before(:each) do
447
+ database.filter_table(:users) do |f|
448
+ f.filter_by(:name => "test 1")
449
+ f.filter_subtable(:preferences, :foreign_key => :context_id,
450
+ :type_key => :context_type, :type => 'User')
451
+ end
452
+ database.filter!
453
+ end
454
+ it "will filter prefs with context_type 'User'" do
455
+ #
456
+ remaining_user = database.connection.from(:users).first
457
+ remaining_preferences = database.connection.from(:preferences).
458
+ where(:context_type => 'User').all
459
+ expect(remaining_preferences.size).to eq(3)
460
+ expect(remaining_preferences.map {|u| u[:context_id]}.uniq).
461
+ to eq([remaining_user[:id]])
462
+ end
463
+
464
+ it "will not filter preferences without context_type user" do
465
+ remaining_preferences = database.connection.from(:preferences).
466
+ where(:context_type => 'OtherClass').all
467
+ expect(remaining_preferences.size).to eq(20)
468
+ end
469
+ end
470
+ describe "an extra layer of polymorphic subtables" do
471
+ before(:all) do
472
+ connection = PgShrink::Database::Postgres.new(PgSpecHelper.pg_config).
473
+ connection
474
+ PgSpecHelper.create_table(connection, :preference_dependents,
475
+ {'context_id' => 'integer',
476
+ 'context_type' => 'character varying(256)',
477
+ 'value' => 'character varying(256)'})
478
+ end
479
+
480
+ before(:each) do
481
+ PgSpecHelper.clear_table(database.connection, :preference_dependents)
482
+ prefs = database.connection.from(:preferences).all
483
+ prefs.each do |pref|
484
+ database.connection.run(
485
+ "insert into preference_dependents " +
486
+ "(context_id, context_type, value) " +
487
+ "values (#{pref[:id]}, 'Preference', 'depvalue#{pref[:id]}')")
488
+
489
+ database.connection.run(
490
+ "insert into preference_dependents " +
491
+ "(context_id, context_type, value) " +
492
+ "values (#{pref[:id]}, 'SomeOtherClass', 'fakevalue#{pref[:id]}')")
493
+
494
+ end
495
+
496
+ database.filter_table(:users) do |f|
497
+ f.filter_by(:name => "test 1")
498
+ f.filter_subtable(:preferences, :foreign_key => :context_id,
499
+ :type_key => :context_type, :type => 'User')
500
+ end
501
+
502
+ database.filter_table(:preferences) do |f|
503
+ f.filter_subtable(:preference_dependents,
504
+ :foreign_key => :context_id,
505
+ :type_key => :context_type,
506
+ :type => 'Preference')
507
+ end
508
+ database.filter!
509
+ end
510
+ it "will filter preference dependents associated with preferences" do
511
+ remaining_preferences = database.connection.from(:preferences).all
512
+ remaining_dependents = database.connection.
513
+ from(:preference_dependents).
514
+ where(:context_type => 'Preference').all
515
+
516
+ expect(remaining_dependents.size).to eq(remaining_preferences.size)
517
+ end
518
+
519
+ it "will not filter preference dependents with different type" do
520
+ other_dependents = database.connection.
521
+ from(:preference_dependents).
522
+ where(:context_type => 'SomeOtherClass').all
523
+ expect(other_dependents.size).to eq(80)
524
+ end
525
+ end
526
+ end
527
+
315
528
  describe "simple two table filtering" do
316
529
  before(:each) do
317
530
  database.filter_table(:users) do |f|
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: pg_shrink
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.0.5
4
+ version: 0.0.6
5
5
  platform: ruby
6
6
  authors:
7
7
  - Kevin Ball
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2014-06-02 00:00:00.000000000 Z
11
+ date: 2014-06-11 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: pg