sq-dbsync 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/HISTORY.md +5 -0
- data/LICENSE +14 -0
- data/README.md +218 -0
- data/lib/sq/dbsync/all_tables_plan.rb +51 -0
- data/lib/sq/dbsync/batch_load_action.rb +95 -0
- data/lib/sq/dbsync/config.rb +12 -0
- data/lib/sq/dbsync/consistency_verifier.rb +70 -0
- data/lib/sq/dbsync/database/common.rb +91 -0
- data/lib/sq/dbsync/database/connection.rb +23 -0
- data/lib/sq/dbsync/database/mysql.rb +163 -0
- data/lib/sq/dbsync/database/postgres.rb +77 -0
- data/lib/sq/dbsync/error_handler.rb +59 -0
- data/lib/sq/dbsync/example_record_destroyer.rb +77 -0
- data/lib/sq/dbsync/incremental_load_action.rb +95 -0
- data/lib/sq/dbsync/load_action.rb +156 -0
- data/lib/sq/dbsync/loggers.rb +135 -0
- data/lib/sq/dbsync/manager.rb +241 -0
- data/lib/sq/dbsync/pipeline/simple_context.rb +15 -0
- data/lib/sq/dbsync/pipeline/threaded_context.rb +95 -0
- data/lib/sq/dbsync/pipeline.rb +80 -0
- data/lib/sq/dbsync/refresh_recent_load_action.rb +71 -0
- data/lib/sq/dbsync/schema_maker.rb +87 -0
- data/lib/sq/dbsync/static_table_plan.rb +42 -0
- data/lib/sq/dbsync/table_registry.rb +75 -0
- data/lib/sq/dbsync/tempfile_factory.rb +41 -0
- data/lib/sq/dbsync/version.rb +5 -0
- data/lib/sq/dbsync.rb +9 -0
- data/spec/acceptance/loading_spec.rb +237 -0
- data/spec/acceptance_helper.rb +2 -0
- data/spec/database_helper.rb +86 -0
- data/spec/integration/all_tables_plan_spec.rb +36 -0
- data/spec/integration/batch_load_action_spec.rb +229 -0
- data/spec/integration/consistency_verifier_spec.rb +54 -0
- data/spec/integration/database_connection_spec.rb +61 -0
- data/spec/integration/incremental_load_action_spec.rb +196 -0
- data/spec/integration/manager_spec.rb +109 -0
- data/spec/integration/schema_maker_spec.rb +119 -0
- data/spec/integration_helper.rb +43 -0
- data/spec/spec_helper.rb +27 -0
- data/spec/unit/config_spec.rb +18 -0
- data/spec/unit/error_handler_spec.rb +52 -0
- data/spec/unit/pipeline_spec.rb +42 -0
- data/spec/unit/stream_logger_spec.rb +33 -0
- data/spec/unit_helper.rb +1 -0
- data/sq-dbsync.gemspec +32 -0
- metadata +188 -0
# Integration specs for SQD::BatchLoadAction, exercised against both a
# MySQL and a Postgres source via the 'a batch load' shared example group.
require 'integration_helper'

require 'sq/dbsync/database/connection'
require 'sq/dbsync/loggers'
require 'sq/dbsync/batch_load_action'
require 'sq/dbsync/table_registry'
require 'sq/dbsync/static_table_plan'
require 'sq/dbsync/all_tables_plan'

describe SQD::BatchLoadAction do
  let(:overlap)        { described_class.overlap }
  # let! forces eager evaluation so @now is assigned before each example;
  # one example below advances @now to simulate time passing between phases.
  let!(:now)           { @now = Time.now.utc }
  let(:last_synced_at) { now - 10 }
  let(:target)         { test_target }

  let(:table_plan) do
    {
      table_name:        :test_table,
      source_table_name: :test_table,
      columns:           [:id, :col1, :updated_at],
      source_db:         source,
      indexes:           index
    }
  end

  let(:index) do
    { index_on_col1: { columns: [:col1], unique: false } }
  end

  let(:registry) { SQD::TableRegistry.new(target) }

  let(:action) do
    SQD::BatchLoadAction.new(
      target,
      table_plan,
      registry,
      SQD::Loggers::Null.new,
      lambda { @now }
    )
  end

  shared_examples_for 'a batch load' do
    before do
      create_source_table_with(
        id:         1,
        col1:       'hello',
        pii:        'don alias',
        updated_at: now - 10
      )

      registry.ensure_storage_exists
    end

    describe ':all columns options' do
      let(:table_plan) do
        {
          table_name:        :test_table,
          source_table_name: :test_table,
          columns:           :all,
          source_db:         source,
        }
      end

      it 'copies all columns to target' do
        action.call

        target.hash_schema(:test_table).keys.should ==
          source.hash_schema(:test_table).keys
      end
    end

    it 'copies source tables to target with matching schemas' do
      start_time = now.to_f

      action.call

      verify_schema
      verify_data
      verify_metadata(start_time)
    end

    it 'handles column that does not exist in source' do
      source.alter_table :test_table do
        drop_column :col1
      end

      table_plan[:indexes] = {}
      action.call

      target[:test_table].map { |row| row.values_at(:id) }.
        should == [[1]]
    end

    it 'handles table that does not exist in source' do
      source.drop_table :test_table

      action.call

      target.table_exists?(:test_table).should_not be
    end

    it 'ignores duplicates when loading data' do
      source[:test_table].insert(id: 2, col1: 'hello')
      source[:test_table].insert(id: 3, col1: 'hello')

      table_plan[:indexes][:unique_index] = { columns: [:col1], unique: true }

      action.call

      target[:test_table].count.should == 1
    end

    it 'clears partial load if a new_ table already exists' do
      setup_target_table(now)
      target.switch_table(:new_test_table, :test_table)

      source[:test_table].insert(
        id:         7,
        col1:       'old',
        updated_at: now - 600
      )

      target[:new_test_table].insert(
        id:         2,
        col1:       'already loaded',
        updated_at: now - 200
      )

      action.call

      target[:test_table].all.map { |row| row[:col1] }.sort.should ==
        ['hello', 'old'].sort
    end

    it 'catches up from last_row_at' do
      action.do_prepare
      action.extract_data
      action.load_data

      source[:test_table].insert(id: 2, col1: 'new', updated_at: now)

      # Simulate time passing between the bulk load and the catch-up pass.
      @now += 600

      action.post_load

      target[:test_table].all.map { |row| row[:col1] }.sort.should ==
        ['hello', 'new'].sort
    end

    # Maps each table under test to the database it is loaded from.
    def test_tables
      { test_table: source }
    end

    def verify_schema
      test_tables.each do |table_name, source_db|
        target_table_name = table_name
        target.tables.should include(target_table_name)

        source_test_table_schema =
          source_db.schema(table_name).map do |column, info|
            # Auto-increment is not copied, since it isn't relevant for
            # replicated tables and would be more complicated to support.
            # Primary key status is copied, however.
            info.delete(:auto_increment)
            info.delete(:ruby_default)
            [column, info]
          end

        # Reduce a [column, info] pair to the attributes expected to
        # survive replication across database engines.
        extract_common_db_column_info = lambda do |pair|
          [pair[0], {
            type:        pair[1][:type],
            primary_key: pair[1][:primary_key]
          }]
        end

        source_test_table_schema =
          source_test_table_schema.map(&extract_common_db_column_info)

        target.schema(target_table_name).each do |column_arr|
          column_arr = extract_common_db_column_info.call(column_arr)
          source_test_table_schema.should include(column_arr)
        end
        target.indexes(target_table_name).should == index
      end
    end

    def verify_data
      test_tables.each do |table_name, _|
        rows = target[table_name].all
        rows.count.should == 1

        row = rows[0]
        row.keys.length.should == 3
        row[:id].should == 1
        row[:col1].should == 'hello'
        row[:updated_at].to_i.should == (now - 10).to_i
      end
    end

    def verify_metadata(start_time)
      test_tables.each do |table_name, _|
        meta = registry.get(table_name)
        meta[:last_synced_at].should_not be_nil
        meta[:last_batch_synced_at].should_not be_nil
        meta[:last_batch_synced_at].to_i.should == start_time.to_i
        meta[:last_row_at].to_i.should == (now - 10).to_i
      end
    end
  end

  describe 'with MySQL source' do
    let(:source) { test_source(:source) }

    it_should_behave_like 'a batch load'

    it 'loads records with bodgy timestamps' do
      create_source_table_with(
        id:         1,
        col1:       'hello',
        pii:        'don alias',
        created_at: '0000-00-00 00:00:00',
        updated_at: '0000-00-00 00:00:00'
      )
      registry.ensure_storage_exists

      action.call

      target[:test_table].count.should == 1
    end
  end

  describe 'with PG source' do
    let(:source) { test_source(:postgres) }

    it_should_behave_like 'a batch load'
  end
end
# Integration specs for SQD::ConsistencyVerifier: row counts between
# source and sink must agree within the configured overlap window.
require 'integration_helper'

require 'sq/dbsync/consistency_verifier'
require 'sq/dbsync/static_table_plan'
require 'sq/dbsync/table_registry'

describe SQD::ConsistencyVerifier do
  let(:overlap) { SQD::LoadAction.overlap }
  # A fixed clock keeps the count window deterministic across runs.
  let(:now)     { Date.new(2012, 4, 4).to_time.utc }
  let(:source)  { test_source(:source) }
  let(:target)  { test_target }

  let(:tables) do
    [{
      table_name:  :test_table,
      columns:     [:id, :col1, :updated_at],
      consistency: true,
      source_db:   source,
      indexes:     {}
    }]
  end

  let(:registry) { SQD::TableRegistry.new(target) }
  let(:verifier) { SQD::ConsistencyVerifier.new(target, registry) }

  before do
    create_source_table_with(
      id:         1,
      col1:       'old record',
      created_at: now - overlap
    )
    setup_target_table(now)
  end

  it 'raises if counts do not match up' do
    error_string =
      "test_table had a count difference of 1; " +
      "source: #{source.name} (count: 1), " +
      "sink: #{target.name} (count: 0)"

    -> { verifier.check_consistency!(tables) }.should raise_error(
      SQD::ConsistencyVerifier::ConsistencyError,
      error_string
    )
  end

  it 'uses last_row_at rather than last_synced_at' do
    registry.update(:test_table, now, last_row_at: now - 3)

    -> { verifier.check_consistency!(tables) }.
      should_not raise_error(SQD::ConsistencyVerifier::ConsistencyError)
  end
end
# Integration specs for the decorated database adapters (MySQL, Postgres):
# extraction failures surface as ExtractError, and MySQL translates
# retryable Sequel errors into TransientError.
require 'integration_helper'

require 'sq/dbsync/database/connection'

shared_examples_for 'a decorated database adapter' do
  let(:path) { @file.path }

  before { @file = Tempfile.new('bogus') }

  # Label fixed to match the method actually exercised below
  # (the example calls #extract_to_file, not #extract_sql_to_file).
  describe '#extract_to_file' do
    it 'should raise when it fails' do
      lambda {
        db.extract_to_file('some_table', [], path)
      }.should raise_error(SQD::Database::ExtractError)
    end
  end
end

describe SQD::Database::Postgres do
  let(:source) { test_source(:postgres) }
  let(:db)     { SQD::Database::Postgres.new(source) }

  it_should_behave_like 'a decorated database adapter'
end

describe SQD::Database::Mysql do
  let(:source) { test_source(:source) }
  let(:db)     { SQD::Database::Mysql.new(source) }

  it_should_behave_like 'a decorated database adapter'

  describe '#load_incrementally_from_file' do
    let(:path) { @file.path }

    before { @file = Tempfile.new('bogus') }

    # Stubs the underlying connection so any statement raises a
    # Sequel::DatabaseError carrying the given message.
    def source_with_exception(exception_message)
      source.stub(:run).and_raise(
        Sequel::DatabaseError.new(exception_message)
      )
    end

    it 're-raises deadlock related exceptions as TransientError' do
      source_with_exception("Deadlock found when trying to get lock")
      -> { db.load_incrementally_from_file('bogus', ['bogus'], path) }.
        should raise_error(SQD::Database::TransientError)
    end

    it 're-raises lock wait timeout exceptions as TransientError' do
      source_with_exception("Lock wait timeout exceeded")
      -> { db.load_incrementally_from_file('bogus', ['bogus'], path) }.
        should raise_error(SQD::Database::TransientError)
    end

    it 'does not translate unknown errors' do
      source_with_exception("Unknown")
      -> { db.load_incrementally_from_file('bogus', ['bogus'], path) }.
        should raise_error(Sequel::DatabaseError)
    end
  end
end
# Integration specs for SQD::IncrementalLoadAction, run against both a
# MySQL and a Postgres source via the 'an incremental load' shared group.
require 'integration_helper'

require 'sq/dbsync/database/connection'
require 'sq/dbsync/incremental_load_action'
require 'sq/dbsync/table_registry'
require 'sq/dbsync/loggers'

describe SQD::IncrementalLoadAction do
  let(:overlap)        { described_class.overlap }
  # Fixed clock so the "since last sync" window is deterministic.
  let(:now)            { Date.new(2012, 4, 4).to_time.utc }
  let(:last_synced_at) { now - 10 }
  let(:source)         { test_source(:source) }
  let(:target)         { test_target }

  let(:table_plan) do
    {
      table_name:        :test_table,
      source_table_name: :test_table,
      columns:           [:id, :col1, :updated_at],
      source_db:         source,
      indexes:           {}
    }
  end

  let(:registry) { SQD::TableRegistry.new(target) }

  let(:action) do
    SQD::IncrementalLoadAction.new(
      target,
      table_plan,
      registry,
      SQD::Loggers::Null.new,
      lambda { now }
    )
  end

  shared_examples_for 'an incremental load' do
    before :each do
      # Record 1 falls outside the sync window, record 2 inside it.
      create_source_table_with({
        id:          1,
        col1:        'old record',
        updated_at:  last_synced_at - overlap - 1,
        imported_at: last_synced_at - overlap - 1,
      }, {
        id:          2,
        col1:        'new record',
        updated_at:  last_synced_at - overlap + 1,
        imported_at: last_synced_at - overlap + 1,
      })

      setup_target_table(last_synced_at)
    end

    describe ':all columns options' do
      let(:table_plan) do
        {
          table_name:        :test_table,
          source_table_name: :test_table,
          columns:           :all,
          source_db:         source,
        }
      end

      it 'copies all columns to target' do
        action.call

        target[:test_table].map { |record| record.values_at(:id, :col1) }.
          should == [[2, 'new record']]
      end
    end

    it 'copies null data to the target' do
      source[:test_table].update(col1: nil)

      action.call

      target[:test_table].map { |record| record[:col1] }.
        should == [nil]
    end

    it 'copies source data to the target since the last synced row' do
      registry.update(:test_table, last_synced_at,
        last_synced_at: last_synced_at + 2,
        last_row_at:    last_synced_at
      )

      action.call

      target[:test_table].map { |record| record.values_at(:id, :col1) }.
        should == [[2, 'new record']]

      metadata = registry.get(:test_table)
      metadata[:last_synced_at].to_i.should == now.to_i
      metadata[:last_row_at].to_i.should == (last_synced_at - overlap + 1).to_i
    end

    it 'should replace any records found within the overlap' do
      target[:test_table].insert(
        id:   2,
        col1: 'old record'
      )

      action.call

      target[:test_table].map { |record| record.values_at(:id, :col1) }.
        should == [[2, 'new record']]
    end

    it 'should handle table that does not exist in source but does in target' do
      source.drop_table :test_table

      action.call

      registry.get(:test_table).should_not be_nil
      target.table_exists?(:test_table).should be
    end

    it 'should handle table that does not exist in target but in source' do
      target.drop_table :test_table
      registry.delete(:test_table)

      action.call

      target.table_exists?(:test_table).should_not be
    end

    it 'should handle column that does not exist in source' do
      table_plan[:columns] += [:bogus]

      action.call

      target[:test_table].map { |record| record.values_at(:col1) }.
        should == [['new record']]
    end

    describe 'with custom timestamp' do
      let(:table_plan) do
        {
          table_name:        :test_table,
          source_table_name: :test_table,
          columns:           [:id, :col1, :imported_at],
          timestamp:         :imported_at,
          source_db:         source,
          indexes:           {}
        }
      end

      it 'copies source data to the target since the last synced row' do
        registry.update(:test_table, last_synced_at,
          last_synced_at: last_synced_at + 2,
          last_row_at:    last_synced_at
        )

        action.call

        target[:test_table].map { |record| record.values_at(:id, :col1) }.
          should == [[2, 'new record']]

        metadata = registry.get(:test_table)
        metadata[:last_synced_at].to_i.should == now.to_i
        metadata[:last_row_at].to_i.should ==
          (last_synced_at - overlap + 1).to_i
      end
    end

    context 'always_sync = true' do
      it 'handles table that does not exist in source but does in target' do
        source.drop_table :test_table
        table_plan[:always_sync] = true

        action.call

        registry.get(:test_table).should be_nil
        target.table_exists?(:test_table).should_not be
      end

      it 'handles table that does not exist in target with always_sync' do
        table_plan[:always_sync] = true
        target.drop_table :test_table
        registry.delete(:test_table)

        action.call

        target[:test_table].map { |record| record.values_at(:id, :col1) }.
          should == [[1, 'old record'], [2, 'new record']]

        metadata = registry.get(:test_table)
        metadata[:last_synced_at].to_i.should == now.to_i
        metadata[:last_row_at].to_i.should ==
          (last_synced_at - overlap + 1).to_i
      end
    end
  end

  describe 'with MySQL source' do
    let(:source) { test_source(:source) }

    it_should_behave_like 'an incremental load'
  end

  describe 'with PG source' do
    let(:source) { test_source(:postgres) }

    it_should_behave_like 'an incremental load'
  end
end
# Integration specs for SQD::Manager: batch loads, partial loads, and
# registry/table purge behaviour.
#
# Fixes relative to the original:
#   * `let(:now)` was declared twice with identical bodies; the duplicate
#     silently shadowed the first and is removed.
#   * `SQD:: StaticTablePlan` (stray space after `::`) normalized to
#     `SQD::StaticTablePlan`.
#   * Explicit require for SQD::TableRegistry, which this file references.
require 'integration_helper'

require 'sq/dbsync/manager'
require 'sq/dbsync/static_table_plan'
require 'sq/dbsync/table_registry'
require 'sq/dbsync/loggers'

describe SQD::Manager do
  let(:now) { Time.now.utc }

  let(:config) do
    {
      sources: TEST_SOURCES,
      target:  TEST_TARGET,
      logger:  SQD::Loggers::Null.new,
      clock:   lambda { now }
    }
  end

  let(:manager) do
    SQD::Manager.new(config, [
      [SQD::StaticTablePlan.new(plan), :source]
    ])
  end

  let(:source)     { manager.sources.fetch(:source) }
  let(:alt_source) { manager.sources.fetch(:alt_source) }
  let(:target)     { manager.target }
  let(:registry)   { SQD::TableRegistry.new(target) }

  let(:plan) do
    [{
      table_name: :test_table,
      columns:    [:id, :updated_at]
    }]
  end

  before do
    create_source_table_with(
      id:         1,
      col1:       'hello',
      pii:        'don alias',
      updated_at: now - 10
    )
  end

  it 'handles duplicate table names by selecting the first one' do
    create_source_table_with(alt_source, {
      col1: 'hello',
      pii:  'don alias'
    }, {
      col1: 'hello again',
      pii:  'don alias'
    })

    manager = SQD::Manager.new(config,
      [
        [SQD::StaticTablePlan.new(plan), :source],
        [SQD::StaticTablePlan.new(plan), :alt_source]
      ]
    )
    manager.batch_nonactive

    target[:test_table].count.should == 1
  end

  it 'does not purge old tables from the database' do
    setup_target_table(now)

    manager = SQD::Manager.new(config, [])
    manager.batch_nonactive
    target.table_exists?(:test_table).should be
  end

  it 'removes old tables from the registry' do
    setup_target_table(now)

    manager = SQD::Manager.new(config, [])
    manager.increment_checkpoint
    registry.get(:test_table).should_not be

    # Dropping tables must be done manually
    target.table_exists?(:test_table).should be
  end

  it 'only batch loads the given tables, even when batch load disabled' do
    plan[0][:batch_load] = false

    manager = SQD::Manager.new(config, [
      [SQD::StaticTablePlan.new(plan), :source],
    ])
    manager.batch_nonactive([:bogus])
    target.table_exists?(:test_table).should_not be

    manager.batch_nonactive([:test_table])
    target.table_exists?(:test_table).should be
  end

  it 'does not purge tables excluded from batch load' do
    plan[0][:batch_load] = false
    setup_target_table(now)

    manager = SQD::Manager.new(config, [
      [SQD::StaticTablePlan.new(plan), :source],
    ])

    manager.batch_nonactive([:bogus])
    target.table_exists?(:test_table).should be
  end

  it 'does not purge old tables when doing a partial load' do
    setup_target_table(now)

    manager = SQD::Manager.new(config, [])
    manager.batch_nonactive([:bogus])

    target.table_exists?(:test_table).should be
  end
end