flydata 0.7.11 → 0.7.12

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: d5cb99d4fe2e890c02757c744e6c62f0946b2bf4
- data.tar.gz: 5ca9a5c1618e2775b975adb6702d96399cb57ebf
+ metadata.gz: 77a3df344141ff66884b5deecb9b947c4616b64d
+ data.tar.gz: d3a7bbaa5ba44fb27d24d0d37e6844b4be0b809a
  SHA512:
- metadata.gz: 634e6cee60de39ac8bdef5fe551095645b24adf5c54f9904499954b7682ee004feedcfa2d7d2a517b75ee2677706d2d9ab45430f17b67d99e0e3970b420d18d8
- data.tar.gz: c03762c4bb3f013d349499ed50f6b4e9cbf4eb08d9943fdae5d8b6fb77eab53d9ce026813ee72122a03c8e79aa358e7c96e2aaedc49a5361615a6b99a607de22
+ metadata.gz: dcec46188c13aa32bdb56974f81642b71da51cfe1a4de1cd08eade5fcbb8b70d1a5ad329cbc1cd7ddc020571deee70bf346dc1d845362ba262dc79d5cde40a93
+ data.tar.gz: f8d1f84ebcea486f2aa30cafbdef5851c8876b6f9d21ff3220c976fd4b7306d42faa94d53ad7df9f250fd0b6f922484fda867d4bae563db828680c8a50b9d353
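
These are the standard RubyGems checksums for the two members of the .gem archive. A quick way to check a downloaded copy of flydata-0.7.12 against them (a sketch; it assumes the gem file has already been unpacked, e.g. with `tar -xf flydata-0.7.12.gem`, so that metadata.gz and data.tar.gz sit in the current directory):

    require 'digest'

    # metadata.gz and data.tar.gz are members of the .gem tar archive.
    puts Digest::SHA1.file('metadata.gz').hexdigest     # expect 77a3df344141ff66884b5deecb9b947c4616b64d
    puts Digest::SHA512.file('data.tar.gz').hexdigest   # expect the SHA512 data.tar.gz value above
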
data/VERSION CHANGED
@@ -1 +1 @@
- 0.7.11
+ 0.7.12
@@ -33,6 +33,7 @@ module ErrorCode
  REDSHIFT_ACCESS_ERROR = 10009
  BREAKING_ALTER_TABLE_ERROR = 10010
  BREAKING_INFORMATION_SCHEMA_ERROR = 10011
+ BREAKING_TABLE_DDL_ERROR = 10012
  INTERNAL_ERROR = 1200
  end

@@ -206,12 +207,18 @@ class StaleTableRevisionError < FlydataError
  def err_code; ErrorCode::TABLE_REVISION_ERROR; end
  end

- # Unsupported ALTER TABLE which breaks the tables sync consistency.
+ # Unsupported ALTER TABLE which breaks the table's sync consistency.
  class BreakingAlterTableError < RecordDeliveryError
  include BreakingSyncError
  def err_code; ErrorCode::BREAKING_ALTER_TABLE_ERROR; end
  end

+ # Unsupported DDL which breaks the table's sync consistency.
+ class BreakingTableDdlError < RecordDeliveryError
+ include BreakingSyncError
+ def err_code; ErrorCode::BREAKING_TABLE_DDL_ERROR; end
+ end
+
  # Unrecoverable information schema error which requires re-synchronize a table
  class BreakingInformationSchemaError < InformationSchemaError
  include BreakingSyncError
@@ -269,8 +269,10 @@ EOS
  return NULL_STR if default_value.nil?
  # strip type cast
  if default_value.kind_of?(String) &&
- /^(.+?)(::"?[a-z ]+"?)*$/.match(default_value)
+ /(.+?)(::"?[^':]+?"?)?\z/m.match(default_value)
  default_value = $1
+ # strip outer single-quotes
+ default_value = $1 if /^'(.*)'$/m.match(default_value)
  end
  if flydata_type.start_with?('year')
  value = convert_year_into_date(remove_single_quote(default_value))
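
The replacement pattern accepts multi-line default values and arbitrary cast type names (the old pattern was line-anchored and only matched lowercase, unquoted names), and a second new pattern then strips the outer single quotes. A minimal sketch of how the two new patterns behave on a PostgreSQL-style default value; the constant names below are illustrative only, not part of the gem:

    CAST_RE  = /(.+?)(::"?[^':]+?"?)?\z/m   # value plus an optional trailing "::type" cast
    QUOTE_RE = /^'(.*)'$/m                  # value wrapped in outer single quotes

    v = "'FlyDataCore::TableDef'::character varying"
    v = CAST_RE.match(v)[1]    # => "'FlyDataCore::TableDef'"  (cast dropped, inner :: kept)
    v = QUOTE_RE.match(v)[1]   # => "FlyDataCore::TableDef"    (outer quotes dropped)
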
@@ -1215,6 +1215,29 @@ EOS
  subject { subject_object.replace_default_value(flydata_type, redshift_type,
  default_value) }

+ context 'with cast expression from PostgreSQL' do
+ context 'with multiple-lines in a default value' do
+ let(:flydata_type) { "text" }
+ let(:redshift_type) { "varchar(max)" }
+ let(:default_value) { "'--- {} \nthe 2nd line'::text" }
+ it { is_expected.to eq "'--- {} \nthe 2nd line'" }
+ end
+
+ context 'with :: in a default value' do
+ let(:flydata_type) { "varchar" }
+ let(:redshift_type) { "varchar" }
+ let(:default_value) { "'FlyDataCore::TableDef'::character varying" }
+ it { is_expected.to eq "'FlyDataCore::TableDef'" }
+ end
+
+ context 'with a single-quote in a default value' do
+ let(:flydata_type) { "varchar" }
+ let(:redshift_type) { "varchar" }
+ let(:default_value) { "'That''s it!'::character varying" }
+ it { is_expected.to eq "'That''s it!'" }
+ end
+ end
+
  context 'with integer type' do
  let(:flydata_type) { "int8" }
  let(:redshift_type) { "int8" }
data/flydata.gemspec CHANGED
Binary file
@@ -42,7 +42,7 @@ module Flydata
  # "success": true
  # }

- NUM_TABLES_PER_REQUEST = 110
+ NUM_TABLES_PER_REQUEST = 50

  def buffer_stat(data_entry_id, options = {})
  table_array = options[:tables] || ['']
@@ -180,7 +180,11 @@ module Flydata
  # - If value is nil, the setting will be deleted.
  # - If table's attribute hash doesn't have an attribute key, the setting for the attribute for the table will not be changed
  def update_table_validity(data_entry_id, table_update_hash)
- @client.post("/#{@model_name.pluralize}/#{data_entry_id}/update_table_validity", {:headers => {:content_type => :json}}, table_update_hash.to_json)
+ slice_hash(table_update_hash[:updated_tables], NUM_TABLES_PER_REQUEST).each do |table_update_hash_sl|
+ # re-construct sliced argument hash
+ arg_hash = {updated_tables: table_update_hash_sl}
+ @client.post("/#{@model_name.pluralize}/#{data_entry_id}/update_table_validity", {:headers => {:content_type => :json}}, arg_hash.to_json)
+ end
  end

  # Tells the server that an initial sync has completed
@@ -210,6 +214,13 @@ module Flydata

  message
  end
+
+ # return Array of Hash, sliced in length
+ def slice_hash(original_hash, length)
+ original_hash.each_slice(length).collect do |sl|
+ sl.inject({}) { |h, (k,v)| h[k] = v; h }
+ end
+ end
  end
  end
  end
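
With NUM_TABLES_PER_REQUEST lowered to 50, update_table_validity now posts the updated_tables hash in batches instead of one large request, using the new slice_hash helper. A rough standalone illustration of how the helper splits a hash (plain Ruby; the sample data is made up):

    # slice_hash copied from the hunk above.
    def slice_hash(original_hash, length)
      original_hash.each_slice(length).collect do |sl|
        sl.inject({}) { |h, (k,v)| h[k] = v; h }
      end
    end

    updated = { "tbl0" => true, "tbl1" => false, "tbl2" => true }
    p slice_hash(updated, 2)
    # => [{"tbl0"=>true, "tbl1"=>false}, {"tbl2"=>true}]
    # Each sub-hash is then posted as {updated_tables: sub_hash} in its own request.
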
@@ -18,12 +18,18 @@ module Flydata
  else
  options = {show_final_message: options_or_show_final_message}
  end
+
  # Check if process exist
  if process_exist?
  log_info_stdout("Process is still running. Please stop process first.") unless options[:quiet]
  return
  end

+ dp = flydata.data_port.get
+ if dp['paused']
+ raise "This application has been stopped. Process cannot be started."
+ end
+
  if agent_locked?
  log_info_stdout("Agent was not shut down properly. Agent will check the status and fix itself if necessary.")
  repair_opts = Flydata::Command::Sync.slop_repair
@@ -42,7 +48,6 @@ module Flydata

  wait_until_server_ready(options)

- dp = flydata.data_port.get
  AgentCompatibilityCheck.new(dp).check

  fluentd_started = false
@@ -302,8 +302,8 @@ EOS
  # Command: flydata sync:repair
  # - Entry method
  def repair
- _repair
- unless opts.skip_start?
+ need_to_start = _repair
+ if need_to_start && !opts.skip_start?
  Flydata::Command::Sender.new.start
  end
  end
@@ -385,7 +385,11 @@ EOS
  abnormal_shutdown = false
  begin
  begin
- flush_buffer_and_stop(@full_tables, force: false, timeout: 55) # A short timeout is set. Otherwise, the check command and the repair command takes forever to complete.
+ # wait until no data gets processed for 3 minutes. This is long
+ # to wait, but there *are* cases where active data processing takes
+ # more than 3 minutes between 2 chunk processing if COPY command
+ # takes minutes to process.
+ flush_buffer_and_stop(@full_tables, force: false, timeout: 180)
  rescue ServerDataProcessingTimeout => e
  data_stuck_at = e.state
  end
@@ -450,7 +454,7 @@ EOS
  if status.include? :OK
  log_info_stdout ""
  log_info_stdout "Sync is in good condition. Nothing to repair."
- return
+ return true
  end

  if status.include?(:ABNORMAL_SHUTDOWN) && status.uniq.length == 1
@@ -460,7 +464,7 @@ EOS

  # Remove the lock file if exists.
  File.delete(FLYDATA_LOCK) if File.exists?(FLYDATA_LOCK)
- return
+ return true
  end

  gt = []
@@ -495,7 +499,10 @@ EOS

  EOS

- return unless ask_yes_no("Proceed?")
+ unless ask_yes_no("Proceed?")
+ log_info_stdout "FlyData agent is stopped. Run 'flydata sync:repair' again for retry."
+ return false # Not restart
+ end

  oldest_source_pos = get_oldest_available_source_pos
  unrepairable_tables = []
@@ -577,6 +584,7 @@ EOS
  File.delete(FLYDATA_LOCK) if File.exists?(FLYDATA_LOCK)

  log_info_stdout "Repair is done. Restarting."
+ true
  end

  # Initial sync
@@ -63,13 +63,22 @@ module QueryBasedSync
  sleep @fetch_interval
  end
  rescue => e
- #TODO: Introduce retryable error notions
- log_error_with_backtrace("Unexpected error occured", error: e)
+ handle_error(e, "Unexpected error occured")
  sleep @retry_interval
  retry
  end
  end

+ def handle_error(error, message)
+ log_method = if error.kind_of?(FlydataCore::RetryableError)
+ error = error.original_exception
+ :log_warn
+ else
+ :log_error
+ end
+ self.send(log_method, message, {error: error}, {backtrace: true})
+ end
+
  def run_once
  resource_requester.start do |req| # open connection
  context.tables.each do |table_name|
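
handle_error downgrades retryable failures to warn-level logging while the fetch loop retries in either case. Based only on how the wrapper is used in this diff (constructed as FlydataCore::RetryableError.new(e) and unwrapped via #original_exception), it presumably looks roughly like the sketch below; the real definition lives in flydata-core and may differ:

    # Sketch only -- inferred from usage in this diff, not the gem's actual source.
    module FlydataCore
      class RetryableError < StandardError
        attr_reader :original_exception

        def initialize(original_exception)
          @original_exception = original_exception
          super(original_exception.message)
        end
      end
    end
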
@@ -99,7 +99,7 @@ EOS
  src_pos
  end

- NUM_ROWS = 500000
+ NUM_ROWS = 50000

  def dump_table(tabledef, source_pos, io, cli)
  dump_source_table(tabledef, io)
@@ -63,7 +63,11 @@ class ResourceRequester < Flydata::QueryBasedSync::ResourceRequester
  # Create and execute query
  query = generate_query(t_meta, from_sid, to_sid, pk_values)

- result = @resource_client.query(query)
+ begin
+ result = @resource_client.query(query)
+ rescue PG::TRSerializationFailure => e
+ raise FlydataCore::RetryableError.new(e)
+ end

  # Create response object based on result
  responses = build_responses(table_name, result, from_sid: from_sid, to_sid: to_sid, pk_values: pk_values)
@@ -175,10 +175,10 @@ describe DataEntry do
  end
  end
  end
- context "with more than 100 tables" do
- let(:tables) { 180.times.collect{|i| "tbl#{i}"} }
- let(:expected_tables1) { tables[0...110].join(",") }
- let(:expected_tables2) { tables[110..-1].join(",") }
+ context "with more than 50 tables" do
+ let(:tables) { 80.times.collect{|i| "tbl#{i}"} }
+ let(:expected_tables1) { tables[0...50].join(",") }
+ let(:expected_tables2) { tables[50..-1].join(",") }

  let(:expected_message) { double('expected_message') }

@@ -318,10 +318,10 @@ describe DataEntry do
  let(:status21) { double('status21') }
  let(:status22) { double('status22') }

- context "with more than 100 tables" do
- let(:tables) { 180.times.collect {|i| "tbl#{i}"} }
- let(:expected_tables1) { tables[0...110].join(",") }
- let(:expected_tables2) { tables[110..-1].join(",") }
+ context "with more than 50 tables" do
+ let(:tables) { 80.times.collect {|i| "tbl#{i}"} }
+ let(:expected_tables1) { tables[0...50].join(",") }
+ let(:expected_tables2) { tables[50..-1].join(",") }

  it do
  expect(api_client).to receive(:post).
@@ -71,6 +71,40 @@ describe Client do
  expect{subject_object.start}.to raise_error(/Already started/)
  end
  end
+
+ context 'when getting non-retryable error' do
+ before do
+ context.instance_variable_get(:@params)[:retry_interval] = 0.5
+ expect_any_instance_of(DummyResourceRequester).to receive(:start).and_raise("dummy error")
+ expect_any_instance_of(DummyResourceRequester).to receive(:start).and_return(nil)
+ end
+ it 'retries after logging in error level' do
+ expect($log).to receive(:error) do |msg|
+ expect(msg).to include("dummy error")
+ end
+ expect($log).to receive(:warn).never
+ start_client(0.2)
+ sleep 0.6
+ end
+ end
+
+ context 'when getting retryable error' do
+ before do
+ context.instance_variable_get(:@params)[:retry_interval] = 0.5
+ expect_any_instance_of(DummyResourceRequester).to receive(:start).and_raise(
+ FlydataCore::RetryableError.new(RuntimeError.new("dummy retryable error")))
+ expect_any_instance_of(DummyResourceRequester).to receive(:start).and_return(nil)
+ end
+ it 'retries after logging in warn level' do
+ expect($log).to receive(:error).never
+ expect($log).to receive(:warn) do |msg|
+ expect(msg).to include("dummy retryable error")
+ end
+ start_client(0.2)
+ sleep 0.6
+ end
+ end
+
  end
  end

@@ -62,6 +62,16 @@ describe ResourceRequester do
  let(:table_1_snapshot) { current_snapshot }
  it { is_expected.to be_nil }
  end
+
+ context 'when getting PG::TRSerializationFailure' do
+ before do
+ allow_any_instance_of(FlydataCore::Postgresql::PGClient).to receive(:query).and_raise(
+ PG::TRSerializationFailure.new("ERROR: canceling statement due to conflict with recovery DETAIL: User query might have needed to see row versions that must be removed.")
+ )
+ end
+
+ it { expect{subject}.to raise_error(FlydataCore::RetryableError) }
+ end
  end
  end

metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: flydata
  version: !ruby/object:Gem::Version
- version: 0.7.11
+ version: 0.7.12
  platform: ruby
  authors:
  - Koichi Fujikawa
@@ -12,7 +12,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2016-08-18 00:00:00.000000000 Z
+ date: 2016-10-25 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: rest-client