embulk-input-zendesk 0.1.13 → 0.2.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
-   metadata.gz: 1a391e53c7f33423839b92cfbaed3b80765ec8fb
-   data.tar.gz: 54253ba9242ad460cb0252912b928aaf81a77af6
+   metadata.gz: 8965b15e24973482e7f636c44633ace2821f9c0d
+   data.tar.gz: 1bbcaf319cabe8f53ddfde5b2cdeb16f0c54c612
  SHA512:
-   metadata.gz: a0909371764ebeec9b3b083040cab4a6626391d9eb258292428040ce7cc2d91480a2e2edaca6b173f0b8773d3380a21e2e0ca8d462b69f714b25e9bec4242854
-   data.tar.gz: 6f6f530ab66122a1ab329c3a228ee699052acf7114448ef2d4bfe43c11d03dd8afed19ec220a2a23a942178fd380f8c718228716b9ae5dc58f0568949096ce2f
+   metadata.gz: 79ea61da70c871ea6d1bde7789dbc0af0fce75101aa29ac8d7821b0bf73b42df87c925c40dc796a4333cda32d17546ef86c233a463b5d3eed86211cedd2d6e0b
+   data.tar.gz: 5a767959bf7955ed241d67dc5993731046c1abfa328af9738bff8dda40bf16543aa5ec568cb5ec3fcb26912a4790baab88da09ec6f5a5782eddd8bf8b4833aa5
data/CHANGELOG.md CHANGED
@@ -1,3 +1,31 @@
+ ## 0.2.6 - 2017-05-23
+ * [enhancement] Enable incremental loading for ticket_metrics
+ 
+ ## 0.2.5 - 2017-04-21
+ * [enhancement] Replace `thread` gem with `concurrent-ruby`, which is more robust and has better queue management [#33](https://github.com/treasure-data/embulk-input-zendesk/pull/33)
+ 
+ ## 0.2.4 - 2017-04-20
+ * [fixed] Fix thread pool bottleneck [#31](https://github.com/treasure-data/embulk-input-zendesk/pull/31)
+ 
+ ## 0.2.3 - 2017-04-18
+ * [enhancement] Add flushing to `page_builder` [#29](https://github.com/treasure-data/embulk-input-zendesk/pull/29)
+ 
+ ## 0.2.2 - 2017-04-14
+ * [enhancement] Improve `httpclient` usage: re-use the client instance [#27](https://github.com/treasure-data/embulk-input-zendesk/pull/27)
+ 
+ ## 0.2.1 - 2017-04-11
+ * [fixed] Fix memory leak in the `export_parallel()` method [#25](https://github.com/treasure-data/embulk-input-zendesk/pull/25)
+ 
+ ## 0.2.0 - 2017-04-07
+ * [fixed] `ticket_metrics` gets cut off (archived); compare against the list of all `tickets` [#23](https://github.com/treasure-data/embulk-input-zendesk/pull/23)
+ * [enhancement] Switch to a thread pool for the `export_parallel()` method [#23](https://github.com/treasure-data/embulk-input-zendesk/pull/23)
+ 
+ ## 0.1.15 - 2017-03-30
+ * [fixed] Rename the JRuby thread so it does not expose the runtime path [#21](https://github.com/treasure-data/embulk-input-zendesk/pull/21)
+ 
+ ## 0.1.14 - 2017-03-28
+ * [enhancement] Fetch the base target and related objects concurrently [#19](https://github.com/treasure-data/embulk-input-zendesk/pull/19)
+ 
  ## 0.1.13 - 2017-03-23
  * [fixed] Fix to generate config_diff when no data fetched [#18](https://github.com/treasure-data/embulk-input-zendesk/pull/18)
  
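The 0.2.0–0.2.5 entries above all revolve around one change: the hand-rolled Queue/Thread worker code was replaced by a `concurrent-ruby` thread pool. A minimal sketch of that pattern, with an illustrative page range and a stub fetch (the real code appears in the client.rb hunks below):

  require 'concurrent'

  # Stub standing in for the plugin's page fetch (illustrative only).
  def fetch_page(page)
    "page #{page}"
  end

  pool = Concurrent::ThreadPoolExecutor.new(
    min_threads: 10,
    max_threads: 100,
    max_queue: 10_000,
    fallback_policy: :caller_runs # when the queue is full, run the job in the submitting thread
  )

  (2..5).each do |page|
    pool.post { fetch_page(page) } # each page is fetched on a pool thread
  end

  pool.shutdown             # stop accepting new jobs
  pool.wait_for_termination # block until all queued jobs finish

The `:caller_runs` fallback gives natural backpressure: when workers fall behind, the producer slows down instead of growing the queue without bound.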
data/embulk-input-zendesk.gemspec CHANGED
@@ -1,7 +1,7 @@
  
  Gem::Specification.new do |spec|
    spec.name = "embulk-input-zendesk"
-   spec.version = "0.1.13"
+   spec.version = "0.2.6"
    spec.authors = ["uu59", "muga", "sakama"]
    spec.summary = "Zendesk input plugin for Embulk"
    spec.description = "Loads records from Zendesk."
@@ -15,6 +15,7 @@ Gem::Specification.new do |spec|
  
    spec.add_dependency 'perfect_retry', '~> 0.5'
    spec.add_dependency 'httpclient'
+   spec.add_dependency 'concurrent-ruby'
    spec.add_development_dependency 'embulk', ['~> 0.8.1']
    spec.add_development_dependency 'bundler', ['~> 1.0']
    spec.add_development_dependency 'rake', ['>= 10.0']
data/lib/embulk/input/zendesk/client.rb CHANGED
@@ -1,6 +1,6 @@
  require "strscan"
- require "thread"
  require "httpclient"
+ require 'concurrent'
  
  module Embulk
    module Input
@@ -10,8 +10,8 @@ module Embulk
    PARTIAL_RECORDS_SIZE = 50
    PARTIAL_RECORDS_BYTE_SIZE = 50000
-   AVAILABLE_INCREMENTAL_EXPORT = %w(tickets users organizations ticket_events).freeze
-   UNAVAILABLE_INCREMENTAL_EXPORT = %w(ticket_fields ticket_forms ticket_metrics).freeze
+   AVAILABLE_INCREMENTAL_EXPORT = %w(tickets users organizations ticket_events ticket_metrics).freeze
+   UNAVAILABLE_INCREMENTAL_EXPORT = %w(ticket_fields ticket_forms).freeze
    AVAILABLE_TARGETS = AVAILABLE_INCREMENTAL_EXPORT + UNAVAILABLE_INCREMENTAL_EXPORT
  
    def initialize(config)
@@ -19,10 +19,25 @@ module Embulk
    end
  
    def httpclient
-     httpclient = HTTPClient.new
-     httpclient.connect_timeout = 240 # default:60 is not enough for huge data
-     # httpclient.debug_dev = STDOUT
-     return set_auth(httpclient)
+     # multi-threading + retry can create a lot of instances, each keeping its own connections;
+     # re-using one instance across threads lets us omit the cleanup code
+     @httpclient ||=
+       begin
+         clnt = HTTPClient.new
+         clnt.connect_timeout = 240 # the default of 60 is not enough for huge data
+         clnt.receive_timeout = 240 # the default receive_timeout needs raising too
+         # clnt.debug_dev = STDOUT
+         set_auth(clnt)
+       end
+   end
+ 
+   def get_pool
+     Concurrent::ThreadPoolExecutor.new(
+       min_threads: 10,
+       max_threads: 100,
+       max_queue: 10_000,
+       fallback_policy: :caller_runs
+     )
    end
  
    def validate_config
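The memoized `@httpclient ||=` above means every caller, on every thread, shares one `HTTPClient`; a new test near the end of this diff asserts exactly that. A tiny illustrative reduction (the `ApiClient` class is hypothetical):

  require 'httpclient'

  class ApiClient
    def httpclient
      # ||= evaluates the begin block only once; later calls return the cached instance
      @httpclient ||= begin
        clnt = HTTPClient.new
        clnt.connect_timeout = 240
        clnt.receive_timeout = 240
        clnt
      end
    end
  end

  c = ApiClient.new
  raise "expected one shared instance" unless c.httpclient.equal?(c.httpclient)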
@@ -69,6 +84,24 @@
      end
    end
  
+   # Ticket metrics have to be exported through both the non-incremental endpoint and the
+   # incremental tickets endpoint. We support this by filtering out ticket_metrics created
+   # before start_time while passing the incremental start time to the incremental
+   # ticket/ticket_metrics export.
+   %w(ticket_metrics).each do |target|
+     define_method(target) do |partial = true, start_time = 0, &block|
+       path = "/api/v2/incremental/tickets.json"
+       if partial
+         path = "/api/v2/#{target}.json"
+         # A partial export has to use the old endpoint: the new endpoint returns both tickets
+         # and ticket metrics, tickets first, so the current approach of cutting off the
+         # response packet won't work. Partial is only used for preview/guess, so this is fine.
+         export(path, target, &block)
+       else
+         incremental_export(path, "metric_sets", start_time, [], partial, {include: "metric_sets"}, &block)
+       end
+     end
+   end
+ 
    # they have a non-incremental API only
    UNAVAILABLE_INCREMENTAL_EXPORT.each do |target|
      define_method(target) do |partial = true, start_time = 0, &block|
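The incremental branch leans on Zendesk side-loading: requesting `/api/v2/incremental/tickets.json` with `include=metric_sets` returns a `metric_sets` array alongside the tickets, which is why the method above passes `{include: "metric_sets"}` and reads the `metric_sets` key. A sketch of that request shape (the subdomain is illustrative):

  require 'httpclient'
  require 'json'

  base = 'https://example.zendesk.com' # illustrative subdomain
  clnt = HTTPClient.new

  # Same endpoint and params the incremental ticket_metrics path uses.
  res = clnt.get("#{base}/api/v2/incremental/tickets.json",
                 include: 'metric_sets', start_time: 0)
  data = JSON.parse(res.body)

  metric_sets = data['metric_sets'] || [] # side-loaded next to data['tickets']
  puts "#{metric_sets.size} metric sets, end_time=#{data['end_time']}"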
@@ -76,7 +109,7 @@ module Embulk
        if partial
          export(path, target, &block)
        else
-         export_parallel(path, target, &block)
+         export_parallel(path, target, start_time, &block)
        end
      end
    end
@@ -96,7 +129,7 @@ module Embulk
  
    private
  
-   def export_parallel(path, key, workers = 5, &block)
+   def export_parallel(path, key, start_time = 0, &block)
      per_page = 100 # 100 is the maximum https://developer.zendesk.com/rest_api/docs/core/introduction#pagination
      first_response = request(path, per_page: per_page, page: 1)
      first_fetched = JSON.parse(first_response.body)
@@ -104,42 +137,26 @@ module Embulk
      last_page_num = (total_count / per_page.to_f).ceil
      Embulk.logger.info "#{key} records=#{total_count} last_page=#{last_page_num}"
  
-     queue = Queue.new
-     (2..last_page_num).each do |page|
-       queue << page
+     first_fetched[key].uniq { |r| r['id'] }.each do |record|
+       block.call record
+       # known_ticket_ids: collect fetched ticket IDs, to exclude in the next step
      end
-     records = first_fetched[key]
- 
-     mutex = Mutex.new
-     threads = workers.times.map do |n|
-       Thread.start do
-         loop do
-           break if queue.empty?
-           current_page = nil
- 
-           begin
-             Timeout.timeout(0.1) do
-               # Somehow queue.pop(true) blocks... timeout is a workaround for that
-               current_page = queue.pop(true)
-             end
-           rescue Timeout::Error, ThreadError => e
-             break #=> ThreadError: queue empty
-           end
  
-           response = request(path, per_page: per_page, page: current_page)
-           fetched_records = extract_records_from_response(response, key)
-           mutex.synchronize do
-             Embulk.logger.info "Fetched #{key} on page=#{current_page}"
-             records.concat fetched_records
-           end
+     pool = get_pool
+     (2..last_page_num).each do |page|
+       pool.post do
+         response = request(path, per_page: per_page, page: page)
+         fetched_records = extract_records_from_response(response, key)
+         Embulk.logger.info "Fetched #{key} on page=#{page} >>> size: #{fetched_records.length}"
+         fetched_records.uniq { |r| r['id'] }.each do |record|
+           block.call record
          end
        end
      end
-     threads.each(&:join)
  
-     records.uniq {|r| r["id"]}.each do |record|
-       block.call record
-     end
+     pool.shutdown
+     pool.wait_for_termination
+ 
      nil # intentionally different from incremental_export
    end
  
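Two things change shape here: pages are fetched by pool jobs instead of five bespoke threads, and records are yielded to the caller as each page arrives instead of being accumulated into one big `records` array, which is what the 0.2.1 memory-leak fix was about. Note that deduplication now happens within a page rather than across the whole export. A reduction of the before/after, with stub data and illustrative names:

  # Stub: three "pages" of five records each (illustrative only).
  def fetch_pages
    (1..3).map { |page| Array.new(5) { |i| { 'id' => page * 100 + i } } }
  end

  # Old shape: accumulate everything, dedup, then yield -> peak memory ~ whole export.
  all = fetch_pages.flatten
  all.uniq { |r| r['id'] }.each { |r| r }

  # New shape: dedup per page and yield immediately -> peak memory ~ one page.
  fetch_pages.each do |page_records|
    page_records.uniq { |r| r['id'] }.each { |r| r }
  end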
@@ -160,18 +177,19 @@ module Embulk
      end
    end
  
-   def incremental_export(path, key, start_time = 0, known_ids = [], partial = true, &block)
+   def incremental_export(path, key, start_time = 0, known_ids = [], partial = true, query = {}, &block)
      if partial
-       records = request_partial(path, {start_time: start_time}).first(5)
+       records = request_partial(path, query.merge({start_time: start_time})).first(5)
        records.uniq{|r| r["id"]}.each do |record|
          block.call record
        end
        return
      end
  
-     loop do
+     pool = get_pool
+     last_data = loop do
        start_fetching = Time.now
-       response = request(path, {start_time: start_time})
+       response = request(path, query.merge({start_time: start_time}))
        begin
          data = JSON.parse(response.body)
        rescue => e
@@ -198,7 +216,7 @@ module Embulk
        next if known_ids.include?(record["id"])
  
        known_ids << record["id"]
-       block.call record
+       pool.post { yield(record) }
        actual_fetched += 1
      end
      Embulk.logger.info "Fetched #{actual_fetched} records from start_time:#{start_time} (#{Time.at(start_time)}) within #{Time.now.to_i - start_fetching.to_i} seconds"
@@ -209,6 +227,10 @@ module Embulk
      # https://developer.zendesk.com/rest_api/docs/core/incremental_export#pagination
      break data if data["count"] < 1000
    end
+ 
+   pool.shutdown
+   pool.wait_for_termination
+   last_data
  end
  
  def extract_records_from_response(response, key)
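The `last_data = loop do ... break data ... end` rewrite relies on a Ruby detail: `Kernel#loop` returns whatever `break` is given, so the final parsed response survives past the loop and can be returned after the pool drains. For example:

  count = 0
  last_data = loop do
    count += 1
    data = { 'count' => count }
    break data if data['count'] >= 3 # break's argument becomes the loop's value
  end
  puts last_data.inspect #=> {"count"=>3}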
@@ -364,6 +386,7 @@ module Embulk
            raise "Server returns unknown status code (#{status_code}) #{body}"
          end
        end
+ 
      end
    end
  end
data/lib/embulk/input/zendesk/plugin.rb CHANGED
@@ -1,4 +1,4 @@
- require "perfect_retry"
+ require 'perfect_retry'
  
  module Embulk
    module Input
@@ -110,11 +110,14 @@ module Embulk
        args << @start_time.to_i
      end
  
+     mutex = Mutex.new
      fetching_start_at = Time.now
      last_data = client.public_send(method, *args) do |record|
        record = fetch_related_object(record)
        values = extract_values(record)
-       page_builder.add(values)
+       mutex.synchronize do
+         page_builder.add(values)
+       end
        break if preview? # NOTE: preview takes care of only 1 record; subresource fetching is slow.
      end
      page_builder.finish
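With records now delivered from pool threads, concurrent `page_builder.add` calls would race, so the plugin serializes them behind a `Mutex`. An illustrative reduction, with a plain Array standing in for `page_builder`:

  require 'concurrent'

  sink = [] # stands in for page_builder; appends are not thread-safe
  mutex = Mutex.new

  pool = Concurrent::FixedThreadPool.new(8)
  1_000.times do |i|
    pool.post do
      mutex.synchronize { sink << i } # only one thread appends at a time
    end
  end
  pool.shutdown
  pool.wait_for_termination

  raise 'lost records' unless sink.size == 1_000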
@@ -133,14 +136,15 @@ module Embulk
        end
      end
  
-     return task_report
+     task_report
    end
  
    private
  
    def fetch_related_object(record)
-     (task[:includes] || []).each do |ent|
-       record[ent] = client.fetch_subresource(record["id"], task[:target], ent)
+     return record unless task[:includes] && !task[:includes].empty?
+     task[:includes].each do |ent|
+       record[ent] = client.fetch_subresource(record['id'], task[:target], ent)
      end
      record
    end
@@ -6,7 +6,7 @@ require "embulk/input/zendesk"
  require "override_assert_raise"
  require "fixture_helper"
  require "capture_io"
- 
+ require "concurrent/atomic/atomic_fixnum"
  module Embulk
    module Input
      module Zendesk
@@ -49,7 +49,40 @@ module Embulk
        end
      end
    end
- 
+   sub_test_case "ticket_metrics incremental export" do
+     def client
+       @client ||= Client.new(login_url: login_url, auth_method: "oauth", access_token: access_token, retry_limit: 1, retry_initial_wait_sec: 0)
+     end
+     setup do
+       stub(Embulk).logger { Logger.new(File::NULL) }
+       @httpclient = client.httpclient
+       stub(client).httpclient { @httpclient }
+     end
+     test "fetch ticket_metrics with start_time set" do
+       records = 100.times.map{|n| {"id" => n, "ticket_id" => n+1}}
+       start_time = 1488535542
+       @httpclient.test_loopback_http_response << [
+         "HTTP/1.1 200",
+         "Content-Type: application/json",
+         "",
+         {
+           metric_sets: records,
+           count: records.size,
+           next_page: nil,
+         }.to_json
+       ].join("\r\n")
+       # lock = Mutex.new
+       result_array = records
+       counter = Concurrent::AtomicFixnum.new(0)
+       handler = proc { |record|
+         assert_include(result_array, record)
+         counter.increment
+       }
+       proxy(@httpclient).get("#{login_url}/api/v2/incremental/tickets.json", {:include => "metric_sets", :start_time => start_time}, anything)
+       client.ticket_metrics(false, start_time, &handler)
+       assert_equal(counter.value, result_array.size)
+     end
+   end
    sub_test_case "ticket_metrics (non-incremental export)" do
      sub_test_case "partial" do
        def client
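Because handlers may now run on pool threads, the tests stop mock-counting calls and count with `Concurrent::AtomicFixnum`, whose `increment` is atomic across threads (a plain Integer `+=` is not). For example:

  require 'concurrent/atomic/atomic_fixnum'

  counter = Concurrent::AtomicFixnum.new(0)
  threads = 8.times.map do
    Thread.new { 1_000.times { counter.increment } }
  end
  threads.each(&:join)
  puts counter.value #=> always 8000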
@@ -73,7 +106,8 @@ module Embulk
          "",
          {
            ticket_metrics: records,
-           next_page: "https://treasuredata.zendesk.com/api/v2/ticket_metrics.json?page=2",
+           next_page: "https://treasuredata.zendesk.com/api/v2/incremental/tickets
+ .json?include=metric_sets&start_time=1488535542",
          }.to_json
        ].join("\r\n")
  
@@ -97,18 +131,20 @@ module Embulk
      end
  
      test "fetch ticket_metrics all page" do
-       records = 100.times.map{|n| {"id" => n}}
+       records = 100.times.map{|n| {"id" => n, "ticket_id" => n+1}}
        second_results = [
-         {"id" => 101}
+         {"id" => 101, "ticket_id" => 101}
        ]
+       end_time = 1488535542
        @httpclient.test_loopback_http_response << [
          "HTTP/1.1 200",
          "Content-Type: application/json",
          "",
          {
-           ticket_metrics: records,
-           count: records.length + second_results.length,
-           next_page: "https://treasuredata.zendesk.com/api/v2/ticket_metrics.json?page=2",
+           metric_sets: records,
+           count: 1000,
+           next_page: "#{login_url}/api/v2/incremental/tickets.json?include=metric_sets&start_time=1488535542",
+           end_time: end_time
          }.to_json
        ].join("\r\n")
  
@@ -117,55 +153,59 @@ module Embulk
          "Content-Type: application/json",
          "",
          {
-           ticket_metrics: second_results,
-           count: records.length + second_results.length,
+           metric_sets: second_results,
+           count: second_results.size,
            next_page: nil,
          }.to_json
        ].join("\r\n")
+       # lock = Mutex.new
+       result_array = records + second_results
+       counter = Concurrent::AtomicFixnum.new(0)
+       handler = proc { |record|
+         assert_include(result_array, record)
+         counter.increment
+       }
+ 
+       proxy(@httpclient).get("#{login_url}/api/v2/incremental/tickets.json", {:include => "metric_sets", :start_time => 0}, anything)
+       proxy(@httpclient).get("#{login_url}/api/v2/incremental/tickets.json", {:include => "metric_sets", :start_time => end_time}, anything)
  
-       handler = proc { }
-       records.each do |record|
-         mock(handler).call(record)
-       end
-       second_results.each do |record|
-         mock(handler).call(record)
-       end
        client.ticket_metrics(false, &handler)
+       assert_equal(counter.value, result_array.size)
      end
  
-     test "fetch tickets without duplicated" do
+     test "fetch ticket_metrics without duplicated" do
        records = [
-         {"id" => 1},
-         {"id" => 2},
-         {"id" => 1},
-         {"id" => 1},
+         {"id" => 1, "ticket_id" => 100},
+         {"id" => 2, "ticket_id" => 200},
+         {"id" => 1, "ticket_id" => 100},
+         {"id" => 1, "ticket_id" => 100},
        ]
        @httpclient.test_loopback_http_response << [
          "HTTP/1.1 200",
          "Content-Type: application/json",
          "",
          {
-           ticket_metrics: records,
+           metric_sets: records,
            count: records.length,
          }.to_json
        ].join("\r\n")
-
-       handler = proc { }
-       mock(handler).call(anything).twice
+       counter = Concurrent::AtomicFixnum.new(0)
+       handler = proc { counter.increment }
        client.ticket_metrics(false, &handler)
+       assert_equal(2, counter.value)
      end
  
-     test "fetch tickets with next_page" do
-       end_time = Time.now.to_i
-
+     test "fetch ticket_metrics with next_page" do
+       end_time = 1488535542
        response_1 = [
          "HTTP/1.1 200",
          "Content-Type: application/json",
          "",
          {
-           ticket_metrics: 100.times.map{|n| {"id" => n}},
-           count: 101,
-           next_page: "https://treasuredata.zendesk.com/api/v2/ticket_metrics.json?page=2",
+           metric_sets: 100.times.map{|n| {"id" => n, "ticket_id" => n+1}},
+           count: 1001,
+           end_time: end_time,
+           next_page: "#{login_url}/api/v2/incremental/tickets.json?include=metric_sets&start_time=1488535542",
          }.to_json
        ].join("\r\n")
  
@@ -174,17 +214,20 @@ module Embulk
          "Content-Type: application/json",
          "",
          {
-           ticket_metrics: [{"id" => 101}],
+           metric_sets: [{"id" => 101, "ticket_id" => 101}],
            count: 101,
          }.to_json
        ].join("\r\n")
  
+ 
        @httpclient.test_loopback_http_response << response_1
        @httpclient.test_loopback_http_response << response_2
-
-       handler = proc { }
-       mock(handler).call(anything).times(101)
+       counter = Concurrent::AtomicFixnum.new(0)
+       handler = proc { counter.increment }
+       proxy(@httpclient).get("#{login_url}/api/v2/incremental/tickets.json", {:include => "metric_sets", :start_time => 0}, anything)
+       proxy(@httpclient).get("#{login_url}/api/v2/incremental/tickets.json", {:include => "metric_sets", :start_time => end_time}, anything)
        client.ticket_metrics(false, &handler)
+       assert_equal(101, counter.value)
      end
  
      test "raise DataError when invalid JSON response" do
@@ -232,7 +275,8 @@ module Embulk
      end
  
      test "invoke export when partial=false" do
-       mock(client).export_parallel(anything, "ticket_fields")
+       # Added default `start_time`
+       mock(client).export_parallel(anything, "ticket_fields", 0)
        client.ticket_fields(false)
      end
    end
@@ -244,7 +288,8 @@ module Embulk
      end
  
      test "invoke export when partial=false" do
-       mock(client).export_parallel(anything, "ticket_forms")
+       # Added default `start_time`
+       mock(client).export_parallel(anything, "ticket_forms", 0)
        client.ticket_forms(false)
      end
    end
@@ -520,6 +565,13 @@ module Embulk
      end
    end
  
+   sub_test_case "should not create new instance of httpclient" do
+     test "does not create a new instance when re-called" do
+       client = Client.new(login_url: login_url, auth_method: "token", username: username, token: token)
+       assert client.httpclient == client.httpclient
+     end
+   end
+ 
    def login_url
      "http://example.com"
    end
@@ -549,6 +549,43 @@ module Embulk
      end
    end
  
+   sub_test_case "flush each 10k records" do
+     setup do
+       stub(Embulk).logger { Logger.new(File::NULL) }
+       stub(@plugin).preview? { false }
+       @httpclient.test_loopback_http_response << [
+         "HTTP/1.1 200",
+         "Content-Type: application/json",
+         "",
+         {
+           tickets: (1..20000).map { |i| { 'id' => i } },
+           count: 20000,
+           end_time: 0,
+         }.to_json
+       ].join("\r\n")
+       # to stop pagination (count < 1000)
+       @httpclient.test_loopback_http_response << [
+         "HTTP/1.1 200",
+         "Content-Type: application/json",
+         "",
+         {
+           tickets: [{ 'id' => 20001 }],
+           count: 1,
+           end_time: 0,
+         }.to_json
+       ].join("\r\n")
+     end
+ 
+     test "flush is called twice" do
+       omit("This test is no longer valid; flushing has been removed")
+       mock(page_builder).add(anything).times(20001)
+       mock(page_builder).flush.times(2)
+       mock(page_builder).finish
+ 
+       @plugin.run
+     end
+   end
+ 
  end
  
  def yml
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: embulk-input-zendesk
  version: !ruby/object:Gem::Version
-   version: 0.1.13
+   version: 0.2.6
  platform: ruby
  authors:
  - uu59
@@ -10,7 +10,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2017-03-23 00:00:00.000000000 Z
+ date: 2017-05-23 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    requirement: !ruby/object:Gem::Requirement
@@ -40,6 +40,20 @@ dependencies:
      - - ">="
        - !ruby/object:Gem::Version
          version: '0'
+ - !ruby/object:Gem::Dependency
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+   name: concurrent-ruby
+   prerelease: false
+   type: :runtime
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
  - !ruby/object:Gem::Dependency
    requirement: !ruby/object:Gem::Requirement
      requirements: