telekinesis 2.0.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. checksums.yaml +7 -0
  2. data/.gitignore +5 -0
  3. data/.ruby-version +1 -0
  4. data/Gemfile +2 -0
  5. data/README.md +401 -0
  6. data/Rakefile +111 -0
  7. data/ext/.gitignore +3 -0
  8. data/ext/pom.xml +63 -0
  9. data/ext/pom.xml.template +65 -0
  10. data/ext/src/main/java/com/kickstarter/jruby/Telekinesis.java +103 -0
  11. data/lib/telekinesis/aws/client_adapter.rb +61 -0
  12. data/lib/telekinesis/aws/java_client_adapter.rb +72 -0
  13. data/lib/telekinesis/aws/ruby_client_adapter.rb +40 -0
  14. data/lib/telekinesis/aws.rb +9 -0
  15. data/lib/telekinesis/consumer/base_processor.rb +12 -0
  16. data/lib/telekinesis/consumer/block.rb +22 -0
  17. data/lib/telekinesis/consumer/distributed_consumer.rb +114 -0
  18. data/lib/telekinesis/consumer.rb +3 -0
  19. data/lib/telekinesis/java_util.rb +46 -0
  20. data/lib/telekinesis/logging/java_logging.rb +18 -0
  21. data/lib/telekinesis/logging/ruby_logger_handler.rb +54 -0
  22. data/lib/telekinesis/producer/async_producer.rb +157 -0
  23. data/lib/telekinesis/producer/async_producer_worker.rb +110 -0
  24. data/lib/telekinesis/producer/noop_failure_handler.rb +12 -0
  25. data/lib/telekinesis/producer/sync_producer.rb +52 -0
  26. data/lib/telekinesis/producer/warn_failure_handler.rb +25 -0
  27. data/lib/telekinesis/producer.rb +4 -0
  28. data/lib/telekinesis/telekinesis-2.0.0.jar +0 -0
  29. data/lib/telekinesis/version.rb +3 -0
  30. data/lib/telekinesis.rb +14 -0
  31. data/telekinesis.gemspec +21 -0
  32. data/test/aws/test_client_adapter.rb +29 -0
  33. data/test/aws/test_java_client_adapter.rb +72 -0
  34. data/test/producer/test_async_producer.rb +158 -0
  35. data/test/producer/test_async_producer_worker.rb +390 -0
  36. data/test/producer/test_helper.rb +1 -0
  37. data/test/producer/test_sync_producer.rb +144 -0
  38. data/test/test_helper.rb +6 -0
  39. metadata +149 -0
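
For orientation before the file contents below: the producer tests in this diff exercise a small public surface, namely SyncProducer#put / #put_all and a failure-handler object whose on_record_failure, on_kinesis_retry, and on_kinesis_failure callbacks the async worker invokes. The sketch that follows is a minimal illustration inferred only from the test code shown further down; it is not part of the released package, and the stream name, client variable, and send_size value are placeholders.

    # Mirrors the callbacks that CapturingFailureHandler implements in
    # test/producer/test_async_producer_worker.rb.
    class LoggingFailureHandler
      # Called with records the stream rejected: [key, value, error_code, error_message].
      def on_record_failure(failed_records)
        failed_records.each do |key, _value, code, message|
          warn "record #{key} failed: #{code} #{message}"
        end
      end

      # Called before a retryable Kinesis error is retried.
      def on_kinesis_retry(error, items)
        warn "retrying #{items.size} records after #{error.class}"
      end

      # Called when retries are exhausted or the error is not retryable.
      def on_kinesis_failure(error, items)
        warn "dropping #{items.size} records: #{error}"
      end
    end

    # SyncProducer as the tests construct it: (stream, client, options).
    # 'my-stream', `client`, and send_size: 30 are illustrative values.
    # require "telekinesis"
    # producer = Telekinesis::Producer::SyncProducer.new('my-stream', client, send_size: 30)
    # producer.put('key', 'value')                              # single record
    # failures = producer.put_all([['k1', 'v1'], ['k2', 'v2']]) # batched into requests of at most send_size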
data/test/producer/test_async_producer_worker.rb ADDED
@@ -0,0 +1,390 @@
+ require_relative "test_helper"
+
+ class AsyncProducerWorkerTest < Minitest::Test
+   java_import java.util.concurrent.TimeUnit
+   java_import java.util.concurrent.ArrayBlockingQueue
+
+   def string_from_bytebuffer(bb)
+     String.from_java_bytes bb.array
+   end
+
+   class UnretryableAwsError < com.amazonaws.AmazonClientException
+     def is_retryable
+       false
+     end
+   end
+
+   class CapturingFailureHandler
+     attr_reader :retries, :final_err
+
+     def initialize
+       @retries = 0
+     end
+
+     def failed_records
+       @failed_records ||= []
+     end
+
+     def on_record_failure(fails)
+       failed_records << fails
+     end
+
+     def on_kinesis_retry(error, items)
+       @retries += 1
+     end
+
+     def on_kinesis_failure(error, items)
+       @final_err = [error, items]
+     end
+   end
+
+   StubProducer = Struct.new(:stream, :client, :failure_handler)
+
+   # NOTE: This stub mocks the behavior of timing out on poll once all of the
+   # items have been drained from the internal list.
+   class StubQueue
+     def initialize(items)
+       @items = items
+     end
+
+     def poll(duration, unit)
+       @items.shift
+     end
+   end
+
+   # A wrapper over ABQ that inserts shutdown into itself after a given number
+   # of calls to poll. Not thread-safe.
+   class ShutdownAfterQueue
+     def initialize(shutdown_after)
+       @shutdown_after = shutdown_after
+       @called = 0
+       @under = ArrayBlockingQueue.new(10)
+     end
+
+     def poll(duration, unit)
+       @called += 1
+       if @called > @shutdown_after
+         @under.put(Telekinesis::Producer::AsyncProducerWorker::SHUTDOWN)
+       end
+       @under.poll(duration, unit)
+     end
+   end
+
+   class CapturingClient
+     attr_reader :requests
+
+     def initialize(responses)
+       @requests = ArrayBlockingQueue.new(1000)
+       @responses = responses
+     end
+
+     def put_records(stream, items)
+       @requests.put([stream, items])
+       @responses.shift || []
+     end
+   end
+
+   class ExplodingClient
+     def initialize(exception)
+       @exception = exception
+     end
+
+     def put_records(stream, items)
+       raise @exception
+     end
+   end
+
+   def stub_producer(stream, responses = [])
+     StubProducer.new(stream, CapturingClient.new(responses), CapturingFailureHandler.new)
+   end
+
+   # NOTE: This always adds SHUTDOWN to the end of the list so that the worker
+   # can be run in the test thread and there's no need to deal with coordination
+   # across multiple threads. To simulate the worker timing out on a queue.poll
+   # just add 'nil' to your list of items in the queue at the appropriate place.
+   def queue_with(*items)
+     to_put = items + [Telekinesis::Producer::AsyncProducerWorker::SHUTDOWN]
+     StubQueue.new(to_put)
+   end
+
+   def build_worker
+     Telekinesis::Producer::AsyncProducerWorker.new(
+       @producer,
+       @queue,
+       @send_size,
+       @send_every,
+       @retries,
+       @retry_interval
+     )
+   end
+
+   def records_as_kv_pairs(request)
+     request.records.map{|r| [r.partition_key, string_from_bytebuffer(r.data)]}
+   end
+
+   context "producer worker" do
+     setup do
+       @send_size = 10
+       @send_every = 100 # ms
+       @retries = 4
+       @retry_interval = 0.01
+     end
+
+     context "with only SHUTDOWN in the queue" do
+       setup do
+         @producer = stub_producer('test')
+         @queue = queue_with() # shutdown is always added
+         @worker = build_worker
+       end
+
+       should "shut down the worker" do
+         @worker.run
+         assert(@worker.instance_variable_get(:@shutdown))
+       end
+     end
+
+     context "with [item, SHUTDOWN] in the queue" do
+       setup do
+         @producer = stub_producer('test')
+         @queue = queue_with(
+           ["key", "value"],
+         )
+         @worker = build_worker
+       end
+
+       should "put data before shutting down the worker" do
+         @worker.run
+         stream, items = @producer.client.requests.first
+         assert_equal(stream, 'test', "request should have the correct stream name")
+         assert_equal([["key", "value"]], items, "Request payload should be kv pairs")
+       end
+     end
+
+     context "with nothing in the queue" do
+       setup do
+         @producer = stub_producer('test')
+         @queue = ShutdownAfterQueue.new(5)
+         @worker = build_worker
+         @starting_put_at = @worker.instance_variable_get(:@last_poll_at)
+       end
+
+       should "update the internal last_poll_at counter and sleep on poll" do
+         @worker.run
+         refute_equal(@starting_put_at, @worker.instance_variable_get(:@last_poll_at))
+       end
+     end
+
+     context "with buffered data that times out" do
+       setup do
+         @items = [["key", "value"]]
+
+         @producer = stub_producer('test')
+         # Explicitly add 'nil' to fake the queue being empty
+         @queue = queue_with(*(@items + [nil]))
+         @worker = build_worker
+       end
+
+       should "send whatever is in the queue" do
+         @worker.run
+         stream, items = @producer.client.requests.first
+         assert_equal('test', stream, "request should have the correct stream name")
+         assert_equal(items, @items, "Request payload should be kv pairs")
+       end
+     end
+
+     context "with fewer than send_size items in queue" do
+       setup do
+         num_items = @send_size - 1
+         @items = num_items.times.map{|i| ["key-#{i}", "value-#{i}"]}
+
+         @producer = stub_producer('test')
+         @queue = queue_with(*@items)
+         @worker = build_worker
+       end
+
+       should "send one request" do
+         @worker.run
+         stream, items = @producer.client.requests.first
+         assert_equal('test', stream, "request should have the correct stream name")
+         assert_equal(@items, items, "Request payload should be kv pairs")
+       end
+     end
+
+     context "with more than send_size items in queue" do
+       setup do
+         num_items = (@send_size * 2) - 1
+         @items = num_items.times.map{|i| ["key-#{i}", "value-#{i}"]}
+
+         @producer = stub_producer('test')
+         @queue = queue_with(*@items)
+         @worker = build_worker
+       end
+
+       should "send multiple requests of at most send_size" do
+         @worker.run
+         expected = @items.each_slice(@send_size).to_a
+         expected.zip(@producer.client.requests) do |kv_pairs, (stream, batch)|
+           assert_equal('test', stream, "Request should have the correct stream name")
+           assert_equal(batch, kv_pairs, "Request payload should be kv pairs")
+         end
+       end
+     end
+
+     context "when some records return an unretryable error response" do
+       setup do
+         num_items = @send_size - 1
+         @items = num_items.times.map{|i| ["key-#{i}", "value-#{i}"]}
+         @failed_items = @items.each_with_index.map do |item, idx|
+           if idx.even?
+             k, v = item
+             [k, v, "some_code", "message"]
+           else
+             nil
+           end
+         end
+         @failed_items.compact!
+
+         @producer = stub_producer('test', [@failed_items])
+         @queue = queue_with(*@items)
+         @worker = build_worker
+       end
+
+       should "call the failure handler with all failed records" do
+         @worker.run
+         assert_equal([@failed_items], @producer.failure_handler.failed_records)
+       end
+     end
+
+     context "when some records return a retryable error response" do
+       setup do
+         num_items = @send_size - 1
+         @items = num_items.times.map{|i| ["key-#{i}", "value-#{i}"]}
+         @failed_items = @items.each_with_index.map do |item, idx|
+           if idx.even?
+             k, v = item
+             [k, v, "InternalFailure", "message"]
+           else
+             nil
+           end
+         end
+         @failed_items.compact!
+
+         @producer = stub_producer('test', [@failed_items, []])
+         @queue = queue_with(*@items)
+         @worker = build_worker
+       end
+
+       should "not call the failure handler with any failed records" do
+         @worker.run
+         assert_equal([], @producer.failure_handler.failed_records)
+       end
+
+       should "retry the request" do
+         @worker.run
+         assert_equal(2, @producer.client.requests.size)
+       end
+     end
+
+     context "when retryable responses fail too many times" do
+       setup do
+         num_items = @send_size - 1
+         @items = num_items.times.map{|i| ["key-#{i}", "value-#{i}"]}
+         @failed_items = @items.each_with_index.map do |item, idx|
+           if idx.even?
+             k, v = item
+             [k, v, "InternalFailure", "message"]
+           else
+             nil
+           end
+         end
+         @failed_items.compact!
+
+         @producer = stub_producer('test', [@failed_items] * (@retries + 1))
+         @queue = queue_with(*@items)
+         @worker = build_worker
+       end
+
+       should "call the failure handler with all failed records" do
+         @worker.run
+         assert_equal([@failed_items], @producer.failure_handler.failed_records)
+       end
+
+       should "retry the request" do
+         @worker.run
+         assert_equal(@retries, @producer.client.requests.size)
+       end
+     end
+
+     context "with a mix of retryable error responses" do
+       setup do
+         num_items = @send_size - 1
+         @items = num_items.times.map{|i| ["key-#{i}", "value-#{i}"]}
+         @first_response = @items.each_with_index.map do |item, idx|
+           k, v = item
+           [k, v, idx.even? ? "InternalFailure" : "WHATEVER", "message"]
+         end
+         @did_retry = @first_response.select{|_, _, m, _| m == "InternalFailure"}
+         @no_retry = @first_response.select{|_, _, m, _| m == "WHATEVER"}
+
+         @producer = stub_producer('test', [@first_response, []])
+         @queue = queue_with(*@items)
+         @worker = build_worker
+       end
+
+       should "retry the request" do
+         @worker.run
+         assert_equal(2, @producer.client.requests.size)
+         _, items = @producer.client.requests.to_a.last
+         assert_equal(@did_retry.map{|k, v, _, _| [k, v]}, items)
+       end
+
+       should "call the failure handler with only the records that failed" do
+         @worker.run
+         assert_equal([@no_retry], @producer.failure_handler.failed_records)
+       end
+     end
+
+     context "when the client throws a retryable exception" do
+       setup do
+         @boom = Telekinesis::Aws::KinesisError.new(com.amazonaws.AmazonClientException.new("boom"))
+         @producer = StubProducer.new(
+           'stream',
+           ExplodingClient.new(@boom),
+           CapturingFailureHandler.new
+         )
+         @queue = queue_with(['foo', 'bar'])
+         @worker = build_worker
+       end
+
+       should "call the failure handler on retries and errors" do
+         @worker.run
+         assert_equal((@retries - 1), @producer.failure_handler.retries)
+         err, items = @producer.failure_handler.final_err
+         assert_equal(@boom, err)
+         assert_equal([['foo', 'bar']], items)
+       end
+     end
+
+     context "when the client throws an unretryable exception" do
+       setup do
+         @boom = Telekinesis::Aws::KinesisError.new(UnretryableAwsError.new("boom"))
+         @producer = StubProducer.new(
+           'stream',
+           ExplodingClient.new(@boom),
+           CapturingFailureHandler.new
+         )
+         @queue = queue_with(['foo', 'bar'])
+         @worker = build_worker
+       end
+
+       should "call the failure handler on error but not on retry" do
+         @worker.run
+         assert_equal(0, @producer.failure_handler.retries)
+         err, items = @producer.failure_handler.final_err
+         assert_equal(@boom, err)
+         assert_equal([['foo', 'bar']], items)
+       end
+     end
+
+   end
+ end
data/test/producer/test_helper.rb ADDED
@@ -0,0 +1 @@
+ require_relative "../test_helper"
data/test/producer/test_sync_producer.rb ADDED
@@ -0,0 +1,144 @@
+ require_relative "test_helper"
+
+
+ class SyncProducerTest < Minitest::Test
+   StubPutRecordResponse = Struct.new(:shard_id, :sequence_number, :error_code, :error_message)
+
+   class StubClient
+     attr_reader :requests
+
+     def initialize(*responses)
+       @requests = []
+       @responses = responses
+     end
+
+     def put_record(stream, key, value)
+       @requests << [stream, [key, value]]
+       @responses.shift || []
+     end
+
+     def put_records(stream, items)
+       @requests << [stream, items]
+       @responses.shift || []
+     end
+   end
+
+   class TestingProducer < Telekinesis::Producer::SyncProducer
+     def failures
+       @failures ||= []
+     end
+
+     def on_record_failure(fs)
+       failures << fs
+     end
+   end
+
+   context "SyncProducer" do
+     context "#put" do
+       setup do
+         @expected_response = StubPutRecordResponse.new(123, 123)
+         @client = StubClient.new(@expected_response)
+         @producer = TestingProducer.new('stream', @client)
+       end
+
+       should "call the underlying client's put_record" do
+         assert(@producer.failures.empty?)
+         assert_equal(@expected_response, @producer.put('key', 'value'))
+         assert_equal(['stream', ['key', 'value']], @client.requests.first)
+       end
+     end
+
+     context "#put_all" do
+       context "with an empty argument" do
+         setup do
+           @client = StubClient.new([])
+           @producer = TestingProducer.new('stream', @client)
+           @actual_failures = @producer.put_all([])
+         end
+
+         should "send no data" do
+           assert(@client.requests.empty?)
+           assert(@actual_failures.empty?)
+         end
+       end
+
+       context "with an argument smaller than :send_size" do
+         setup do
+           @send_size = 30
+           @items = (@send_size - 1).times.map{|i| ["key-#{i}", "value-#{i}"]}
+         end
+
+         context "when no records fail" do
+           setup do
+             @client = StubClient.new([])
+             @producer = TestingProducer.new('stream', @client, {send_size: @send_size})
+             @actual_failures = @producer.put_all(@items)
+           end
+
+           should "send one batch and return nothing" do
+             assert(@actual_failures.empty?)
+             assert_equal([['stream', @items]], @client.requests)
+           end
+         end
+
+         context "when some records fail" do
+           setup do
+             @client = StubClient.new([["key-2", "value-2", "fake error", "message"]])
+             @producer = TestingProducer.new('stream', @client, {send_size: @send_size})
+             @actual_failures = @producer.put_all(@items)
+           end
+
+           should "call on_record_failure" do
+             assert_equal([['stream', @items]], @client.requests)
+             assert_equal([["key-2", "value-2", "fake error", "message"]], @actual_failures)
+           end
+         end
+       end
+
+       context "with an argument larger than :send_size" do
+         setup do
+           @send_size = 30
+           @items = (@send_size + 3).times.map{|i| ["key-#{i}", "value-#{i}"]}
+           # expected_requests looks like:
+           # [
+           #   ['stream', [[k1, v1], [k2, v2], ...]],
+           #   ['stream', [[kn, vn], [k(n+1), v(n+1)], ...]]
+           # ]
+           @expected_requests = @items.each_slice(@send_size).map{|batch| ['stream', batch]}
+         end
+
+         context "when no records fail" do
+           setup do
+             @client = StubClient.new([])
+             @producer = TestingProducer.new('stream', @client, {send_size: @send_size})
+             @actual_failures = @producer.put_all(@items)
+           end
+
+           should "send multiple batches and return nothing" do
+             assert(@actual_failures.empty?)
+             assert_equal(@expected_requests, @client.requests)
+           end
+         end
+
+         context "when some records fail" do
+           setup do
+             @error_respones = [
+               [["k1", "v1", "err", "message"], ["k2", "v2", "err", "message"]],
+               [["k-next", "v-next", "err", "message"]]
+             ]
+             @expected_failures = @error_respones.flat_map {|x| x }
+
+             @client = StubClient.new(*@error_respones)
+             @producer = TestingProducer.new('stream', @client, {send_size: @send_size})
+             @actual_failures = @producer.put_all(@items)
+           end
+
+           should "return the failures" do
+             assert_equal(@expected_requests, @client.requests)
+             assert_equal(@expected_failures, @actual_failures)
+           end
+         end
+       end
+     end
+   end
+ end
data/test/test_helper.rb ADDED
@@ -0,0 +1,6 @@
+ require "minitest/autorun"
+ require "minitest/pride"
+ require "bundler/setup"
+ Bundler.require(:development)
+
+ require "telekinesis"
metadata ADDED
@@ -0,0 +1,149 @@
+ --- !ruby/object:Gem::Specification
+ name: telekinesis
+ version: !ruby/object:Gem::Version
+   version: 2.0.0
+ platform: java
+ authors:
+ - Ben Linsay
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2015-09-02 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   name: aws-sdk
+   prerelease: false
+   type: :runtime
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   name: rake
+   prerelease: false
+   type: :development
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   name: nokogiri
+   prerelease: false
+   type: :development
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   name: minitest
+   prerelease: false
+   type: :development
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   name: shoulda-context
+   prerelease: false
+   type: :development
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ description:
+ email: ben@kickstarter.com
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - .gitignore
+ - .ruby-version
+ - Gemfile
+ - README.md
+ - Rakefile
+ - ext/.gitignore
+ - ext/pom.xml
+ - ext/pom.xml.template
+ - ext/src/main/java/com/kickstarter/jruby/Telekinesis.java
+ - lib/telekinesis.rb
+ - lib/telekinesis/aws.rb
+ - lib/telekinesis/aws/client_adapter.rb
+ - lib/telekinesis/aws/java_client_adapter.rb
+ - lib/telekinesis/aws/ruby_client_adapter.rb
+ - lib/telekinesis/consumer.rb
+ - lib/telekinesis/consumer/base_processor.rb
+ - lib/telekinesis/consumer/block.rb
+ - lib/telekinesis/consumer/distributed_consumer.rb
+ - lib/telekinesis/java_util.rb
+ - lib/telekinesis/logging/java_logging.rb
+ - lib/telekinesis/logging/ruby_logger_handler.rb
+ - lib/telekinesis/producer.rb
+ - lib/telekinesis/producer/async_producer.rb
+ - lib/telekinesis/producer/async_producer_worker.rb
+ - lib/telekinesis/producer/noop_failure_handler.rb
+ - lib/telekinesis/producer/sync_producer.rb
+ - lib/telekinesis/producer/warn_failure_handler.rb
+ - lib/telekinesis/version.rb
+ - telekinesis.gemspec
+ - test/aws/test_client_adapter.rb
+ - test/aws/test_java_client_adapter.rb
+ - test/producer/test_async_producer.rb
+ - test/producer/test_async_producer_worker.rb
+ - test/producer/test_helper.rb
+ - test/producer/test_sync_producer.rb
+ - test/test_helper.rb
+ - lib/telekinesis/telekinesis-2.0.0.jar
+ homepage: https://github.com/kickstarter/telekinesis
+ licenses: []
+ metadata: {}
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 2.1.9
+ signing_key:
+ specification_version: 4
+ summary: High level clients for Amazon Kinesis
+ test_files: []