telekinesis 2.0.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39) hide show
  1. checksums.yaml +7 -0
  2. data/.gitignore +5 -0
  3. data/.ruby-version +1 -0
  4. data/Gemfile +2 -0
  5. data/README.md +401 -0
  6. data/Rakefile +111 -0
  7. data/ext/.gitignore +3 -0
  8. data/ext/pom.xml +63 -0
  9. data/ext/pom.xml.template +65 -0
  10. data/ext/src/main/java/com/kickstarter/jruby/Telekinesis.java +103 -0
  11. data/lib/telekinesis/aws/client_adapter.rb +61 -0
  12. data/lib/telekinesis/aws/java_client_adapter.rb +72 -0
  13. data/lib/telekinesis/aws/ruby_client_adapter.rb +40 -0
  14. data/lib/telekinesis/aws.rb +9 -0
  15. data/lib/telekinesis/consumer/base_processor.rb +12 -0
  16. data/lib/telekinesis/consumer/block.rb +22 -0
  17. data/lib/telekinesis/consumer/distributed_consumer.rb +114 -0
  18. data/lib/telekinesis/consumer.rb +3 -0
  19. data/lib/telekinesis/java_util.rb +46 -0
  20. data/lib/telekinesis/logging/java_logging.rb +18 -0
  21. data/lib/telekinesis/logging/ruby_logger_handler.rb +54 -0
  22. data/lib/telekinesis/producer/async_producer.rb +157 -0
  23. data/lib/telekinesis/producer/async_producer_worker.rb +110 -0
  24. data/lib/telekinesis/producer/noop_failure_handler.rb +12 -0
  25. data/lib/telekinesis/producer/sync_producer.rb +52 -0
  26. data/lib/telekinesis/producer/warn_failure_handler.rb +25 -0
  27. data/lib/telekinesis/producer.rb +4 -0
  28. data/lib/telekinesis/telekinesis-2.0.0.jar +0 -0
  29. data/lib/telekinesis/version.rb +3 -0
  30. data/lib/telekinesis.rb +14 -0
  31. data/telekinesis.gemspec +21 -0
  32. data/test/aws/test_client_adapter.rb +29 -0
  33. data/test/aws/test_java_client_adapter.rb +72 -0
  34. data/test/producer/test_async_producer.rb +158 -0
  35. data/test/producer/test_async_producer_worker.rb +390 -0
  36. data/test/producer/test_helper.rb +1 -0
  37. data/test/producer/test_sync_producer.rb +144 -0
  38. data/test/test_helper.rb +6 -0
  39. metadata +149 -0
checksums.yaml ADDED
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA1:
3
+ metadata.gz: 0e89258c0a400e2a01de10f23ed6cbda434ad362
4
+ data.tar.gz: e395dc8e34614c2f3600346ca0ab5e267a6b626d
5
+ SHA512:
6
+ metadata.gz: 82088c3f830e5bb89295bee228e98ee85da6b17d7838371086fafd122d6975d0054a50b193749c5303d158b9bdeb70f81e804169f7ede6a01a91c61e9a9b0311
7
+ data.tar.gz: 2b60128d819c8270b0b2ffe821c0a665877db9acfeb04c5e0bcdac3e7f0ef8f978bd6db01d23d4d4c8803287dc7c477df1266e648447aa9365407fb33028b296
data/.gitignore ADDED
@@ -0,0 +1,5 @@
1
+ *.swp
2
+ lib/telekinesis/*.jar
3
+ tmp/
4
+ Gemfile.lock
5
+ telekinesis-*.gem
data/.ruby-version ADDED
@@ -0,0 +1 @@
1
+ jruby-1.7.9
data/Gemfile ADDED
@@ -0,0 +1,2 @@
1
+ source 'https://rubygems.org'
2
+ gemspec
data/README.md ADDED
@@ -0,0 +1,401 @@
1
+ **Table of Contents**
2
+
3
+ - [Telekinesis](#telekinesis)
4
+ - [Requirements](#requirements)
5
+ - [Installing](#installing)
6
+ - [Producers](#producers)
7
+ - [SyncProducer](#syncproducer)
8
+ - [AsyncProducer](#asyncproducer)
9
+ - [Consumers](#consumers)
10
+ - [DistributedConsumer](#distributedconsumer)
11
+ - [Client State](#client-state)
12
+ - [Errors while processing records](#errors-while-processing-records)
13
+ - [Checkpoints and `INITIAL_POSITION_IN_STREAM`](#checkpoints-and-initial_position_in_stream)
14
+ - [Java client logging](#java-client-logging)
15
+ - [](#)
16
+ - [Building](#building)
17
+ - [Prerequisites](#prerequisites)
18
+ - [Build](#build)
19
+ - [Testing](#testing)
20
+
21
+ # Telekinesis
22
+
23
+ Telekinesis is a high-level client for Amazon Kinesis.
24
+
25
+ The library provides a high-throughput asynchronous producer and wraps the
26
+ [Kinesis Client Library](https://github.com/awslabs/amazon-kinesis-client) to
27
+ provide an easy interface for writing consumers.
28
+
29
+ ## Requirements
30
+
31
+ Telekinesis runs on JRuby 1.7.x or later, with at least Java 6.
32
+
33
+ If you want to build from source, you need to have Apache Maven installed.
34
+
35
+ ## Installing
36
+
37
+ ```
38
+ gem install telekinesis
39
+ ```
40
+
41
+ ## Producers
42
+
43
+ Telekinesis includes two high-level
44
+ [Producers](http://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-producers.html).
45
+
46
+ Telekinesis assumes that records are `[key, value]` pairs of strings. The key
47
+ *must* be a string as enforced by Kinesis itself. Keys are used by the service
48
+ to partition data into shards. Values can be any old blob of data, but for
49
+ simplicity, Telekinesis expects strings.
50
+
51
+ Both keys and values should respect any Kinesis
52
+ [limits](http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
53
+ and all of the [restrictions](http://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html)
54
+ in the PutRecords API documentation.
55
+
56
+ ### SyncProducer
57
+
58
+ The `SyncProducer` sends data to Kinesis every time `put` or `put_records`
59
+ is called. These calls will block until the call to Kinesis returns.
60
+
61
+
62
+ ```ruby
63
+ require 'telekinesis'
64
+
65
+ producer = Telekinesis::Producer::SyncProducer.create(
66
+ stream: 'my stream',
67
+ credentials: {
68
+ access_key_id: 'foo',
69
+ secret_access_key: 'bar'
70
+ }
71
+ )
72
+ ```
73
+
74
+ Calls to `put` send a single record at a time to Kinesis, where calls to
75
+ `put_records` can send up to 500 records at a time, which is the Kinesis service limit.
76
+ If more than 500 records are passed to `put_records` they're grouped into batches
77
+ and sent.
78
+
79
+ > NOTE: To send fewer records to Kinesis at a time when using `put_records`,
80
+ > you can adjust the `:send_size` parameter in the `create` method.
81
+
82
+ Using `put_records` over `put` is recommended if you have any way to batch your
83
+ data. Since Kinesis has an HTTP API and often has high latency, it tends to make
84
+ sense to try and increase throughput as much as possible by batching data.
85
+
86
+ ```ruby
87
+ # file is an instance of File containing CSV data that looks like:
88
+ #
89
+ # "some,very,important,data,with,a,partition_key"
90
+ #
91
+ lines = file.lines.map do |line|
92
+ key = line.split(/,/).last
93
+ data = line
94
+ [key, data]
95
+ end
96
+
97
+ # One record at a time
98
+ lines.each do |key, data|
99
+ producer.put(key, data)
100
+ end
101
+
102
+ # Manually control your batches
103
+ lines.each_slice(200) do |batch|
104
+ producer.put_all(batch)
105
+ end
106
+
107
+ # Go hog wild
108
+ producer.put_all(lines.to_a)
109
+ ```
110
+
111
+ When something goes wrong and the Kinesis client throws an exception, it bubbles
112
+ up as a `Telekinesis::Aws::KinesisError` with the underlying exception accessible
113
+ as the `cause` field.
114
+
115
+ When some of (but maybe not all of) the records passed to `put_records` cause
116
+ problems, they're returned as an array of
117
+ `[key, value, error_code, error_message]` tuples.
118
+
119
+ ### AsyncProducer
120
+
121
+ The `AsyncProducer` queues events internally and uses background threads to send
122
+ data to Kinesis. Data is sent when a batch reaches the Kinesis limit of 500,
123
+ when the producer's timeout is reached, or when the producer is shut down.
124
+
125
+ > NOTE: You can configure the size at which a batch is sent by passing the
126
+ > `:send_size` parameter to create. The producer's internal timeout can be
127
+ > set by using the `:send_every_ms` parameter.
128
+
129
+ The API for the `AsyncProducer` looks similar to the `SyncProducer`. However,
130
+ all `put` and `put_all` calls return immediately. Both `put` and `put_all`
131
+ return `true` if the producer enqueued the data for sending later, and `false`
132
+ if the producer is not accepting data for any reason. If the producer's internal
133
+ queue fills up, calls to `put` and `put_all` will block.
134
+
135
+ Since sending (and therefore failures) happen in a different thread, you can
136
+ provide an `AsyncProducer` with a failure handler that's called whenever
137
+ something bad happens.
138
+
139
+ ```ruby
140
+ require 'telekinesis'
141
+
142
+ class MyFailureHandler
143
+ def on_record_failure(kv_pairs_and_errors)
144
+ items = kv_pairs_and_errors.map do |k, v, code, message|
145
+ maybe_log_error(code, message)
146
+ [k, v]
147
+ end
148
+ save_for_later(items)
149
+ end
150
+
151
+ def on_kinesis_error(err, items)
152
+ log_exception(err.cause)
153
+ save_for_later(items)
154
+ end
155
+ end
156
+
157
+ producer = Telekinesis::Producer::AsyncProducer.create(
158
+ stream: 'my stream',
159
+ failure_handler: MyFailureHandler.new,
160
+ send_every_ms: 1500,
161
+ credentials: {
162
+ access_key_id: 'foo',
163
+ secret_access_key: 'bar'
164
+ }
165
+ )
166
+ ```
167
+
168
+ ## Consumers
169
+
170
+ ### DistributedConsumer
171
+
172
+ The `DistributedConsumer` is a wrapper around Amazon's [Kinesis Client Library
173
+ (also called the KCL)](http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-record-processor-app.html#kinesis-record-processor-overview-kcl).
174
+
175
+ Each `DistributedConsumer` is considered to be part of a group of consumers that
176
+ make up an _application_. An application can be running on any number of hosts.
177
+ Consumers identify themselves uniquely within an application by specifying a
178
+ `worker_id`.
179
+
180
+ All of the consumers within an application attempt to distribute work evenly
181
+ between themselves by coordinating through a DynamoDB table. This coordination
182
+ ensures that a single consumer processes each shard, and that if one consumer
183
+ fails for any reason, another consumer can pick up from the point at which it
184
+ last checkpointed.
185
+
186
+ This is all part of the KCL! Telekinesis just makes it easier to use from JRuby.
187
+
188
+ Each `DistributedConsumer` has to know how to process all the data it's
189
+ retrieving from Kinesis. That's done by creating a [record
190
+ processor](http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-record-processor-implementation-app-java.html#kinesis-record-processor-implementation-interface-java)
191
+ and telling a `DistributedConsumer` how to create a processor when it becomes
192
+ responsible for a shard.
193
+
194
+ We highly recommend reading the [official
195
+ docs](http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-record-processor-implementation-app-java.html#kinesis-record-processor-implementation-interface-java)
196
+ on implementing the `IRecordProcessor` interface before you continue.
197
+
198
+ > NOTE: Since `initialize` is a reserved method, Telekinesis takes care of
199
+ > calling your `init` method whenever the KCL calls `IRecordProcessor`'s
200
+ > `initialize` method.
201
+
202
+ > NOTE: Make sure you read the Kinesis Record Processor documentation carefully.
203
+ > Failures, checkpoints, and shutting down require some attention. More on that later.
204
+
205
+ After it is created, a record processor is initialized with the ID of the shard
206
+ it's processing, and handed an enumerable of
207
+ [Records](http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/index.html?com/amazonaws/services/kinesis/AmazonKinesisClient.html) and a checkpointer (see below) every time the consumer detects new data to
208
+ process.
209
+
210
+ Defining and creating a simple processor might look like:
211
+
212
+ ```ruby
213
+ require 'telekinesis'
214
+
215
+ class MyProcessor
216
+ def init(shard_id)
217
+ @shard_id = shard_id
218
+ $stderr.puts "Started processing #{@shard_id}"
219
+ end
220
+
221
+ def process_records(records, checkpointer)
222
+ records.each {|r| puts "key=#{r.partition_key} value=#{String.from_java_bytes(r.data.array)}" }
223
+ end
224
+
225
+ def shutdown
226
+ $stderr.puts "Shutting down #{@shard_id}"
227
+ end
228
+ end
229
+
230
+ Telekinesis::Consumer::DistributedConsumer.new(stream: 'some-events', app: 'example') do
231
+ MyProcessor.new
232
+ end
233
+ ```
234
+
235
+ To make defining record processors easier, Telekinesis comes with a `Block`
236
+ processor that lets you use a block to specify your `process_records` method.
237
+ Use this if you don't need to do any explicit startup or shutdown in a record
238
+ processor.
239
+
240
+ ```ruby
241
+ require 'telekinesis'
242
+
243
+ Telekinesis::Consumer::DistributedConsumer.new(stream: 'some-events', app: 'example') do
244
+ Telekinesis::Consumer::Block.new do |records, checkpointer|
245
+ records.each {|r| puts "key=#{r.partition_key} value=#{String.from_java_bytes(r.data.array)}" }
246
+ end
247
+ end
248
+ ```
249
+
250
+ Once you get into building a client application, you'll probably want
251
+ to know about some of the following advanced tips and tricks.
252
+
253
+ #### Client State
254
+
255
+ Each KCL Application gets its own DynamoDB table that stores all of this state.
256
+ The `:application` name is used as the DynamoDB table name, so beware of
257
+ namespace collisions if you use DynamoDB on its own. Altering or resetting any
258
+ of this state involves manually altering the application's Dynamo table.
259
+
260
+ #### Errors while processing records
261
+
262
+ When a call to `process_records` fails, the KCL expects you to handle the
263
+ failure and try to reprocess. If you let an exception escape, it happily moves
264
+ on to the next batch of records from Kinesis and will let you checkpoint further
265
+ on down the road.
266
+
267
+ From the [official docs](http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-record-processor-implementation-app-java.html):
268
+
269
+ > The KCL relies on processRecords to handle any exceptions that arise from
270
+ > processing the data records. If an exception is thrown from processRecords,
271
+ > the KCL skips over the data records that were passed prior to the exception;
272
+ > that is, these records are not re-sent to the record processor that threw the
273
+ > exception or to any other record processor in the application.
274
+
275
+ The moral of the story is that you should be absolutely sure you catch any
276
+ exceptions that get thrown in your `process_records` implementation. If you
277
+ don't, you can (silently) drop data on the floor.
278
+
279
+ If something terrible happens and you can't attempt to re-read the list of
280
+ records and re-do whatever work you needed to do in process records, we've been
281
+ advised by the Kinesis team that killing the entire JVM that's running the
282
+ worker is the safest thing to do. On restart, the consumer (or another consumer
283
+ in the application group) will pick up the orphaned shards and attempt to
284
+ restart from the last available checkpoint.
285
+
286
+ #### Checkpoints and `INITIAL_POSITION_IN_STREAM`
287
+
288
+ The second object passed to `process_records` is a checkpointer. This can be
289
+ used to checkpoint all records that have been passed to the processor so far
290
+ (by just calling `checkpointer.checkpoint`) or up to a particular sequence
291
+ number (by calling `checkpointer.checkpoint(record.sequence_number)`).
292
+
293
+ While a `DistributedConsumer` can be initialized with an
294
+ `:initial_position_in_stream` option, any existing checkpoint for a shard will
295
+ take precedence over that value. Furthermore, any existing STATE in DynamoDB will
296
+ take precedence, so if you start a consumer with `initial_position_in_stream: 'LATEST'`
297
+ and then restart with `initial_position_in_stream: 'TRIM_HORIZON'` you still end
298
+ up starting from `LATEST`.
299
+
300
+ ## Java client logging
301
+
302
+ The AWS Java SDK can be extremely noisy and hard to control, since it logs
303
+ through `java.util.logging`.
304
+
305
+ Telekinesis comes with a shim that can silence all of that logging or redirect
306
+ it to a Ruby Logger of your choice. This isn't fine-grained control - you're
307
+ capturing or disabling ALL logging from any Java dependency that uses
308
+ `java.util.logging` - so use it with care.
309
+
310
+ To entirely disable logging:
311
+
312
+ ```ruby
313
+ Telekinesis::Logging.disable_java_logging
314
+ ```
315
+
316
+ To capture all logging and send it through a Ruby logger:
317
+
318
+ ```ruby
319
+ Telekinesis::Logging.capture_java_logging(Logger.new($stderr))
320
+ ```
321
+
322
+ ----
323
+
324
+ # Building
325
+
326
+ ## Prerequisites
327
+
328
+ * JRuby 1.7.9 or later.
329
+ * Apache Maven
330
+
331
+ ## Build
332
+
333
+ Install JRuby 1.7.9 or later, for example with `rbenv` you would:
334
+
335
+ ```
336
+ $ rbenv install jruby-1.7.9
337
+ ```
338
+
339
+ Install Bundler and required Gems.
340
+
341
+ ```
342
+ $ gem install bundler
343
+ $ bundle install
344
+ ```
345
+
346
+ Install Apache Maven.
347
+
348
+ On Ubuntu or related use:
349
+
350
+ ```
351
+ $ sudo apt-get install maven
352
+ ```
353
+
354
+ The easiest method on OSX is via `brew`.
355
+
356
+ ```
357
+ $ brew install maven
358
+ ```
359
+
360
+ Ensure `JAVA_HOME` is set on OSX.
361
+
362
+ Ensure your `JAVA_HOME` environment variable is set. In Bash for example
363
+ add the following to `~/.bash_profile`.
364
+
365
+ ```
366
+ export JAVA_HOME=$(/usr/libexec/java_home)
367
+ ```
368
+
369
+ Then run:
370
+
371
+ ```
372
+ $ source ~/.bash_profile
373
+ ```
374
+
375
+ Build the Java shim and jar.
376
+
377
+ ```
378
+ $ rake build:ext
379
+ ```
380
+
381
+ The `rake build:ext` task builds the Java shim and packages all of the required Java
382
+ classes into a single jar. Since bytecode is portable, the JAR is shipped with
383
+ the built gem.
384
+
385
+ Build the Gem.
386
+
387
+ Use the `rake build:gem` task to build the complete gem, uberjar and all.
388
+
389
+ ```
390
+ $ rake build:gem
391
+ ```
392
+
393
+ # Testing
394
+
395
+ Telekinesis comes with a small set of unit tests. Run those with plain ol'
396
+ `rake test`.
397
+
398
+ > NOTE: The Java extension *must* be built and installed before you can run
399
+ > unit tests.
400
+
401
+ Integration tests coming soon.
data/Rakefile ADDED
@@ -0,0 +1,111 @@
1
+ require 'bundler/setup'
2
+
3
+ Bundler.require(:development)
4
+
5
+ def log_ok(message)
6
+ $stderr.write "#{message}... "
7
+ begin
8
+ yield
9
+ $stderr.puts "ok"
10
+ rescue => e
11
+ $stderr.puts "failed"
12
+ abort <<-EOF
13
+
14
+ error: #{e}
15
+ EOF
16
+ end
17
+ end
18
+
19
+ def artifact_name(path)
20
+ File.open(path) do |f|
21
+ doc = Nokogiri::XML(f)
22
+ id = doc.css("project>artifactId").text
23
+ version = doc.css("project>version").text
24
+ "#{id}-#{version}.jar"
25
+ end
26
+ end
27
+
28
+ namespace :ext do
29
+ require_relative 'lib/telekinesis/version'
30
+
31
+ desc "Cleanup all built extension"
32
+ task :clean do
33
+ FileUtils.rm(Dir.glob("lib/telekinesis/*.jar"))
34
+ Dir.chdir("ext") do
35
+ `mvn clean 2>&1`
36
+ end
37
+ end
38
+
39
+ task :have_maven? do
40
+ log_ok("Checking for maven") do
41
+ `which mvn`
42
+ raise "Maven is required to build this gem" unless $?.success?
43
+ end
44
+ end
45
+
46
+ task :have_jdk6_or_higher? do
47
+ log_ok("Checking that at least java 6 is installed") do
48
+ version_match = `java -version 2>&1`.match(/java version "1\.(\d)\.(\d+_\d+)"/)
49
+ if version_match.nil?
50
+ raise "Can't parse Java version!"
51
+ end
52
+ jdk_version, _jdk_patchlevel = version_match.captures
53
+ if jdk_version.to_i < 6
54
+ raise "Found #{version_match}"
55
+ end
56
+ end
57
+ end
58
+
59
+ task :update_pom_version do
60
+ File.open('ext/pom.xml', 'r+') do |f|
61
+ doc = Nokogiri::XML(f)
62
+ pom_version = doc.css("project>version")
63
+
64
+ if pom_version.text != Telekinesis::VERSION
65
+ log_ok("Updating pom.xml version") do
66
+ pom_version.first.content = Telekinesis::VERSION
67
+ f.truncate(0)
68
+ f.rewind
69
+ f.write(doc.to_xml)
70
+ end
71
+ end
72
+ end
73
+ end
74
+
75
+ desc "Build the Java extensions for this gem. Requires JDK6+ and Maven"
76
+ task :build => [:have_jdk6_or_higher?, :have_maven?, :update_pom_version, :clean] do
77
+ fat_jar = artifact_name('ext/pom.xml')
78
+ log_ok("Building #{fat_jar}") do
79
+ Dir.chdir("ext") do
80
+ `mkdir -p target/`
81
+ `mvn package 2>&1 > target/build_log`
82
+ raise "build failed. See ext/target/build_log for details" unless $?.success?
83
+ FileUtils.copy("target/#{fat_jar}", "../lib/telekinesis/#{fat_jar}")
84
+ end
85
+ end
86
+ end
87
+ end
88
+
89
+ namespace :gem do
90
+ desc "Build this gem"
91
+ task :build => 'ext:build' do
92
+ `gem build telekinesis.gemspec`
93
+ end
94
+ end
95
+
96
+ require 'rake/testtask'
97
+
98
+ # NOTE: Tests shouldn't be run without the extension being built, but converting
99
+ # the build task to a file task made it hard to depend on having a JDK
100
+ # and Maven installed. This is a little kludgy but better than the
101
+ # alternative.
102
+ task :check_for_ext do
103
+ fat_jar = artifact_name('ext/pom.xml')
104
+ Rake::Task["ext:build"].invoke unless File.exists?("lib/telekinesis/#{fat_jar}")
105
+ end
106
+
107
+ Rake::TestTask.new(:test) do |t|
108
+ t.test_files = FileList["test/**/test_*.rb"].exclude(/test_helper/)
109
+ t.verbose = true
110
+ end
111
+ task :test => :check_for_ext
data/ext/.gitignore ADDED
@@ -0,0 +1,3 @@
1
+ .idea/
2
+ *.iml
3
+ target/
data/ext/pom.xml ADDED
@@ -0,0 +1,63 @@
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
3
+ <modelVersion>4.0.0</modelVersion>
4
+
5
+ <groupId>com.kickstarter</groupId>
6
+ <artifactId>telekinesis</artifactId>
7
+ <version>2.0.0</version>
8
+
9
+ <!-- ================================================================== -->
10
+ <build>
11
+ <finalName>${project.artifactId}-${project.version}</finalName>
12
+
13
+ <plugins>
14
+ <plugin>
15
+ <groupId>org.apache.maven.plugins</groupId>
16
+ <artifactId>maven-compiler-plugin</artifactId>
17
+ <version>3.1</version>
18
+ <configuration>
19
+ <source>1.6</source>
20
+ <target>1.6</target>
21
+ </configuration>
22
+ </plugin>
23
+ <plugin>
24
+ <groupId>org.apache.maven.plugins</groupId>
25
+ <artifactId>maven-shade-plugin</artifactId>
26
+ <version>1.6</version>
27
+ <configuration>
28
+ <createDependencyReducedPom>true</createDependencyReducedPom>
29
+ </configuration>
30
+ <executions>
31
+ <execution>
32
+ <phase>package</phase>
33
+ <goals>
34
+ <goal>shade</goal>
35
+ </goals>
36
+ </execution>
37
+ </executions>
38
+ </plugin>
39
+ </plugins>
40
+ </build>
41
+
42
+ <!-- ================================================================== -->
43
+ <!-- NOTE: all version numbers are defined in the properties section -->
44
+ <dependencies>
45
+ <!-- Production dependencies :: Default scope -->
46
+ <dependency>
47
+ <groupId>com.amazonaws</groupId>
48
+ <artifactId>amazon-kinesis-client</artifactId>
49
+ <version>${amazon-kinesis-client-version}</version>
50
+ </dependency>
51
+ <dependency>
52
+ <groupId>com.google.guava</groupId>
53
+ <artifactId>guava</artifactId>
54
+ <version>18.0</version>
55
+ </dependency>
56
+ </dependencies>
57
+
58
+ <!-- ================================================================== -->
59
+ <properties>
60
+ <aws-java-sdk-version>1.6.9.1</aws-java-sdk-version>
61
+ <amazon-kinesis-client-version>1.2.1</amazon-kinesis-client-version>
62
+ </properties>
63
+ </project>
@@ -0,0 +1,65 @@
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project xmlns="http://maven.apache.org/POM/4.0.0"
3
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5
+ <modelVersion>4.0.0</modelVersion>
6
+
7
+ <groupId>com.kickstarter</groupId>
8
+ <artifactId>telekinesis</artifactId>
9
+ <version>1.0.0</version>
10
+
11
+ <!-- ================================================================== -->
12
+ <build>
13
+ <finalName>${project.artifactId}-${project.version}</finalName>
14
+
15
+ <plugins>
16
+ <plugin>
17
+ <groupId>org.apache.maven.plugins</groupId>
18
+ <artifactId>maven-compiler-plugin</artifactId>
19
+ <version>3.1</version>
20
+ <configuration>
21
+ <source>1.6</source>
22
+ <target>1.6</target>
23
+ </configuration>
24
+ </plugin>
25
+ <plugin>
26
+ <groupId>org.apache.maven.plugins</groupId>
27
+ <artifactId>maven-shade-plugin</artifactId>
28
+ <version>1.6</version>
29
+ <configuration>
30
+ <createDependencyReducedPom>true</createDependencyReducedPom>
31
+ </configuration>
32
+ <executions>
33
+ <execution>
34
+ <phase>package</phase>
35
+ <goals>
36
+ <goal>shade</goal>
37
+ </goals>
38
+ </execution>
39
+ </executions>
40
+ </plugin>
41
+ </plugins>
42
+ </build>
43
+
44
+ <!-- ================================================================== -->
45
+ <!-- NOTE: all version numbers are defined in the properties section -->
46
+ <dependencies>
47
+ <!-- Production dependencies :: Default scope -->
48
+ <dependency>
49
+ <groupId>com.amazonaws</groupId>
50
+ <artifactId>amazon-kinesis-client</artifactId>
51
+ <version>${amazon-kinesis-client-version}</version>
52
+ </dependency>
53
+ <dependency>
54
+ <groupId>com.google.guava</groupId>
55
+ <artifactId>guava</artifactId>
56
+ <version>18.0</version>
57
+ </dependency>
58
+ </dependencies>
59
+
60
+ <!-- ================================================================== -->
61
+ <properties>
62
+ <aws-java-sdk-version>1.6.9.1</aws-java-sdk-version>
63
+ <amazon-kinesis-client-version>1.2.1</amazon-kinesis-client-version>
64
+ </properties>
65
+ </project>