phobos 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,6 @@
+ require "bundler/gem_tasks"
+ require "rspec/core/rake_task"
+
+ RSpec::Core::RakeTask.new(:spec)
+
+ task :default => :spec
@@ -0,0 +1,17 @@
+ #!/usr/bin/env ruby
+
+ require "bundler/setup"
+ require "phobos"
+
+ # You can add fixtures and/or initialization code here to make experimenting
+ # with your gem easier. You can also use a different console, if you like.
+
+ # (If you use this, don't forget to add pry to your Gemfile!)
+ # require "pry"
+ # Pry.start
+
+ config_path = ENV['CONFIG_PATH'] || (File.exist?('config/phobos.yml') ? 'config/phobos.yml' : 'config/phobos.yml.example')
+ Phobos.configure(config_path)
+
+ require "irb"
+ IRB.start
@@ -0,0 +1,9 @@
+ #!/usr/bin/env ruby
+
+ $LOAD_PATH.unshift File.dirname(__FILE__) + '/../lib'
+
+ require 'phobos'
+ require 'phobos/cli'
+
+ STDOUT.sync = true
+ Phobos::CLI::Commands.start(ARGV)
@@ -0,0 +1,8 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+ IFS=$'\n\t'
+ set -vx
+
+ bundle install
+
+ # Do any other automated setup that you need to do here
@@ -0,0 +1,27 @@
+ machine:
+   pre:
+     - curl -sSL https://s3.amazonaws.com/circle-downloads/install-circleci-docker.sh | bash -s -- 1.10.0
+   services:
+     - docker
+   environment:
+     LOG_LEVEL: DEBUG
+     CI: true
+     DEFAULT_TIMEOUT: 20
+   ruby:
+     version: 2.3.1
+
+ dependencies:
+   pre:
+     - docker -v
+     - docker pull ches/kafka:0.9.0.1
+     - docker pull jplock/zookeeper:3.4.6
+     - gem install bundler -v 1.9.5
+     - bundle install
+
+ test:
+   override:
+     - docker run -d -p 2003:2181 --name zookeeper jplock/zookeeper:3.4.6; sleep 5
+     - docker run -d -p 9092:9092 --name kafka -e KAFKA_BROKER_ID=0 -e KAFKA_ADVERTISED_HOST_NAME=localhost -e KAFKA_ADVERTISED_PORT=9092 -e ZOOKEEPER_CONNECTION_STRING=zookeeper:2181 --link zookeeper:zookeeper ches/kafka:0.9.0.1; sleep 5
+     - bundle exec rspec -r rspec_junit_formatter --format RspecJunitFormatter -o $CIRCLE_TEST_REPORTS/rspec/unit.xml
+   post:
+     - cp log/*.log $CIRCLE_ARTIFACTS/ || true
@@ -0,0 +1,78 @@
+ logger:
+   file: log/phobos.log
+   level: info
+
+ kafka:
+   # identifier for this application
+   client_id: phobos
+   # timeout setting for connecting to brokers
+   connect_timeout:
+   # timeout setting for socket connections
+   socket_timeout:
+   # PEM encoded CA cert to use with an SSL connection (string)
+   ssl_ca_cert:
+   # PEM encoded client cert to use with an SSL connection (string)
+   # Must be used in combination with ssl_client_cert_key
+   ssl_client_cert:
+   # PEM encoded client cert key to use with an SSL connection (string)
+   # Must be used in combination with ssl_client_cert
+   ssl_client_cert_key:
+   # list of brokers used to initialize the client ("host:port")
+   seed_brokers:
+     - localhost:9092
+
+ producer:
+   # number of seconds a broker can wait for replicas to acknowledge
+   # a write before responding with a timeout
+   ack_timeout: 5
+   # number of replicas that must acknowledge a write, or `:all`
+   # if all in-sync replicas must acknowledge
+   required_acks: :all
+   # number of retries that should be attempted before giving up sending
+   # messages to the cluster. Does not include the original attempt
+   max_retries: 2
+   # number of seconds to wait between retries
+   retry_backoff: 1
+   # number of messages allowed in the buffer before new writes will
+   # raise {BufferOverflow} exceptions
+   max_buffer_size: 1000
+   # maximum size of the buffer in bytes. Attempting to produce messages
+   # when the buffer reaches this size will result in {BufferOverflow} being raised
+   max_buffer_bytesize: 10000000
+   # name of the compression codec to use, or nil if no compression should be performed.
+   # Valid codecs: `:snappy` and `:gzip`
+   compression_codec:
+   # number of messages that need to be in a message set before it should be compressed.
+   # Note that message sets are per-partition rather than per-topic or per-producer
+   compression_threshold: 1
+
+ consumer:
+   # number of seconds after which, if a client hasn't contacted the Kafka cluster,
+   # it will be kicked out of the group
+   session_timeout: 30
+   # interval between offset commits, in seconds
+   offset_commit_interval: 10
+   # number of messages that can be processed before their offsets are committed.
+   # If zero, offset commits are not triggered by message processing
+   offset_commit_threshold: 0
+   # interval between heartbeats; must be less than the session window
+   heartbeat_interval: 10
+
+ backoff:
+   min_ms: 1000
+   max_ms: 60000
+
+ listeners:
+   - handler: Phobos::EchoHandler
+     topic: test
+     # id of the group that the consumer should join
+     group_id: test-1
+     # Once the consumer group has checkpointed its progress in the topic's partitions,
+     # the consumers will always start from the checkpointed offsets, regardless of config.
+     # As such, this setting only applies when the consumer initially starts consuming from a topic
+     start_from_beginning: true
+     # maximum amount of data fetched from a single partition at a time
+     max_bytes_per_partition: 524288 # 512 KB
+     # number of threads created for this listener; each thread will behave as an independent consumer.
+     # They don't share any state
+     max_concurrency: 1
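
Each entry under `listeners` binds a topic to a handler class; `Phobos::EchoHandler` ships with the gem. As a rough sketch of a custom handler that could fill this slot (the class name and body below are illustrative, not part of the package):

    # Hypothetical handler for the listener entry above; `MyHandler` and its
    # body are illustrative and not shipped with the gem.
    class MyHandler
      include Phobos::Handler

      def consume(payload, metadata)
        # metadata carries consumption details such as topic, group_id and partition
        Phobos.logger.info { Hash(message: "consumed #{payload.length} bytes") }
      end
    end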
@@ -0,0 +1,49 @@
+ #
+ # This example assumes that you want to save all events in your database for
+ # recovery purposes. The consumer will process the message and perform other
+ # operations; this implementation assumes a generic way to save the events.
+ #
+ # Set up your database connection using `phobos_boot.rb`. Remember to set up
+ # a hook to disconnect, e.g.: `at_exit { Database.disconnect! }`
+ #
+ class HandlerSavingEventsDatabase
+   include Phobos::Handler
+   include Phobos::Producer
+
+   def self.around_consume(payload, metadata)
+     #
+     # Let's assume `::from_message` will initialize our object with `payload`
+     #
+     event = Model::Event.from_message(payload)
+
+     #
+     # If the event already exists in the database, skip this message
+     #
+     return if event.exists?
+
+     Model::Event.transaction do
+       #
+       # Executes the `#consume` method
+       #
+       new_values = yield
+
+       #
+       # The `#consume` method can return additional data (up to your code)
+       #
+       event.update_with_new_attributes(new_values)
+
+       #
+       # Let's assume the event is just initialized and now is the time to save it
+       #
+       event.save!
+     end
+   end
+
+   def consume(payload, metadata)
+     #
+     # Process the event; it might index it in Elasticsearch or notify another
+     # system. You should process your message inside this method.
+     #
+     { new_value: payload.length % 3 }
+   end
+ end
@@ -0,0 +1,15 @@
+ #
+ # This example assumes you want to process the event and publish another
+ # one to Kafka. A new event is always published, so we want to use the async
+ # producer to make better use of our resources and to speed up processing.
+ #
+ class HandlerUsingAsyncProducer
+   include Phobos::Handler
+   include Phobos::Producer
+
+   PUBLISH_TO = 'another-topic'
+
+   def consume(payload, metadata)
+     producer.async_publish(PUBLISH_TO, "#{payload}-#{rand}")
+   end
+ end
@@ -0,0 +1,74 @@
+ require "bundler/setup"
+ require "phobos"
+ require "json"
+ require "securerandom"
+
+ TOPIC = 'test-partitions'
+
+ Phobos.configure('config/phobos.yml')
+
+ class MyProducer
+   include Phobos::Producer
+ end
+
+ #
+ # Trapping signals to properly stop this generator
+ #
+ @stop = false
+ %i( INT TERM QUIT ).each do |signal|
+   Signal.trap(signal) do
+     puts "Stopping"
+     @stop = true
+   end
+ end
+
+ Thread.new do
+   begin
+     total = 1
+
+     loop do
+       break if @stop
+       key = SecureRandom.uuid
+       payload = Time.now.utc.to_json
+
+       begin
+         # The producer uses the Phobos configuration to create a Kafka client
+         # and a producer, binding both to the current thread, so it's safe
+         # to call class methods here
+         #
+         MyProducer
+           .producer
+           .async_publish(TOPIC, payload, key)
+
+         puts "produced #{key}, total: #{total}"
+
+       # Since this is simplistic code, we are going to generate more messages than
+       # the producer can write to Kafka, so eventually we'll get some buffer overflows
+       #
+       rescue Kafka::BufferOverflow
+         puts "| waiting"
+         sleep(1)
+         retry
+       end
+
+       total += 1
+     end
+   ensure
+     #
+     # Before we stop, we must shut down the async producer to ensure that all
+     # messages are delivered
+     #
+     MyProducer
+       .producer
+       .async_producer_shutdown
+
+     #
+     # Since no client was configured (we could do so with `MyProducer.producer.configure_kafka_client`),
+     # we must fetch the auto-generated one and close it properly
+     #
+     MyProducer
+       .producer
+       .kafka_client
+       .close
+   end
+ end.join
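
For comparison, `Phobos::Producer` also exposes a synchronous `publish`, which delivers before returning and needs no explicit async shutdown. A minimal sketch under the same configuration (topic and payload are placeholders):

    # Synchronous counterpart to the async example above; sketch only.
    MyProducer.producer.publish('test-partitions', 'some-payload')
    MyProducer.producer.kafka_client.close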
@@ -0,0 +1,63 @@
+ require 'yaml'
+ require 'securerandom'
+ require 'fileutils'
+
+ require 'concurrent'
+ require 'kafka'
+ require 'hashie'
+ require 'logging'
+ require 'exponential_backoff'
+ require 'active_support/notifications'
+ require 'active_support/core_ext/string/inflections'
+ require 'active_support/core_ext/hash/keys'
+
+ require 'phobos/version'
+ require 'phobos/instrumentation'
+ require 'phobos/errors'
+ require 'phobos/listener'
+ require 'phobos/producer'
+ require 'phobos/handler'
+ require 'phobos/echo_handler'
+ require 'phobos/executor'
+
+ Thread.abort_on_exception = true
+
+ module Phobos
+   class << self
+     attr_reader :config, :logger
+     attr_accessor :silence_log
+
+     def configure(yml_path)
+       ENV['RAILS_ENV'] = ENV['RACK_ENV'] ||= 'development'
+       @config = Hashie::Mash.new(YAML.load_file(File.expand_path(yml_path)))
+       @config.class.send(:define_method, :producer_hash) { Phobos.config.producer&.to_hash&.symbolize_keys }
+       @config.class.send(:define_method, :consumer_hash) { Phobos.config.consumer&.to_hash&.symbolize_keys }
+       configure_logger
+       logger.info { Hash(message: 'Phobos configured', env: ENV['RACK_ENV']) }
+     end
+
+     def create_kafka_client
+       Kafka.new(config.kafka.to_hash.symbolize_keys)
+     end
+
+     def create_exponential_backoff
+       min = Phobos.config.backoff.min_ms / 1000.0
+       max = Phobos.config.backoff.max_ms / 1000.0
+       ExponentialBackoff.new(min, max).tap { |backoff| backoff.randomize_factor = rand }
+     end
+
+     def configure_logger
+       date_pattern = '%Y-%m-%dT%H:%M:%S:%L%zZ'
+       FileUtils.mkdir_p(File.dirname(config.logger.file))
+
+       Logging.backtrace true
+       Logging.logger.root.level = silence_log ? :fatal : config.logger.level
+
+       @logger = Logging.logger[self]
+       @logger.appenders = [
+         Logging.appenders.stdout(layout: Logging.layouts.pattern(date_pattern: date_pattern)),
+         Logging.appenders.file(config.logger.file, layout: Logging.layouts.json(date_pattern: date_pattern))
+       ]
+     end
+   end
+ end
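
Taken together, embedding the library amounts to calling `Phobos.configure` once at boot and then using the factory helpers defined above. A minimal sketch (the config path is a placeholder for whatever your app uses):

    # Sketch of direct library usage, assuming a config file at this path.
    Phobos.configure('config/phobos.yml')
    kafka = Phobos.create_kafka_client            # built from the `kafka` section
    backoff = Phobos.create_exponential_backoff   # bounded by backoff.min_ms/max_ms
    kafka.close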
@@ -0,0 +1,61 @@
+ require 'thor'
+ require 'phobos/cli/start'
+
+ module Phobos
+   module CLI
+
+     def self.logger
+       @logger ||= Logging.logger[self].tap do |l|
+         l.appenders = [Logging.appenders.stdout]
+       end
+     end
+
+     class Commands < Thor
+       include Thor::Actions
+
+       map '-v' => :version
+       map '--version' => :version
+
+       desc 'version', 'Outputs the version number. Can be used with: phobos -v or phobos --version'
+       def version
+         puts Phobos::VERSION
+       end
+
+       desc 'init', 'Initialize your project with Phobos'
+       def init
+         copy_file 'config/phobos.yml.example', 'config/phobos.yml'
+         create_file 'phobos_boot.rb' do
+           <<~EXAMPLE
+             # Use this file to load your code
+             puts <<~ART
+                ______ _           _
+                | ___ \\\\ |         | |
+                | |_/ / |__   ___ | |__   ___  ___
+                | __/| '_ \\\\ / _ \\\\| '_ \\\\ / _ \\\\/ __|
+                | |  | | | | (_) | |_) | (_) \\\\__ \\\\
+                \\\\_| |_| |_|\\\\___/|_.__/ \\\\___/|___/
+             ART
+             puts "\\nphobos_boot.rb - find this file at \#{File.expand_path(__FILE__)}\\n\\n"
+           EXAMPLE
+         end
+       end
+
+       desc 'start', 'Starts Phobos'
+       option :config,
+              aliases: ['-c'],
+              default: 'config/phobos.yml',
+              banner: 'Configuration file'
+       option :boot,
+              aliases: ['-b'],
+              banner: 'File path to load application specific code',
+              default: 'phobos_boot.rb'
+       def start
+         Phobos::CLI::Start.new(options).execute
+       end
+
+       def self.source_root
+         File.expand_path(File.join(File.dirname(__FILE__), '../..'))
+       end
+     end
+   end
+ end
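
These Thor commands are what `bin/phobos` dispatches to via `Phobos::CLI::Commands.start(ARGV)`. Driving them programmatically looks like this sketch, equivalent to running `phobos start -c config/phobos.yml -b phobos_boot.rb` from the shell (both paths are the defaults declared above):

    # Programmatic equivalent of the CLI invocation; sketch only.
    require 'phobos'
    require 'phobos/cli'

    Phobos::CLI::Commands.start(%w(start -c config/phobos.yml -b phobos_boot.rb))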
@@ -0,0 +1,48 @@
+ module Phobos
+   module CLI
+     class Runner
+
+       SIGNALS = %i( INT TERM QUIT ).freeze
+
+       def initialize
+         @signal_queue = []
+         @reader, @writer = IO.pipe
+         @executor = Phobos::Executor.new
+       end
+
+       def run!
+         setup_signals
+         executor.start
+
+         loop do
+           case signal_queue.pop
+           when *SIGNALS
+             executor.stop
+             break
+           else
+             ready = IO.select([reader, writer])
+
+             # drain the self-pipe so it won't be returned again next time
+             reader.read_nonblock(1) if ready[0].include?(reader)
+           end
+         end
+       end
+
+       private
+
+       attr_reader :reader, :writer, :signal_queue, :executor
+
+       def setup_signals
+         SIGNALS.each do |signal|
+           Signal.trap(signal) { unblock(signal) }
+         end
+       end
+
+       def unblock(signal)
+         writer.write_nonblock('.')
+         signal_queue << signal
+       end
+
+     end
+   end
+ end
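
The `reader`/`writer` pair is the classic self-pipe trick: a Ruby signal handler should do as little as possible, so `unblock` only records the signal and writes one byte, which wakes the `IO.select` in `run!` and lets the executor be stopped outside signal-handler context. A stripped-down sketch of the same pattern (names are illustrative, not part of the package):

    # Minimal self-pipe sketch; illustrative only.
    reader, writer = IO.pipe
    signals = []
    Signal.trap('INT') { writer.write_nonblock('.'); signals << 'INT' }

    loop do
      break if signals.pop          # a trapped signal was recorded
      IO.select([reader])           # sleep until the trap writes a byte
      reader.read_nonblock(1)       # drain so select won't fire again
    end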