brow 0.1.0
- checksums.yaml +7 -0
- data/CHANGELOG.md +5 -0
- data/Gemfile +16 -0
- data/Guardfile +54 -0
- data/LICENSE.txt +21 -0
- data/README.md +61 -0
- data/Rakefile +12 -0
- data/bin/console +15 -0
- data/bin/setup +8 -0
- data/examples/basic.rb +14 -0
- data/lib/brow/backoff_policy.rb +61 -0
- data/lib/brow/client.rb +110 -0
- data/lib/brow/message_batch.rb +94 -0
- data/lib/brow/prefixed_logger.rb +25 -0
- data/lib/brow/response.rb +13 -0
- data/lib/brow/test_queue.rb +29 -0
- data/lib/brow/transport.rb +137 -0
- data/lib/brow/utils.rb +64 -0
- data/lib/brow/version.rb +5 -0
- data/lib/brow/worker.rb +66 -0
- data/lib/brow.rb +29 -0
- metadata +67 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA256:
  metadata.gz: 8f08f2a47a1034332966c9ef42acdecc5629052fd8255857a61cdaa8483be502
  data.tar.gz: f42dac7bdfe22a48b7e7d5e0ef5ecb91407505f9ef0fb91393a263b194a8845c
SHA512:
  metadata.gz: abb687ce5fe388f7c87826752255ff1227dd275365ed599d3e78519f1402a2bf4a6010e9f6da307f8f7265be8a69e6f26baea43e30b7bd9adc2cd9f3253ca447
  data.tar.gz: 57722a879c3fa49461ffbf8334a5674333fce12bb30dc2037b7dcb09f65d3e2335e57d4abb47359a7ed2b032154e1e9494e92c3a5f1a9e00a99cd0627feb1488
data/CHANGELOG.md
ADDED
data/Gemfile
ADDED
@@ -0,0 +1,16 @@
# frozen_string_literal: true

source "https://rubygems.org"
gemspec

gem "rake", "~> 13.0"
gem "minitest", "~> 5.0"
gem "minitest-heat", "~> 0.0"
gem "webmock", "~> 3.10.0"

group(:guard) do
  gem "guard", "~> 2.18.0"
  gem "guard-minitest", "~> 2.4.6"
  gem "guard-bundler", "~> 3.0.0"
  gem "rb-fsevent", "~> 0.10"
end
data/Guardfile
ADDED
@@ -0,0 +1,54 @@
# A sample Guardfile
# More info at https://github.com/guard/guard#readme

## Uncomment and set this to only include directories you want to watch
# directories %w(app lib config test spec features) \
#  .select{|d| Dir.exist?(d) ? d : UI.warning("Directory #{d} does not exist")}

## Note: if you are using the `directories` clause above and you are not
## watching the project directory ('.'), then you will want to move
## the Guardfile to a watched dir and symlink it back, e.g.
#
#  $ mkdir config
#  $ mv Guardfile config/
#  $ ln -s config/Guardfile .
#
# and, you'll have to watch "config/Guardfile" instead of "Guardfile"

guard :bundler do
  require 'guard/bundler'
  require 'guard/bundler/verify'
  helper = Guard::Bundler::Verify.new

  files = ['Gemfile']
  files += Dir['*.gemspec'] if files.any? { |f| helper.uses_gemspec?(f) }

  # Assume files are symlinked from somewhere
  files.each { |file| watch(helper.real_path(file)) }
end

guard :minitest do
  # with Minitest::Unit
  watch(%r{^test/(.*)\/?(.*)_test\.rb$})
  watch(%r{^lib/(.*/)?([^/]+)\.rb$}) { |m| "test/#{m[1]}#{m[2]}_test.rb" }
  watch(%r{^test/test_helper\.rb$}) { 'test' }

  # with Minitest::Spec
  # watch(%r{^spec/(.*)_spec\.rb$})
  # watch(%r{^lib/(.+)\.rb$}) { |m| "spec/#{m[1]}_spec.rb" }
  # watch(%r{^spec/spec_helper\.rb$}) { 'spec' }

  # Rails 4
  # watch(%r{^app/(.+)\.rb$}) { |m| "test/#{m[1]}_test.rb" }
  # watch(%r{^app/controllers/application_controller\.rb$}) { 'test/controllers' }
  # watch(%r{^app/controllers/(.+)_controller\.rb$}) { |m| "test/integration/#{m[1]}_test.rb" }
  # watch(%r{^app/views/(.+)_mailer/.+}) { |m| "test/mailers/#{m[1]}_mailer_test.rb" }
  # watch(%r{^lib/(.+)\.rb$}) { |m| "test/lib/#{m[1]}_test.rb" }
  # watch(%r{^test/.+_test\.rb$})
  # watch(%r{^test/test_helper\.rb$}) { 'test' }

  # Rails < 4
  # watch(%r{^app/controllers/(.*)\.rb$}) { |m| "test/functional/#{m[1]}_test.rb" }
  # watch(%r{^app/helpers/(.*)\.rb$}) { |m| "test/helpers/#{m[1]}_test.rb" }
  # watch(%r{^app/models/(.*)\.rb$}) { |m| "test/unit/#{m[1]}_test.rb" }
end
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2021 John Nunemaker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,61 @@
# Brow

A generic background thread worker for shipping events via HTTPS to some API backend.

I've been wanting to build something like this for a while. This might be a terrible start. But it's a start.

I noticed a lot of companies copied Segment's [analytics-ruby](https://github.com/segmentio/analytics-ruby) project and are using it successfully.

So that's where I began. It seems safe to assume that project has been around long enough and is production hardened enough. I guess I'll find out. :)

Things around here are pretty basic for now. But I'm looking to spruce it up and production test it over the coming months — likely with [Flipper](https://github.com/jnunemaker/flipper) and [Flipper Cloud](https://www.flippercloud.io/?utm_source=brow&utm_medium=web&utm_campaign=readme).

## Installation

Add this line to your application's Gemfile:

```ruby
gem 'brow'
```

And then execute:

    $ bundle install

Or install it yourself as:

    $ gem install brow

## Usage

```ruby
require "brow"

client = Brow::Client.new({
  url: "https://requestbin.net/r/rna67for",
})

50.times do |n|
  client.record({
    number: n,
    now: Time.now.utc,
  })
end

# Batch of 50 events sent to the API URL above as JSON.
client.flush
```

## Development

After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.

To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and the created tag, and push the `.gem` file to [rubygems.org](https://rubygems.org).

## Contributing

Bug reports and pull requests are welcome on GitHub at https://github.com/jnunemaker/brow.

## License

The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
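The README only shows `:url`, but the client, worker, and transport below all read from the same options hash. A minimal configuration sketch; the URL and token are placeholders:

```ruby
require "brow"

client = Brow::Client.new({
  url: "https://example.com/events",      # placeholder URL
  max_queue_size: 5_000,                   # Client: drop events beyond this many queued
  batch_size: 50,                          # Worker: max messages per HTTP request
  headers: { "Authorization" => "Bearer placeholder-token" }, # Transport: merged over defaults
  on_error: ->(response) {                 # Worker: called for any non-200 response
    warn "brow delivery failed with status #{response.status}"
  },
})
```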
data/Rakefile
ADDED
data/bin/console
ADDED
@@ -0,0 +1,15 @@
#!/usr/bin/env ruby
# frozen_string_literal: true

require "bundler/setup"
require "brow"

# You can add fixtures and/or initialization code here to make experimenting
# with your gem easier. You can also use a different console, if you like.

# (If you use this, don't forget to add pry to your Gemfile!)
# require "pry"
# Pry.start

require "irb"
IRB.start(__FILE__)
data/bin/setup
ADDED
data/examples/basic.rb
ADDED
data/lib/brow/backoff_policy.rb
ADDED
@@ -0,0 +1,61 @@
# frozen_string_literal: true

module Brow
  class BackoffPolicy
    # Private: The default minimum timeout between intervals in milliseconds.
    MIN_TIMEOUT_MS = 100

    # Private: The default maximum timeout between intervals in milliseconds.
    MAX_TIMEOUT_MS = 10000

    # Private: The value to multiply the current interval with for each
    # retry attempt.
    MULTIPLIER = 1.5

    # Private: The randomization factor to use to create a range around the
    # retry interval.
    RANDOMIZATION_FACTOR = 0.5

    # Public: Create new instance of backoff policy.
    #
    # options - The Hash of options.
    #   :min_timeout_ms - The minimum backoff timeout.
    #   :max_timeout_ms - The maximum backoff timeout.
    #   :multiplier - The value to multiply the current interval with for each
    #                 retry attempt.
    #   :randomization_factor - The randomization factor to use to create a
    #                           range around the retry interval.
    def initialize(options = {})
      @min_timeout_ms = options[:min_timeout_ms] || MIN_TIMEOUT_MS
      @max_timeout_ms = options[:max_timeout_ms] || MAX_TIMEOUT_MS
      @multiplier = options[:multiplier] || MULTIPLIER
      @randomization_factor = options[:randomization_factor] || RANDOMIZATION_FACTOR

      @attempts = 0
    end

    # Public: Returns the next backoff interval in milliseconds.
    def next_interval
      interval = @min_timeout_ms * (@multiplier**@attempts)
      interval = add_jitter(interval, @randomization_factor)

      @attempts += 1

      [interval, @max_timeout_ms].min
    end

    private

    def add_jitter(base, randomization_factor)
      random_number = rand
      max_deviation = base * randomization_factor
      deviation = random_number * max_deviation

      if random_number < 0.5
        base - deviation
      else
        base + deviation
      end
    end
  end
end
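A rough feel for the numbers this policy produces with the defaults above; the printed values are only illustrative, since the jitter makes every run different:

```ruby
require "brow"

policy = Brow::BackoffPolicy.new

# Base interval starts at MIN_TIMEOUT_MS (100ms), grows 1.5x per attempt,
# gets +/- 50% jitter applied, and is capped at MAX_TIMEOUT_MS (10s).
intervals = 5.times.map { policy.next_interval.round }
# => something like [108, 187, 214, 412, 389] (milliseconds)
```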
data/lib/brow/client.rb
ADDED
@@ -0,0 +1,110 @@
# frozen_string_literal: true

require 'thread'
require 'time'

require_relative 'utils'
require_relative 'worker'
require_relative 'test_queue'

module Brow
  class Client
    # Private: Default # of items that can be in queue before we start dropping data.
    MAX_QUEUE_SIZE = 10_000

    # Public: Create a new instance of a client.
    #
    # options - The Hash of options.
    #   :max_queue_size - The maximum number of calls allowed to remain queued.
    #   :on_error - The Proc that handles error calls from the API.
    def initialize(options = {})
      options = Brow::Utils.symbolize_keys(options)

      @worker_thread = nil
      @worker_mutex = Mutex.new
      @test = options[:test]
      @max_queue_size = options[:max_queue_size] || MAX_QUEUE_SIZE
      @logger = options.fetch(:logger) { Brow.logger }
      @queue = options.fetch(:queue) { Queue.new }
      @worker = options.fetch(:worker) { Worker.new(@queue, options) }

      at_exit { @worker_thread && @worker_thread[:should_exit] = true }
    end

    # Public: Synchronously waits until the worker has flushed the queue.
    #
    # Use only for scripts which are not long-running, and will
    # specifically exit.
    def flush
      while !@queue.empty? || @worker.requesting?
        ensure_worker_running
        sleep(0.1)
      end
    end

    # Public: Enqueues the event.
    #
    # event - The Hash of event data.
    #
    # Returns Boolean of whether the item was added to the queue.
    def record(event)
      raise ArgumentError, "event must be a Hash" unless event.is_a?(Hash)

      event = Brow::Utils.symbolize_keys(event)
      event = Brow::Utils.isoify_dates(event)
      enqueue event
    end

    # Public: Returns the number of messages in the queue.
    def queued_messages
      @queue.length
    end

    # Public: For test purposes only. If test: true is passed to #initialize
    # then all recording of events will go to a test queue in memory so they
    # can be verified with assertions.
    def test_queue
      unless @test
        raise 'Test queue only available when setting :test to true.'
      end

      @test_queue ||= TestQueue.new
    end

    private

    # Private: Enqueues the event.
    #
    # Returns Boolean of whether the item was added to the queue.
    def enqueue(action)
      if @test
        test_queue << action
        return true
      end

      if @queue.length < @max_queue_size
        @queue << action
        ensure_worker_running

        true
      else
        @logger.warn 'Queue is full, dropping events. The :max_queue_size configuration parameter can be increased to prevent this from happening.'
        false
      end
    end

    def ensure_worker_running
      return if worker_running?
      @worker_mutex.synchronize do
        return if worker_running?
        @worker_thread = Thread.new do
          @worker.run
        end
      end
    end

    def worker_running?
      @worker_thread && @worker_thread.alive?
    end
  end
end
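Queue behavior in practice, based on `record`, `queued_messages`, and `flush` above; the URL is a placeholder:

```ruby
require "brow"

client = Brow::Client.new(url: "https://example.com/events") # placeholder URL

client.record(event: "signup") # => true once the event is queued
client.queued_messages         # => count of events not yet shipped

# Once the queue holds :max_queue_size items (10,000 by default), record
# logs a warning, drops the event, and returns false instead of blocking.

client.flush # drain the queue; intended for short-lived scripts only
```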
data/lib/brow/message_batch.rb
ADDED
@@ -0,0 +1,94 @@
# frozen_string_literal: true

require 'securerandom'
require 'forwardable'

module Brow
  # Internal: A batch of messages to be sent to the API.
  class MessageBatch
    extend Forwardable

    # Private: The error raised when a message cannot be serialized to json.
    class JSONGenerationError < ::Brow::Error; end

    # Private: Maximum bytes for an individual message.
    MAX_BYTES_PER_MESSAGE = 32_768 # 32KB

    # Private: Maximum total bytes for a batch.
    MAX_BYTES = 512_000 # 500KB

    # Private: Maximum number of messages in a batch.
    MAX_SIZE = 100

    def_delegators :@messages, :empty?
    def_delegators :@messages, :length

    attr_reader :uuid, :json_size

    def initialize(options = {})
      clear
      @max_size = options[:max_size] || MAX_SIZE
      @logger = options.fetch(:logger) { Brow.logger }
    end

    def <<(message)
      begin
        message_json = message.to_json
      rescue StandardError => error
        raise JSONGenerationError, "Serialization error: #{error}"
      end

      message_json_size = message_json.bytesize

      if message_too_big?(message_json_size)
        @logger.error('a message exceeded the maximum allowed size')
      else
        @messages << message
        @json_size += message_json_size + 1 # One byte for the comma
      end
    end

    def full?
      item_count_exhausted? || size_exhausted?
    end

    def clear
      @messages = []
      @json_size = 0
      @uuid = SecureRandom.uuid
    end

    def as_json
      {
        uuid: @uuid,
        messages: @messages,
      }
    end

    def to_json
      JSON.generate(as_json)
    end

    private

    def item_count_exhausted?
      @messages.length >= @max_size
    end

    def message_too_big?(message_json_size)
      message_json_size > MAX_BYTES_PER_MESSAGE
    end

    # We consider the max size here as just enough to leave room for one more
    # message of the largest size possible. This is a shortcut that allows us
    # to use a native Ruby `Queue` that doesn't allow peeking. The tradeoff
    # here is that we might fit fewer messages than possible into a batch.
    #
    # The alternative is to use our own `Queue` implementation that allows
    # peeking, and to consider the next message size when calculating whether
    # the message can be accommodated in this batch.
    def size_exhausted?
      @json_size >= (MAX_BYTES - MAX_BYTES_PER_MESSAGE)
    end
  end
end
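`MessageBatch` is internal, but its limits are easy to see in a console. A sketch using the constants and methods above:

```ruby
require "brow"

batch = Brow::MessageBatch.new(max_size: 2)
batch << { event: "a" }
batch << { event: "b" }

batch.length    # => 2
batch.full?     # => true, the item-count limit is hit at max_size
batch.json_size # => serialized bytes plus one byte per separating comma
batch.to_json   # => {"uuid":"...","messages":[{"event":"a"},{"event":"b"}]}

batch.clear     # empties the batch and assigns a fresh uuid
```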
data/lib/brow/prefixed_logger.rb
ADDED
@@ -0,0 +1,25 @@
module Brow
  # Internal: Wraps an existing logger and adds a prefix to all messages.
  class PrefixedLogger
    def initialize(logger, prefix)
      @logger = logger
      @prefix = prefix
    end

    def debug(message)
      @logger.debug("#{@prefix} #{message}")
    end

    def info(message)
      @logger.info("#{@prefix} #{message}")
    end

    def warn(message)
      @logger.warn("#{@prefix} #{message}")
    end

    def error(message)
      @logger.error("#{@prefix} #{message}")
    end
  end
end
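A quick sketch of the wrapper above around a stdlib `Logger`:

```ruby
require "logger"
require "brow"

# Any logger responding to debug/info/warn/error can be wrapped; every
# message comes out with the prefix attached.
logger = Brow::PrefixedLogger.new(Logger.new($stdout), "[brow]")
logger.info("starting worker") # logs "[brow] starting worker"
```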
data/lib/brow/test_queue.rb
ADDED
@@ -0,0 +1,29 @@
# frozen_string_literal: true

module Brow
  # Public: The test queue to use if the `Client` is in test mode. Keeps all
  # messages in an array so you can add assertions.
  #
  # Be sure to reset before each test case.
  class TestQueue
    attr_reader :messages

    def initialize
      reset
    end

    def count
      messages.count
    end
    alias_method :size, :count
    alias_method :length, :count

    def <<(message)
      messages << message
    end

    def reset
      @messages = []
    end
  end
end
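A minimal Minitest sketch of test mode, combining the `TestQueue` above with `Brow::Client` and `test: true`; the URL is a placeholder since nothing is actually sent:

```ruby
require "minitest/autorun"
require "brow"

class SignupTrackingTest < Minitest::Test
  def setup
    # With test: true, record pushes onto an in-memory TestQueue.
    @client = Brow::Client.new(url: "https://example.com/events", test: true)
  end

  def test_records_signup_event
    @client.record(event: "signup", plan: "basic")

    assert_equal 1, @client.test_queue.count
    assert_equal "signup", @client.test_queue.messages.first[:event]
  ensure
    @client.test_queue.reset
  end
end
```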
data/lib/brow/transport.rb
ADDED
@@ -0,0 +1,137 @@
# frozen_string_literal: true

require 'net/http'
require 'net/https'
require 'json'

require_relative 'response'
require_relative 'backoff_policy'

module Brow
  class Transport
    RETRIES = 10
    HEADERS = {
      "Accept" => "application/json",
      "Content-Type" => "application/json",
      "User-Agent" => "brow-ruby/#{Brow::VERSION}",
      "Client-Language" => "ruby",
      "Client-Language-Version" => "#{RUBY_VERSION} p#{RUBY_PATCHLEVEL} (#{RUBY_RELEASE_DATE})",
      "Client-Platform" => RUBY_PLATFORM,
      "Client-Engine" => defined?(RUBY_ENGINE) ? RUBY_ENGINE : "",
      "Client-Pid" => Process.pid.to_s,
      "Client-Thread" => Thread.current.object_id.to_s,
      "Client-Hostname" => Socket.gethostname,
    }

    attr_reader :url

    def initialize(options = {})
      @url = options[:url] || raise(ArgumentError, ":url is required to be present so we know where to send batches")
      @uri = URI.parse(@url)

      # Default path if people forget a slash.
      if @uri.path.nil? || @uri.path.empty?
        @uri.path = "/"
      end

      @headers = HEADERS.merge(options[:headers] || {})
      @retries = options[:retries] || RETRIES

      @logger = options.fetch(:logger) { Brow.logger }
      @backoff_policy = options.fetch(:backoff_policy) {
        Brow::BackoffPolicy.new
      }

      @http = Net::HTTP.new(@uri.host, @uri.port)
      @http.use_ssl = @uri.scheme == "https"
      @http.read_timeout = options[:read_timeout] || 8
      @http.open_timeout = options[:open_timeout] || 4
    end

    # Sends a batch of messages to the API.
    #
    # @return [Response] API response
    def send_batch(batch)
      @logger.debug("Sending request for #{batch.length} items")

      last_response, exception = retry_with_backoff(@retries) do
        response = send_request(batch)
        status_code = response.code.to_i
        should_retry = should_retry_request?(status_code, response.body)
        @logger.debug("Response status code: #{status_code}")

        [Response.new(status_code, nil), should_retry]
      end

      if exception
        @logger.error(exception.message)
        exception.backtrace.each { |line| @logger.error(line) }
        Response.new(-1, exception.to_s)
      else
        last_response
      end
    end

    # Closes a persistent connection if it exists.
    def shutdown
      @http.finish if @http.started?
    end

    private

    def should_retry_request?(status_code, body)
      if status_code >= 500
        # Server error. Retry and log.
        @logger.info("Server error: status=#{status_code}, body=#{body}")
        true
      elsif status_code == 429
        # Rate limited.
        @logger.info "Rate limit error"
        true
      elsif status_code >= 400
        # Client error. Do not retry, but log.
        @logger.error("Client error: status=#{status_code}, body=#{body}")
        false
      else
        false
      end
    end

    # Takes a block that returns [result, should_retry].
    #
    # Retries up to `retries_remaining` times, if `should_retry` is true or
    # an exception is raised. `@backoff_policy` is used to determine the
    # duration to sleep between attempts.
    #
    # Returns [last_result, raised_exception]
    def retry_with_backoff(retries_remaining, &block)
      result, caught_exception = nil
      should_retry = false

      begin
        result, should_retry = yield
        return [result, nil] unless should_retry
      rescue StandardError => error
        @logger.debug "Request error: #{error}"
        should_retry = true
        caught_exception = error
      end

      if should_retry && (retries_remaining > 1)
        @logger.debug("Retrying request, #{retries_remaining} retries left")
        sleep(@backoff_policy.next_interval.to_f / 1000)
        retry_with_backoff(retries_remaining - 1, &block)
      else
        [result, caught_exception]
      end
    end

    # Sends a request for the batch, returns the Net::HTTP response.
    def send_request(batch)
      payload = batch.to_json
      @http.start unless @http.started? # Maintain a persistent connection.
      request = Net::HTTP::Post.new(@uri.path, @headers)
      @http.request(request, payload)
    end
  end
end
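The transport-level options read above can be passed straight through the client, since the whole options hash flows from `Client` to `Worker` to `Transport`. A sketch where the URL and token are placeholders and the numeric values are the defaults:

```ruby
require "brow"

client = Brow::Client.new({
  url: "https://example.com/events", # placeholder URL
  retries: 10,                       # attempts before giving up (default)
  read_timeout: 8,                   # seconds (default)
  open_timeout: 4,                   # seconds (default)
  headers: { "Authorization" => "Bearer placeholder-token" }, # merged over HEADERS
})
```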
data/lib/brow/utils.rb
ADDED
@@ -0,0 +1,64 @@
# frozen_string_literal: true

require 'time'

module Brow
  module Utils
    extend self

    # Internal: Return a new hash with keys converted from strings to symbols.
    def symbolize_keys(hash)
      hash.each_with_object({}) do |(k, v), memo|
        memo[k.to_sym] = v
      end
    end

    # Internal: Returns a new hash with all the date values converted into
    # iso8601 strings.
    def isoify_dates(hash)
      hash.each_with_object({}) do |(k, v), memo|
        memo[k] = datetime_in_iso8601(v)
      end
    end

    # Internal
    def datetime_in_iso8601(datetime)
      case datetime
      when Time
        time_in_iso8601 datetime
      when DateTime
        time_in_iso8601 datetime.to_time
      when Date
        date_in_iso8601 datetime
      else
        datetime
      end
    end

    # Internal
    def time_in_iso8601(time)
      "#{time.strftime('%Y-%m-%dT%H:%M:%S.%6N')}#{formatted_offset(time, true, 'Z')}"
    end

    # Internal
    def date_in_iso8601(date)
      date.strftime('%F')
    end

    # Internal
    def formatted_offset(time, colon = true, alternate_utc_string = nil)
      time.utc? && alternate_utc_string || seconds_to_utc_offset(time.utc_offset, colon)
    end

    # Internal
    def seconds_to_utc_offset(seconds, colon = true)
      (colon ? UTC_OFFSET_WITH_COLON : UTC_OFFSET_WITHOUT_COLON) % [(seconds < 0 ? '-' : '+'), (seconds.abs / 3600), ((seconds.abs % 3600) / 60)]
    end

    # Internal
    UTC_OFFSET_WITH_COLON = '%s%02d:%02d'

    # Internal
    UTC_OFFSET_WITHOUT_COLON = UTC_OFFSET_WITH_COLON.sub(':', '')
  end
end
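What the helpers above do to an event before it is queued:

```ruby
require "date"
require "brow"

Brow::Utils.symbolize_keys("event" => "signup")
# => {:event=>"signup"}

Brow::Utils.isoify_dates(at: Time.utc(2021, 10, 20, 12, 0, 0))
# => {:at=>"2021-10-20T12:00:00.000000Z"}

Brow::Utils.datetime_in_iso8601(Date.new(2021, 10, 20))
# => "2021-10-20"
```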
data/lib/brow/version.rb
ADDED
data/lib/brow/worker.rb
ADDED
@@ -0,0 +1,66 @@
# frozen_string_literal: true

require_relative 'message_batch'
require_relative 'transport'
require_relative 'utils'

module Brow
  # Internal: The Worker pulls items off the queue, batches them, and sends
  # them to the API via the Transport.
  class Worker
    DEFAULT_ON_ERROR = proc { |response| }

    # Internal: Creates a new worker
    #
    # The worker continuously takes messages off the queue and makes requests to
    # the api.
    #
    # queue - Queue synchronized between client and worker
    # options - The Hash of worker options.
    #   batch_size - Fixnum of how many items to send in a batch.
    #   on_error - Proc of what to do on an error.
    #   transport - The Transport object to deliver batches.
    #   logger - The Logger object for all log messages.
    #   batch - The MessageBatch to collect messages and deliver batches
    #           via Transport.
    def initialize(queue, options = {})
      @queue = queue
      @lock = Mutex.new
      options = Brow::Utils.symbolize_keys(options)
      @on_error = options[:on_error] || DEFAULT_ON_ERROR
      @transport = options.fetch(:transport) { Transport.new(options) }
      @logger = options.fetch(:logger) { Brow.logger }
      @batch = options.fetch(:batch) { MessageBatch.new(max_size: options[:batch_size]) }
    end

    # Internal: Continuously runs the loop to check for new events
    def run
      until Thread.current[:should_exit]
        return if @queue.empty?

        @lock.synchronize do
          consume_message_from_queue! until @batch.full? || @queue.empty?
        end

        response = @transport.send_batch @batch
        @on_error.call(response) unless response.status == 200

        @lock.synchronize { @batch.clear }
      end
    ensure
      @transport.shutdown
    end

    # Internal: Check whether we have outstanding requests.
    def requesting?
      @lock.synchronize { !@batch.empty? }
    end

    private

    def consume_message_from_queue!
      @batch << @queue.pop
    rescue MessageBatch::JSONGenerationError => error
      @on_error.call(Response.new(-1, error))
    end
  end
end
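The worker is normally started on a background thread by the client, but it can be driven directly. A sketch with a stand-in transport so no real HTTP request is made; note that the `Brow::Response.new(status, error)` shape is inferred from how `Transport` constructs it, since `lib/brow/response.rb` is not shown in this listing:

```ruby
require "brow"

# Stand-in transport: anything responding to #send_batch and #shutdown works.
stub_transport = Object.new
def stub_transport.send_batch(batch)
  Brow::Response.new(200, nil) # constructor shape inferred from Transport
end
def stub_transport.shutdown; end

queue = Queue.new
queue << { event: "signup" }

# Normally Brow::Client spawns this for you on a background thread.
worker = Brow::Worker.new(queue, transport: stub_transport)
worker.run # drains the queue in batches, then returns because it is empty
```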
data/lib/brow.rb
ADDED
@@ -0,0 +1,29 @@
# frozen_string_literal: true

require_relative "brow/version"
require "logger"

module Brow
  class Error < StandardError; end

  # Public: Returns the logger instance to use for logging of things.
  def self.logger
    return @logger if @logger

    base_logger = if defined?(Rails)
      Rails.logger
    else
      Logger.new(STDOUT)
    end

    @logger = PrefixedLogger.new(base_logger, "[brow]")
  end

  # Public: Sets the logger instance to use for logging things.
  def self.logger=(new_logger)
    @logger = new_logger
  end
end

require_relative "brow/client"
require_relative "brow/prefixed_logger"
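Swapping the default logger, per the accessors above:

```ruby
require "logger"
require "brow"

# Default: Rails.logger when Rails is defined, otherwise a STDOUT Logger,
# wrapped with the "[brow]" prefix. Any Logger-compatible object works here.
Brow.logger = Logger.new($stderr, level: Logger::WARN)
Brow.logger.warn("queue is filling up") # now goes to $stderr
```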
metadata
ADDED
@@ -0,0 +1,67 @@
--- !ruby/object:Gem::Specification
name: brow
version: !ruby/object:Gem::Version
  version: 0.1.0
platform: ruby
authors:
- John Nunemaker
autorequire:
bindir: exe
cert_chain: []
date: 2021-10-20 00:00:00.000000000 Z
dependencies: []
description:
email:
- nunemaker@gmail.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- CHANGELOG.md
- Gemfile
- Guardfile
- LICENSE.txt
- README.md
- Rakefile
- bin/console
- bin/setup
- examples/basic.rb
- lib/brow.rb
- lib/brow/backoff_policy.rb
- lib/brow/client.rb
- lib/brow/message_batch.rb
- lib/brow/prefixed_logger.rb
- lib/brow/response.rb
- lib/brow/test_queue.rb
- lib/brow/transport.rb
- lib/brow/utils.rb
- lib/brow/version.rb
- lib/brow/worker.rb
homepage: https://github.com/jnunemaker/brow
licenses:
- MIT
metadata:
  homepage_uri: https://github.com/jnunemaker/brow
  source_code_uri: https://github.com/jnunemaker/brow
  changelog_uri: https://github.com/jnunemaker/brow/blob/main/CHANGELOG.md
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: 2.6.0
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubygems_version: 3.0.3
signing_key:
specification_version: 4
summary: A generic background thread worker for shipping events via https to some
  API backend.
test_files: []