brow 0.1.0 → 0.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 8f08f2a47a1034332966c9ef42acdecc5629052fd8255857a61cdaa8483be502
- data.tar.gz: f42dac7bdfe22a48b7e7d5e0ef5ecb91407505f9ef0fb91393a263b194a8845c
+ metadata.gz: ffdc8b811ca1be5ce149f0e21dc0c08a6f8a4f2e79368f20a69503ca33e2d997
+ data.tar.gz: 4f58ddc175db9c12b6fa77b13fe9f1d82a49100ea1d1cc143d335cb9a5db082b
  SHA512:
- metadata.gz: abb687ce5fe388f7c87826752255ff1227dd275365ed599d3e78519f1402a2bf4a6010e9f6da307f8f7265be8a69e6f26baea43e30b7bd9adc2cd9f3253ca447
- data.tar.gz: 57722a879c3fa49461ffbf8334a5674333fce12bb30dc2037b7dcb09f65d3e2335e57d4abb47359a7ed2b032154e1e9494e92c3a5f1a9e00a99cd0627feb1488
+ metadata.gz: e66c43a44fd5e10b3aba344941bbc046ed1943208da8de20bc2a96577d1642d94e3528586a1db3d7ee998c45e966bc8daa72de39933a8dfa7354a0f737c1e4b1
+ data.tar.gz: 704c98568783c393b58f438d23039003c816b7c363932785ceb2f747ddfca6e7da049708c7fd7e6c22f10e392b65928367bbd0d5667aea9720e7a7f7259747c2
data/CHANGELOG.md CHANGED
@@ -1,5 +1,23 @@
- ## [Unreleased]
+ # Changelog
+ All notable changes to this project will be documented in this file.
 
- ## [0.1.0] - 2021-10-14
+ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
- - Initial release
+ ## [0.2.0] - 2021-10-25
+
+ ### Changed
+
+ - [c25dce](https://github.com/jnunemaker/brow/commit/c25dcedcab2b75cfe28a561e80e537fefae6cc52) `record` is now `push`.
+
+ ### Fixed
+
+ - [eceb02](https://github.com/jnunemaker/brow/commit/eceb02f810cc5ace7d7540c957fc1cf924849629) Fixed problems with shutdown (previously required a flush to send whatever batches were in progress) and forking (which caused the queue to not get worked off).
+
+ ### Added
+
+ - [c7f7e4](https://github.com/jnunemaker/brow/commit/c7f7e42b0d6bfa9fa96bac58fda0ef94f93d223d) `BackoffPolicy` now gets `options` so you can pass those to `Client` and they'll make it all the way through.
+
+ ## [0.1.0] - 2021-10-20
+
+ - Initial release. Let's face it, I just wanted to squat on the gem name.
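
To make the `record` → `push` rename concrete, here is a minimal migration sketch using only the API visible in this diff; the endpoint URL is a placeholder.

    require "brow"

    client = Brow::Client.new({
      url: "https://example.com/events", # placeholder endpoint
    })

    # 0.1.0:
    #   client.record({ number: 1, now: Time.now.utc })
    #   client.flush
    #
    # 0.2.0: `record` is now `push`, and an at_exit hook shuts the worker
    # down, so the trailing flush is no longer required.
    client.push({
      number: 1,
      now: Time.now.utc,
    })
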
data/Gemfile CHANGED
@@ -7,6 +7,7 @@ gem "rake", "~> 13.0"
  gem "minitest", "~> 5.0"
  gem "minitest-heat", "~> 0.0"
  gem "webmock", "~> 3.10.0"
+ gem "rack", "~> 2.2.3"
 
  group(:guard) do
  gem "guard", "~> 2.18.0"
data/README.md CHANGED
@@ -1,6 +1,6 @@
  # Brow
 
- A generic background thread worker for shipping events via https to some API backend.
+ A generic background thread worker for shipping events via https to some API backend. It'll get events to your API by the sweat of its brow.
 
  I've been wanting to build something like this for a while. This might be a terrible start. But it's a start.
 
@@ -36,14 +36,13 @@ client = Brow::Client.new({
  })
 
  50.times do |n|
- client.record({
+ client.push({
  number: n,
  now: Time.now.utc,
  })
  end
 
  # batch of 50 events sent to api url above as json
- client.flush
  ```
 
  ## Development
data/examples/basic.rb CHANGED
@@ -1,14 +1,12 @@
  require_relative "../lib/brow"
 
  client = Brow::Client.new({
- url: "https://requestbin.net/r/rna67for",
+ url: "https://requestbin.net/r/4f09194m",
  })
 
- 50.times do |n|
- client.record({
+ 150.times do |n|
+ client.push({
  number: n,
  now: Time.now.utc,
  })
  end
-
- client.flush
data/lib/brow/client.rb CHANGED
@@ -12,6 +12,9 @@ module Brow
  # Private: Default # of items that can be in queue before we start dropping data.
  MAX_QUEUE_SIZE = 10_000
 
+ # Private: Default number of seconds to wait to shutdown worker thread.
+ SHUTDOWN_TIMEOUT = 5
+
  # Public: Create a new instance of a client.
  #
  # options - The Hash of options.
@@ -22,13 +25,17 @@ module Brow
 
  @worker_thread = nil
  @worker_mutex = Mutex.new
+ @pid = Process.pid
  @test = options[:test]
  @max_queue_size = options[:max_queue_size] || MAX_QUEUE_SIZE
  @logger = options.fetch(:logger) { Brow.logger }
  @queue = options.fetch(:queue) { Queue.new }
  @worker = options.fetch(:worker) { Worker.new(@queue, options) }
+ @shutdown_timeout = options.fetch(:shutdown_timeout) { SHUTDOWN_TIMEOUT }
 
- at_exit { @worker_thread && @worker_thread[:should_exit] = true }
+ if options.fetch(:shutdown_automatically, true)
+ at_exit { shutdown }
+ end
  end
 
  # Public: Synchronously waits until the worker has flushed the queue.
@@ -37,22 +44,46 @@ module Brow
  # specifically exit.
  def flush
  while !@queue.empty? || @worker.requesting?
- ensure_worker_running
+ ensure_threads_alive
  sleep(0.1)
  end
  end
 
- # Public: Enqueues the event.
+ def shutdown
+ if @worker_thread
+ begin
+ @worker_thread.join @shutdown_timeout
+ rescue => error
+ @logger.info("[brow]") { "Error shutting down worker thread: #{error.inspect}"}
+ end
+ end
+ end
+
+ # Public: Enqueues an event to eventually be transported to backend service.
  #
  # event - The Hash of event data.
  #
  # Returns Boolean of whether the item was added to the queue.
- def record(event)
- raise ArgumentError, "event must be a Hash" unless event.is_a?(Hash)
+ def push(item)
+ raise ArgumentError, "item must be a Hash" unless item.is_a?(Hash)
+
+ item = Brow::Utils.symbolize_keys(item)
+ item = Brow::Utils.isoify_dates(item)
 
- event = Brow::Utils.symbolize_keys(event)
- event = Brow::Utils.isoify_dates(event)
- enqueue event
+ if @test
+ test_queue << item
+ return true
+ end
+
+ ensure_threads_alive
+
+ if @queue.length < @max_queue_size
+ @queue << item
+ true
+ else
+ @logger.warn("[brow]") { "Queue is full, dropping events. The :max_queue_size configuration parameter can be increased to prevent this from happening." }
+ false
+ end
  end
 
  # Public: Returns the number of messages in the queue.
@@ -61,7 +92,7 @@ module Brow
  end
 
  # Public: For test purposes only. If test: true is passed to #initialize
- # then all recording of events will go to test queue in memory so they can
+ # then all pushing of events will go to test queue in memory so they can
  # be verified with assertions.
  def test_queue
  unless @test
@@ -73,36 +104,34 @@ module Brow
 
  private
 
- # Private: Enqueues the event.
- #
- # Returns Boolean of whether the item was added to the queue.
- def enqueue(action)
- if @test
- test_queue << action
- return true
- end
-
- if @queue.length < @max_queue_size
- @queue << action
- ensure_worker_running
+ def forked?
+ @pid != Process.pid
+ end
 
- true
- else
- @logger.warn 'Queue is full, dropping events. The :max_queue_size configuration parameter can be increased to prevent this from happening.'
- false
- end
+ def ensure_threads_alive
+ reset if forked?
+ ensure_worker_running
  end
 
  def ensure_worker_running
- return if worker_running?
- @worker_mutex.synchronize do
+ # If another thread is starting worker thread, then return early so this
+ # thread can enqueue and move on with life.
+ return unless @worker_mutex.try_lock
+
+ begin
  return if worker_running?
- @worker_thread = Thread.new do
- @worker.run
- end
+ @worker_thread = Thread.new { @worker.run }
+ ensure
+ @worker_mutex.unlock
  end
  end
 
+ def reset
+ @pid = Process.pid
+ @worker_mutex.unlock if @worker_mutex.locked?
+ @queue.clear
+ end
+
  def worker_running?
  @worker_thread && @worker_thread.alive?
  end
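
The hunks above add `shutdown`, an automatic at_exit hook, and fork detection. A usage sketch built only from the options visible in this diff (`:shutdown_automatically`, `:shutdown_timeout`, `:max_queue_size`); the URL is a placeholder.

    require "brow"

    client = Brow::Client.new({
      url: "https://example.com/events", # placeholder endpoint
      shutdown_automatically: false,     # skip the at_exit { shutdown } hook
      shutdown_timeout: 10,              # seconds to join the worker thread on shutdown
      max_queue_size: 50_000,            # raise this to avoid "Queue is full" drops
    })

    client.push({ number: 1, now: Time.now.utc })

    # After a fork, the next push goes through ensure_threads_alive, notices the
    # pid changed, resets the queue and mutex, and restarts the worker thread.
    client.shutdown # waits up to :shutdown_timeout seconds for the worker thread
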
data/lib/brow/message_batch.rb CHANGED
@@ -41,7 +41,7 @@ module Brow
  message_json_size = message_json.bytesize
 
  if message_too_big?(message_json_size)
- @logger.error('a message exceeded the maximum allowed size')
+ @logger.error("[brow]") { 'a message exceeded the maximum allowed size' }
  else
  @messages << message
  @json_size += message_json_size + 1 # One byte for the comma
data/lib/brow/transport.rb CHANGED
@@ -10,6 +10,8 @@ require_relative 'backoff_policy'
  module Brow
  class Transport
  RETRIES = 10
+ READ_TIMEOUT = 8
+ OPEN_TIMEOUT = 4
  HEADERS = {
  "Accept" => "application/json",
  "Content-Type" => "application/json",
@@ -18,8 +20,6 @@ module Brow
  "Client-Language-Version" => "#{RUBY_VERSION} p#{RUBY_PATCHLEVEL} (#{RUBY_RELEASE_DATE})",
  "Client-Platform" => RUBY_PLATFORM,
  "Client-Engine" => defined?(RUBY_ENGINE) ? RUBY_ENGINE : "",
- "Client-Pid" => Process.pid.to_s,
- "Client-Thread" => Thread.current.object_id.to_s,
  "Client-Hostname" => Socket.gethostname,
  }
 
@@ -39,32 +39,29 @@ module Brow
 
  @logger = options.fetch(:logger) { Brow.logger }
  @backoff_policy = options.fetch(:backoff_policy) {
- Brow::BackoffPolicy.new
+ Brow::BackoffPolicy.new(options)
  }
 
  @http = Net::HTTP.new(@uri.host, @uri.port)
  @http.use_ssl = @uri.scheme == "https"
- @http.read_timeout = options[:read_timeout] || 8
- @http.open_timeout = options[:open_timeout] || 4
+ @http.read_timeout = options[:read_timeout] || READ_TIMEOUT
+ @http.open_timeout = options[:open_timeout] || OPEN_TIMEOUT
  end
 
  # Sends a batch of messages to the API
  #
  # @return [Response] API response
  def send_batch(batch)
- @logger.debug("Sending request for #{batch.length} items")
+ @logger.debug("[brow]") { "Sending request for #{batch.length} items" }
 
  last_response, exception = retry_with_backoff(@retries) do
  response = send_request(batch)
- status_code = response.code.to_i
- should_retry = should_retry_request?(status_code, response.body)
- @logger.debug("Response status code: #{status_code}")
-
- [Response.new(status_code, nil), should_retry]
+ @logger.debug("[brow]") { "Response: status=#{response.code}, body=#{response.body}" }
+ [Response.new(response.code.to_i, nil), retry?(response)]
  end
 
  if exception
- @logger.error(exception.message)
+ @logger.error("[brow]") { exception.message }
  exception.backtrace.each { |line| @logger.error(line) }
  Response.new(-1, exception.to_s)
  else
@@ -79,18 +76,19 @@ module Brow
 
  private
 
- def should_retry_request?(status_code, body)
+ def retry?(response)
+ status_code = response.code.to_i
  if status_code >= 500
  # Server error. Retry and log.
- @logger.info("Server error: status=#{status_code}, body=#{body}")
+ @logger.info("[brow]") { "Server error: status=#{status_code}, body=#{response.body}" }
  true
  elsif status_code == 429
- # Rate limited
- @logger.info "Rate limit error"
+ # Rate limited. Retry and log.
+ @logger.info("[brow]") { "Rate limit error: body=#{response.body}" }
  true
  elsif status_code >= 400
  # Client error. Do not retry, but log.
- @logger.error("Client error: status=#{status_code}, body=#{body}")
+ @logger.error("[brow]") { "Client error: status=#{status_code}, body=#{response.body}" }
  false
  else
  false
@@ -112,13 +110,13 @@ module Brow
  result, should_retry = yield
  return [result, nil] unless should_retry
  rescue StandardError => error
- @logger.debug "Request error: #{error}"
+ @logger.debug("[brow]") { "Request error: #{error}" }
  should_retry = true
  caught_exception = error
  end
 
  if should_retry && (retries_remaining > 1)
- @logger.debug("Retrying request, #{retries_remaining} retries left")
+ @logger.debug("[brow]") { "Retrying request, #{retries_remaining} retries left" }
  sleep(@backoff_policy.next_interval.to_f / 1000)
  retry_with_backoff(retries_remaining - 1, &block)
  else
@@ -126,12 +124,15 @@ module Brow
  end
  end
 
- # Sends a request for the batch, returns [status_code, body]
  def send_request(batch)
- payload = batch.to_json
- @http.start unless @http.started? # Maintain a persistent connection
- request = Net::HTTP::Post.new(@uri.path, @headers)
- @http.request(request, payload)
+ headers = {
+ "Client-Pid" => Process.pid.to_s,
+ "Client-Thread" => Thread.current.object_id.to_s,
+ }.merge(@headers)
+
+ @http.start unless @http.started?
+ request = Net::HTTP::Post.new(@uri.path, headers)
+ @http.request(request, batch.to_json)
  end
  end
  end
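
Because `Client` passes its options hash to `Worker`, which passes it to `Transport` and now also to `BackoffPolicy.new(options)`, transport tuning rides along on the client constructor. A sketch using only the keys visible above (`:read_timeout`, `:open_timeout`); the URL is a placeholder, and any backoff-specific keys would flow through the same way even though their names are not shown in this diff.

    require "brow"

    client = Brow::Client.new({
      url: "https://example.com/events", # placeholder endpoint
      read_timeout: 8, # seconds Net::HTTP waits for a response (default READ_TIMEOUT)
      open_timeout: 4, # seconds Net::HTTP waits to open the connection (default OPEN_TIMEOUT)
    })
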
data/lib/brow/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  module Brow
- VERSION = "0.1.0"
+ VERSION = "0.2.0"
  end
data/lib/brow/worker.rb CHANGED
@@ -28,7 +28,6 @@ module Brow
  options = Brow::Utils.symbolize_keys(options)
  @on_error = options[:on_error] || DEFAULT_ON_ERROR
  @transport = options.fetch(:transport) { Transport.new(options) }
- @logger = options.fetch(:logger) { Brow.logger }
  @batch = options.fetch(:batch) { MessageBatch.new(max_size: options[:batch_size]) }
  end
 
data/lib/brow.rb CHANGED
@@ -10,13 +10,11 @@ module Brow
  def self.logger
  return @logger if @logger
 
- base_logger = if defined?(Rails)
+ @logger = if defined?(Rails)
  Rails.logger
  else
  Logger.new(STDOUT)
  end
-
- @logger = PrefixedLogger.new(base_logger, "[brow]")
  end
 
  # Public: Sets the logger instance to use for logging things.
@@ -26,4 +24,3 @@ module Brow
  end
 
  require_relative "brow/client"
- require_relative "brow/prefixed_logger"
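
With `PrefixedLogger` gone, call sites now pass "[brow]" as the progname and the message in a block, which any stdlib-compatible logger understands. A sketch of plugging in a custom logger, assuming the `Brow.logger=` writer implied by the "Sets the logger instance" comment above; the log path is a placeholder.

    require "logger"
    require "brow"

    Brow.logger = Logger.new("log/brow.log") # placeholder path

    # Inside the gem, calls now look like:
    #   @logger.debug("[brow]") { "Sending request for 10 items" }
    # The block is evaluated only when the level is enabled, and "[brow]"
    # appears as the progname in the log line.
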
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: brow
  version: !ruby/object:Gem::Version
- version: 0.1.0
+ version: 0.2.0
  platform: ruby
  authors:
  - John Nunemaker
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2021-10-20 00:00:00.000000000 Z
+ date: 2021-10-26 00:00:00.000000000 Z
  dependencies: []
  description:
  email:
@@ -30,7 +30,6 @@ files:
  - lib/brow/backoff_policy.rb
  - lib/brow/client.rb
  - lib/brow/message_batch.rb
- - lib/brow/prefixed_logger.rb
  - lib/brow/response.rb
  - lib/brow/test_queue.rb
  - lib/brow/transport.rb
data/lib/brow/prefixed_logger.rb DELETED
@@ -1,25 +0,0 @@
- module Brow
- # Internal: Wraps an existing logger and adds a prefix to all messages.
- class PrefixedLogger
- def initialize(logger, prefix)
- @logger = logger
- @prefix = prefix
- end
-
- def debug(message)
- @logger.debug("#{@prefix} #{message}")
- end
-
- def info(message)
- @logger.info("#{@prefix} #{message}")
- end
-
- def warn(message)
- @logger.warn("#{@prefix} #{message}")
- end
-
- def error(message)
- @logger.error("#{@prefix} #{message}")
- end
- end
- end