brow 0.2.0 → 0.3.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: ffdc8b811ca1be5ce149f0e21dc0c08a6f8a4f2e79368f20a69503ca33e2d997
-  data.tar.gz: 4f58ddc175db9c12b6fa77b13fe9f1d82a49100ea1d1cc143d335cb9a5db082b
+  metadata.gz: 94b1026b806df0a58a01aa0bc911ca756251dc5d0de510ae1c12d42665b00559
+  data.tar.gz: 55b2b5454086ac78e22681ced81bb6d500dd9c6537ebfcf67bbc9a445a2d9a79
 SHA512:
-  metadata.gz: e66c43a44fd5e10b3aba344941bbc046ed1943208da8de20bc2a96577d1642d94e3528586a1db3d7ee998c45e966bc8daa72de39933a8dfa7354a0f737c1e4b1
-  data.tar.gz: 704c98568783c393b58f438d23039003c816b7c363932785ceb2f747ddfca6e7da049708c7fd7e6c22f10e392b65928367bbd0d5667aea9720e7a7f7259747c2
+  metadata.gz: 2d23c1ff291ca611a65b1357392e2f4275502fc3467a629e85ff631f7585050095ef4cc86f0b86981fb0e7e790ce709bd2a0e2b511f0e706954cec6221ae40db
+  data.tar.gz: 884ffd8cebbcaf2681a1347a1115892676c86de12dcef693e5364531e37cef0d3700343cb8e4d2393270003c541d9efbd0e1c427fef904b66f475eb81ece2246
data/CHANGELOG.md CHANGED
@@ -1,9 +1,25 @@
 # Changelog
+
 All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.3.0] - 2021-10-29
+
+https://github.com/jnunemaker/brow/pull/4
+
+### Fixed
+
+- Fixed thread churn. Upon digging in, I realized the previous code was creating a new thread for roughly every batch, which is far from ideal. This changes it to a single worker thread that sits in a loop: when a batch is full, it transports it, and when shutdown happens, a shutdown message is enqueued and the worker breaks out of the loop.
+- Moved worker thread management to `Worker` from `Client`.
+- Backoff policy is now reset after `Transport#send_batch` completes. Previously it wasn't, which meant the next interval would climb to the max and stay there.
+
+### Changed
+
+- Switched to stringifying data keys instead of symbolizing them. Old versions of Ruby didn't GC symbols, so that was a memory leak. That may be fixed now, but strings work fine here, so let's roll with them.
+- Removed test mode and test queue. I didn't like this implementation and neither did @bkeepers. We'll come up with something new and better soon, like Brow::Clients::Memory.new or something.
+
 ## [0.2.0] - 2021-10-25
 
 ### Changed
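
The "thread churn" fix above boils down to one long-lived worker thread draining a queue until it sees a shutdown sentinel. A condensed sketch of that loop (simplified from the full worker.rb diff further down; names and parameters here are illustrative, not the gem's exact code):

SHUTDOWN = :__shutdown__ # stand-in for Worker::SHUTDOWN

def run(queue, transport, batch)
  loop do
    message = queue.pop
    if message == SHUTDOWN
      transport.send_batch(batch) unless batch.empty? # flush whatever is left
      break
    else
      batch << message
      transport.send_batch(batch) if batch.full?      # one thread, one batch at a time
    end
  end
ensure
  transport.shutdown
end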
data/Gemfile CHANGED
@@ -5,6 +5,7 @@ gemspec
 
 gem "rake", "~> 13.0"
 gem "minitest", "~> 5.0"
+gem "maxitest", "~> 4.1"
 gem "minitest-heat", "~> 0.0"
 gem "webmock", "~> 3.10.0"
 gem "rack", "~> 2.2.3"
data/examples/basic.rb CHANGED
@@ -1,12 +1,20 @@
 require_relative "../lib/brow"
 
 client = Brow::Client.new({
-  url: "https://requestbin.net/r/4f09194m",
+  url: "https://requestbin.net/r/2bp3p3vn",
+  batch_size: 10,
 })
 
-150.times do |n|
-  client.push({
-    number: n,
-    now: Time.now.utc,
-  })
+5.times do |n|
+  client.push(n: n, parent: true)
 end
+
+pid = fork {
+  15.times do |n|
+    client.push({
+      number: n,
+      now: Time.now.utc,
+    })
+  end
+}
+Process.waitpid pid, 0
data/lib/brow/backoff_policy.rb CHANGED
@@ -6,7 +6,7 @@ module Brow
     MIN_TIMEOUT_MS = 100
 
     # Private: The default maximum timeout between intervals in milliseconds.
-    MAX_TIMEOUT_MS = 10000
+    MAX_TIMEOUT_MS = 10_000
 
     # Private: The value to multiply the current interval with for each
     # retry attempt.
@@ -16,6 +16,12 @@ module Brow
     # retry interval.
     RANDOMIZATION_FACTOR = 0.5
 
+    # Private
+    attr_reader :min_timeout_ms, :max_timeout_ms, :multiplier, :randomization_factor
+
+    # Private
+    attr_reader :attempts
+
     # Public: Create new instance of backoff policy.
     #
     # options - The Hash of options.
@@ -44,6 +50,10 @@ module Brow
       [interval, @max_timeout_ms].min
     end
 
+    def reset
+      @attempts = 0
+    end
+
     private
 
     def add_jitter(base, randomization_factor)
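
The new reset is what keeps the retry interval from ratcheting up permanently: Transport#send_batch (see its diff below) calls it in an ensure block once a batch is done. A rough sketch, assuming the default constructor arguments; exact intervals depend on the multiplier and jitter:

policy = Brow::BackoffPolicy.new

begin
  3.times { policy.next_interval } # each call returns a longer interval, capped at MAX_TIMEOUT_MS
ensure
  policy.reset                     # without this, the next batch would start near the cap
end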
data/lib/brow/client.rb CHANGED
@@ -1,139 +1,69 @@
 # frozen_string_literal: true
 
-require 'thread'
 require 'time'
 
 require_relative 'utils'
 require_relative 'worker'
-require_relative 'test_queue'
 
 module Brow
   class Client
-    # Private: Default # of items that can be in queue before we start dropping data.
-    MAX_QUEUE_SIZE = 10_000
-
-    # Private: Default number of seconds to wait to shutdown worker thread.
-    SHUTDOWN_TIMEOUT = 5
-
     # Public: Create a new instance of a client.
     #
     # options - The Hash of options.
+    #   :url - The URL where all batches of data should be transported.
     #   :max_queue_size - The maximum number of calls to be remain queued.
+    #   :logger - The Logger to use to log useful information about what is
+    #             going on.
+    #   :queue - The Queue to use to store data until it can be batched up and
+    #            transported to the API.
+    #   :worker - The Worker that will pop items off the queue, batch them up
+    #             and transport them to the API.
+    #   :transport - The Transport to use to transport batches to the API.
+    #   :headers - The Hash of headers to include when transporting batches to
+    #              the API. These could be used for auth or whatever.
+    #   :retries - The Integer number of times the transport should retry a call
+    #              before giving up.
+    #   :read_timeout - The number of seconds to wait when reading data before
+    #                   giving up.
+    #   :open_timeout - The number of seconds to wait when opening a connection
+    #                   to the API.
+    #   :backoff_policy - The BackoffPolicy to use to determine when the next
+    #                     retry should occur when the transport fails to send a
+    #                     batch of data to the API.
+    #   :min_timeout_ms - The minimum number of milliseconds to wait before
+    #                     retrying a failed call to the API.
+    #   :max_timeout_ms - The maximum number of milliseconds to wait before
+    #                     retrying a failed call to the API.
+    #   :multiplier - The value to multily the current interval with for each
+    #                 retry attempt.
+    #   :randomization_factor - The value to use to create a range of jitter
+    #                           around the retry interval.
+    #   :batch - The MessageBatch used to batch up several events to be
+    #            transported in one call to the API.
+    #   :shutdown_timeout - The number of seconds to wait for the worker thread
+    #                       to join when shutting down.
+    #   :shutdown_automatically - Should the worker shutdown automatically or
+    #                             manually. If true, shutdown is automatic. If
+    #                             false, you'll need to handle this on your own.
+    #   :max_size - The maximum number of items a batch can contain before it
+    #               should be transported to the API. Only used if not :batch
+    #               is provided.
    #   :on_error - The Proc that handles error calls from the API.
    def initialize(options = {})
      options = Brow::Utils.symbolize_keys(options)
-
-      @worker_thread = nil
-      @worker_mutex = Mutex.new
-      @pid = Process.pid
-      @test = options[:test]
-      @max_queue_size = options[:max_queue_size] || MAX_QUEUE_SIZE
-      @logger = options.fetch(:logger) { Brow.logger }
-      @queue = options.fetch(:queue) { Queue.new }
-      @worker = options.fetch(:worker) { Worker.new(@queue, options) }
-      @shutdown_timeout = options.fetch(:shutdown_timeout) { SHUTDOWN_TIMEOUT }
-
-      if options.fetch(:shutdown_automatically, true)
-        at_exit { shutdown }
-      end
+      @worker = options.fetch(:worker) { Worker.new(options) }
    end
 
-    # Public: Synchronously waits until the worker has flushed the queue.
-    #
-    # Use only for scripts which are not long-running, and will
-    # specifically exit.
-    def flush
-      while !@queue.empty? || @worker.requesting?
-        ensure_threads_alive
-        sleep(0.1)
-      end
-    end
-
-    def shutdown
-      if @worker_thread
-        begin
-          @worker_thread.join @shutdown_timeout
-        rescue => error
-          @logger.info("[brow]") { "Error shutting down worker thread: #{error.inspect}"}
-        end
-      end
-    end
+    # Private
+    attr_reader :worker
 
    # Public: Enqueues an event to eventually be transported to backend service.
    #
-    # event - The Hash of event data.
+    # data - The Hash of data.
    #
-    # Returns Boolean of whether the item was added to the queue.
-    def push(item)
-      raise ArgumentError, "item must be a Hash" unless item.is_a?(Hash)
-
-      item = Brow::Utils.symbolize_keys(item)
-      item = Brow::Utils.isoify_dates(item)
-
-      if @test
-        test_queue << item
-        return true
-      end
-
-      ensure_threads_alive
-
-      if @queue.length < @max_queue_size
-        @queue << item
-        true
-      else
-        @logger.warn("[brow]") { "Queue is full, dropping events. The :max_queue_size configuration parameter can be increased to prevent this from happening." }
-        false
-      end
-    end
-
-    # Public: Returns the number of messages in the queue.
-    def queued_messages
-      @queue.length
-    end
-
-    # Public: For test purposes only. If test: true is passed to #initialize
-    # then all pushing of events will go to test queue in memory so they can
-    # be verified with assertions.
-    def test_queue
-      unless @test
-        raise 'Test queue only available when setting :test to true.'
-      end
-
-      @test_queue ||= TestQueue.new
-    end
-
-    private
-
-    def forked?
-      @pid != Process.pid
-    end
-
-    def ensure_threads_alive
-      reset if forked?
-      ensure_worker_running
-    end
-
-    def ensure_worker_running
-      # If another thread is starting worker thread, then return early so this
-      # thread can enqueue and move on with life.
-      return unless @worker_mutex.try_lock
-
-      begin
-        return if worker_running?
-        @worker_thread = Thread.new { @worker.run }
-      ensure
-        @worker_mutex.unlock
-      end
-    end
-
-    def reset
-      @pid = Process.pid
-      @worker_mutex.unlock if @worker_mutex.locked?
-      @queue.clear
-    end
-
-    def worker_running?
-      @worker_thread && @worker_thread.alive?
+    # Returns Boolean of whether the data was added to the queue.
+    def push(data)
+      worker.push(data)
    end
  end
 end
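
For reference, a small usage sketch based on the option documentation added above. The endpoint, header token, and error handler here are placeholders, not part of the gem:

require "brow"

client = Brow::Client.new({
  url: "https://example.com/events",                      # placeholder endpoint
  batch_size: 100,                                        # flush once 100 items are batched
  headers: { "Authorization" => "Bearer example-token" }, # e.g. auth headers
  retries: 5,
  on_error: ->(response) { warn "brow error: #{response.status}" },
})

client.push(user_id: 1, action: "signup", now: Time.now.utc)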
data/lib/brow/message_batch.rb CHANGED
@@ -22,6 +22,8 @@ module Brow
 
     def_delegators :@messages, :empty?
     def_delegators :@messages, :length
+    def_delegators :@messages, :size
+    def_delegators :@messages, :count
 
     attr_reader :uuid, :json_size
 
data/lib/brow/transport.rb CHANGED
@@ -9,21 +9,17 @@ require_relative 'backoff_policy'
 
 module Brow
   class Transport
+    # Private: Default number of times to retry request.
     RETRIES = 10
+
+    # Private: Default read timeout on requests.
     READ_TIMEOUT = 8
+
+    # Private: Default open timeout on requests.
     OPEN_TIMEOUT = 4
-    HEADERS = {
-      "Accept" => "application/json",
-      "Content-Type" => "application/json",
-      "User-Agent" => "brow-ruby/#{Brow::VERSION}",
-      "Client-Language" => "ruby",
-      "Client-Language-Version" => "#{RUBY_VERSION} p#{RUBY_PATCHLEVEL} (#{RUBY_RELEASE_DATE})",
-      "Client-Platform" => RUBY_PLATFORM,
-      "Client-Engine" => defined?(RUBY_ENGINE) ? RUBY_ENGINE : "",
-      "Client-Hostname" => Socket.gethostname,
-    }
-
-    attr_reader :url
+
+    # Private
+    attr_reader :url, :headers, :retries, :logger, :backoff_policy, :http
 
     def initialize(options = {})
       @url = options[:url] || raise(ArgumentError, ":url is required to be present so we know where to send batches")
@@ -34,7 +30,7 @@ module Brow
         @uri.path = "/"
       end
 
-      @headers = HEADERS.merge(options[:headers] || {})
+      @headers = options[:headers] || {}
       @retries = options[:retries] || RETRIES
 
       @logger = options.fetch(:logger) { Brow.logger }
@@ -52,25 +48,29 @@
     #
     # @return [Response] API response
     def send_batch(batch)
-      @logger.debug("[brow]") { "Sending request for #{batch.length} items" }
+      logger.debug("[brow]") { "Sending request for #{batch.length} items" }
 
-      last_response, exception = retry_with_backoff(@retries) do
+      last_response, exception = retry_with_backoff(retries) do
        response = send_request(batch)
-        @logger.debug("[brow]") { "Response: status=#{response.code}, body=#{response.body}" }
+        logger.debug("[brow]") { "Response: status=#{response.code}, body=#{response.body}" }
        [Response.new(response.code.to_i, nil), retry?(response)]
      end
 
      if exception
-        @logger.error("[brow]") { exception.message }
-        exception.backtrace.each { |line| @logger.error(line) }
+        logger.error("[brow]") { exception.message }
+        exception.backtrace.each { |line| logger.error(line) }
        Response.new(-1, exception.to_s)
      else
        last_response
      end
+    ensure
+      backoff_policy.reset
+      batch.clear
    end
 
    # Closes a persistent connection if it exists
    def shutdown
+      logger.info("[brow]") { "Transport shutting down" }
      @http.finish if @http.started?
    end
 
@@ -80,15 +80,15 @@
      status_code = response.code.to_i
      if status_code >= 500
        # Server error. Retry and log.
-        @logger.info("[brow]") { "Server error: status=#{status_code}, body=#{response.body}" }
+        logger.info("[brow]") { "Server error: status=#{status_code}, body=#{response.body}" }
        true
      elsif status_code == 429
        # Rate limited. Retry and log.
-        @logger.info("[brow]") { "Rate limit error: body=#{response.body}" }
+        logger.info("[brow]") { "Rate limit error: body=#{response.body}" }
        true
      elsif status_code >= 400
        # Client error. Do not retry, but log.
-        @logger.error("[brow]") { "Client error: status=#{status_code}, body=#{response.body}" }
+        logger.error("[brow]") { "Client error: status=#{status_code}, body=#{response.body}" }
        false
      else
        false
@@ -110,13 +110,13 @@
        result, should_retry = yield
        return [result, nil] unless should_retry
      rescue StandardError => error
-        @logger.debug("[brow]") { "Request error: #{error}" }
+        logger.debug("[brow]") { "Request error: #{error}" }
        should_retry = true
        caught_exception = error
      end
 
      if should_retry && (retries_remaining > 1)
-        @logger.debug("[brow]") { "Retrying request, #{retries_remaining} retries left" }
+        logger.debug("[brow]") { "Retrying request, #{retries_remaining} retries left" }
        sleep(@backoff_policy.next_interval.to_f / 1000)
        retry_with_backoff(retries_remaining - 1, &block)
      else
@@ -126,6 +126,14 @@
 
    def send_request(batch)
      headers = {
+        "Accept" => "application/json",
+        "Content-Type" => "application/json",
+        "User-Agent" => "brow-ruby/#{Brow::VERSION}",
+        "Client-Language" => "ruby",
+        "Client-Language-Version" => "#{RUBY_VERSION} p#{RUBY_PATCHLEVEL} (#{RUBY_RELEASE_DATE})",
+        "Client-Platform" => RUBY_PLATFORM,
+        "Client-Engine" => defined?(RUBY_ENGINE) ? RUBY_ENGINE : "",
+        "Client-Hostname" => Socket.gethostname,
        "Client-Pid" => Process.pid.to_s,
        "Client-Thread" => Thread.current.object_id.to_s,
      }.merge(@headers)
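
The retry decision shown above comes down to: retry on 5xx and 429, give up on other 4xx, and treat everything else as success. A standalone restatement for reference (a sketch, not the gem's code):

# Sketch restating the classification in Transport#retry? from the diff above.
def retryable_status?(status_code)
  return true if status_code >= 500 # server errors: retry
  return true if status_code == 429 # rate limited: retry
  false                             # other 4xx (and 2xx/3xx): do not retry
end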
data/lib/brow/utils.rb CHANGED
@@ -13,6 +13,13 @@ module Brow
       end
     end
 
+    # Internal: Return a new hash with keys converted to strings
+    def stringify_keys(hash)
+      hash.each_with_object({}) do |(k, v), memo|
+        memo[k.to_s] = v
+      end
+    end
+
     # Internal: Returns a new hash with all the date values in the into
     # iso8601 strings
     def isoify_dates(hash)
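
A quick illustration of the new helper (output shown as a comment):

Brow::Utils.stringify_keys({ number: 1, now: Time.now.utc })
# => {"number" => 1, "now" => #<Time ...>}  keys become strings, values are untouched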
data/lib/brow/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Brow
-  VERSION = "0.2.0"
+  VERSION = "0.3.0"
 end
data/lib/brow/worker.rb CHANGED
@@ -1,5 +1,7 @@
 # frozen_string_literal: true
 
+require 'thread'
+
 require_relative 'message_batch'
 require_relative 'transport'
 require_relative 'utils'
@@ -7,8 +9,24 @@ require_relative 'utils'
 module Brow
   # Internal: The Worker to pull items off the queue and put them
   class Worker
+    # Private: Noop default on error proc.
     DEFAULT_ON_ERROR = proc { |response| }
 
+    # Private: Object to enqueue to signal shutdown for worker.
+    SHUTDOWN = :__ಠ_ಠ__
+
+    # Private: Default number of seconds to wait to shutdown worker thread.
+    SHUTDOWN_TIMEOUT = 5
+
+    # Private: Default # of items that can be in queue before we start dropping data.
+    MAX_QUEUE_SIZE = 10_000
+
+    # Private
+    attr_reader :thread, :queue, :pid, :mutex, :on_error, :batch_size, :max_queue_size
+
+    # Private
+    attr_reader :logger, :transport, :shutdown_timeout
+
     # Internal: Creates a new worker
     #
     # The worker continuously takes messages off the queue and makes requests to
@@ -16,50 +34,130 @@ module Brow
     #
     # queue - Queue synchronized between client and worker
     # options - The Hash of worker options.
-    #   batch_size - Fixnum of how many items to send in a batch.
-    #   on_error - Proc of what to do on an error.
-    #   transport - The Transport object to deliver batches.
-    #   logger - The Logger object for all log messages.
-    #   batch - The MessageBatch to collect messages and deliver batches
-    #           via Transport.
-    def initialize(queue, options = {})
-      @queue = queue
-      @lock = Mutex.new
+    #   :on_error - Proc of what to do on an error.
+    #   :batch_size - Fixnum of how many items to send in a batch.
+    #   :transport - The Transport object to deliver batches.
+    #   :logger - The Logger object for all log messages.
+    #   :batch - The MessageBatch to collect messages and deliver batches
+    #            via Transport.
+    #   :shutdown_timeout - The number of seconds to wait for the worker thread
+    #                       to join when shutting down.
+    #   :shutdown_automatically - Should the client shutdown automatically or
+    #                             manually. If true, shutdown is automatic. If
+    #                             false, you'll need to handle this on your own.
+    def initialize(options = {})
+      @thread = nil
+      @queue = options.fetch(:queue) { Queue.new }
+      @pid = Process.pid
+      @mutex = Mutex.new
      options = Brow::Utils.symbolize_keys(options)
      @on_error = options[:on_error] || DEFAULT_ON_ERROR
+      @batch_size = options[:batch_size]
+      @max_queue_size = options.fetch(:max_queue_size) { MAX_QUEUE_SIZE }
+      @logger = options.fetch(:logger) { Brow.logger }
      @transport = options.fetch(:transport) { Transport.new(options) }
-      @batch = options.fetch(:batch) { MessageBatch.new(max_size: options[:batch_size]) }
+      @shutdown_timeout = options.fetch(:shutdown_timeout) { SHUTDOWN_TIMEOUT }
+
+      if options.fetch(:shutdown_automatically, true)
+        at_exit { stop }
+      end
+    end
+
+    def push(data)
+      raise ArgumentError, "data must be a Hash" unless data.is_a?(Hash)
+      start
+
+      data = Utils.isoify_dates(Utils.stringify_keys(data))
+
+      if queue.length < max_queue_size
+        queue << data
+        true
+      else
+        logger.warn("[brow]") { "Queue is full, dropping events. The :max_queue_size configuration parameter can be increased to prevent this from happening." }
+        false
+      end
+    end
+
+    def start
+      reset if forked?
+      ensure_worker_running
+    end
+
+    def stop
+      queue << SHUTDOWN
+
+      if @thread
+        begin
+          if @thread.join(shutdown_timeout)
+            logger.info("[brow]") { "Worker thread [#{@thread.object_id}] joined sucessfully" }
+          else
+            logger.info("[brow]") { "Worker thread [#{@thread.object_id}] did not join successfully" }
+          end
+        rescue => error
+          logger.info("[brow]") { "Worker thread [#{@thread.object_id}] error shutting down: #{error.inspect}" }
+        end
+      end
    end
 
    # Internal: Continuously runs the loop to check for new events
    def run
-      until Thread.current[:should_exit]
-        return if @queue.empty?
+      batch = MessageBatch.new(max_size: batch_size)
 
-        @lock.synchronize do
-          consume_message_from_queue! until @batch.full? || @queue.empty?
-        end
+      loop do
+        message = queue.pop
 
-        response = @transport.send_batch @batch
-        @on_error.call(response) unless response.status == 200
+        case message
+        when SHUTDOWN
+          send_batch(batch) unless batch.empty?
+          break
+        else
+          begin
+            batch << message
+          rescue MessageBatch::JSONGenerationError => error
+            on_error.call(Response.new(-1, error))
+          end
 
-        @lock.synchronize { @batch.clear }
+          send_batch(batch) if batch.full?
+        end
      end
    ensure
-      @transport.shutdown
+      transport.shutdown
    end
 
-    # Internal: Check whether we have outstanding requests.
-    def requesting?
-      @lock.synchronize { !@batch.empty? }
+    private
+
+    def forked?
+      pid != Process.pid
    end
 
-    private
+    def ensure_worker_running
+      # If another thread is starting worker thread, then return early so this
+      # thread can enqueue and move on with life.
+      return unless mutex.try_lock
+
+      begin
+        return if @thread && @thread.alive?
+        @thread = Thread.new { run }
+        logger.debug("[brow]") { "Worker thread [#{@thread.object_id}] started" }
+      ensure
+        mutex.unlock
+      end
+    end
+
+    def reset
+      @pid = Process.pid
+      mutex.unlock if mutex.locked?
+      queue.clear
    end
 
+    def send_batch(batch)
+      response = transport.send_batch(batch)
+
+      unless response.status == 200
+        on_error.call(response)
+      end
 
-    def consume_message_from_queue!
-      @batch << @queue.pop
-    rescue MessageBatch::JSONGenerationError => error
-      @on_error.call(Response.new(-1, error))
+      response
    end
  end
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: brow
 version: !ruby/object:Gem::Version
-  version: 0.2.0
+  version: 0.3.0
 platform: ruby
 authors:
 - John Nunemaker
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2021-10-26 00:00:00.000000000 Z
+date: 2021-10-29 00:00:00.000000000 Z
 dependencies: []
 description:
 email:
@@ -31,7 +31,6 @@ files:
 - lib/brow/client.rb
 - lib/brow/message_batch.rb
 - lib/brow/response.rb
-- lib/brow/test_queue.rb
 - lib/brow/transport.rb
 - lib/brow/utils.rb
 - lib/brow/version.rb
data/lib/brow/test_queue.rb DELETED
@@ -1,29 +0,0 @@
-# frozen_string_literal: true
-
-module Brow
-  # Public: The test queue to use if the `Client` is in test mode. Keeps all
-  # messages in an array so you can add assertions.
-  #
-  # Be sure to reset before each test case.
-  class TestQueue
-    attr_reader :messages
-
-    def initialize
-      reset
-    end
-
-    def count
-      messages.count
-    end
-    alias_method :size, :count
-    alias_method :length, :count
-
-    def <<(message)
-      messages << message
-    end
-
-    def reset
-      @messages = []
-    end
-  end
-end