trifle-stats 1.6.0 → 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. checksums.yaml +4 -4
  2. data/.devops/docker/local/docker-compose.yml +3 -3
  3. data/.gitignore +2 -0
  4. data/Gemfile.lock +1 -1
  5. data/README.md +69 -92
  6. data/docker-compose.yml +1 -0
  7. data/lib/trifle/stats/aggregator/{avg.rb → mean.rb} +2 -2
  8. data/lib/trifle/stats/buffer.rb +380 -0
  9. data/lib/trifle/stats/configuration.rb +54 -8
  10. data/lib/trifle/stats/driver/README.md +2 -2
  11. data/lib/trifle/stats/driver/mongo.rb +101 -36
  12. data/lib/trifle/stats/driver/postgres.rb +150 -33
  13. data/lib/trifle/stats/driver/process.rb +2 -2
  14. data/lib/trifle/stats/driver/redis.rb +26 -3
  15. data/lib/trifle/stats/driver/sqlite.rb +166 -34
  16. data/lib/trifle/stats/formatter/category.rb +38 -11
  17. data/lib/trifle/stats/formatter/path_utils.rb +99 -0
  18. data/lib/trifle/stats/formatter/timeline.rb +28 -6
  19. data/lib/trifle/stats/nocturnal/key.rb +52 -0
  20. data/lib/trifle/stats/nocturnal/parser.rb +34 -0
  21. data/lib/trifle/stats/nocturnal.rb +156 -122
  22. data/lib/trifle/stats/nocturnal_legacy.rb +161 -0
  23. data/lib/trifle/stats/operations/status/beam.rb +1 -1
  24. data/lib/trifle/stats/operations/timeseries/increment.rb +7 -6
  25. data/lib/trifle/stats/operations/timeseries/set.rb +7 -6
  26. data/lib/trifle/stats/operations/timeseries/values.rb +7 -4
  27. data/lib/trifle/stats/transponder/add.rb +30 -0
  28. data/lib/trifle/stats/transponder/divide.rb +30 -0
  29. data/lib/trifle/stats/transponder/max.rb +31 -0
  30. data/lib/trifle/stats/transponder/mean.rb +31 -0
  31. data/lib/trifle/stats/transponder/min.rb +31 -0
  32. data/lib/trifle/stats/transponder/multiply.rb +30 -0
  33. data/lib/trifle/stats/transponder/ratio.rb +9 -10
  34. data/lib/trifle/stats/transponder/standard_deviation.rb +11 -12
  35. data/lib/trifle/stats/transponder/subtract.rb +30 -0
  36. data/lib/trifle/stats/transponder/sum.rb +28 -0
  37. data/lib/trifle/stats/version.rb +1 -1
  38. data/lib/trifle/stats.rb +15 -14
  39. metadata +17 -5
  40. data/lib/trifle/stats/operations/timeseries/classify.rb +0 -48
  41. data/lib/trifle/stats/transponder/average.rb +0 -31
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: eece6621a74a0dfc38e7e1932897ad0481e3b85744ec85d016320cc6c7e5b395
4
- data.tar.gz: a90a4b7bf5e885fdc77af3638b2fcb0fdef0f48e17001b2da925161164fd31f4
3
+ metadata.gz: cace74102291a5e8b9d1de50cc489ea5ca0514e7b23a4a73fb218f9626569f44
4
+ data.tar.gz: 7149e5a8928348b7631897abb94c33beca2963b9e1a4d8c1490168e2a2bf4e4b
5
5
  SHA512:
6
- metadata.gz: 7bb55a42c115a0e353277b7445f33db93bc16a6011202de515adb63126d0d154615bbc2b13bfad45bd6ca990aedec32de4911c5843553b2c5148676c01d21b99
7
- data.tar.gz: 475e37cab204a8b07ea90ead1c209cb784844ba3683884362f284d41c0f2e8170f9f7ec858125dece018312561d934efd555a41ac6457ec16af2d31e36be0695
6
+ metadata.gz: 6578def9715b7cdee5c236ea592f482888c0c29304224bc5cdae419df49900613cd115a0c5f3e2e920f04aea02cdbe433ad0742b6ceaf981a3a46ecd5c3c541b
7
+ data.tar.gz: 4d257ea260be8adc9d8a1868199cf1ceb983fc240312b13dc63444bf21ff1c0c2493aa2118a80001c3419758536a2d7e7678bb852b2583930f1e0b58f72b17ad
@@ -12,7 +12,7 @@ services:
12
12
  app:
13
13
  command: /bin/sh -c "while sleep 1000; do :; done"
14
14
  build:
15
- context: ../../..
15
+ context: .
16
16
  dockerfile: .devops/docker/local/Dockerfile
17
17
  depends_on:
18
18
  - postgres
@@ -26,5 +26,5 @@ services:
26
26
  expose:
27
27
  - 4000
28
28
  volumes:
29
- - ../../..:/workspaces/stats
30
- working_dir: /workspaces/stats
29
+ - ..:/workspaces
30
+ working_dir: /workspaces/trifle-stats
data/.gitignore CHANGED
@@ -13,3 +13,5 @@
13
13
  stats.db
14
14
  .DS_Store
15
15
  spec/performance/.byebug_history
16
+ stats_joined.db
17
+ stats_separated.db
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
1
1
  PATH
2
2
  remote: .
3
3
  specs:
4
- trifle-stats (1.6.0)
4
+ trifle-stats (2.3.0)
5
5
  tzinfo (~> 2.0)
6
6
 
7
7
  GEM
data/README.md CHANGED
@@ -1,18 +1,13 @@
1
1
  # Trifle::Stats
2
2
 
3
- [![Gem Version](https://badge.fury.io/rb/trifle-stats.svg)](https://badge.fury.io/rb/trifle-stats)
4
- ![Ruby](https://github.com/trifle-io/trifle-stats/workflows/Ruby/badge.svg?branch=main)
5
- [![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/trifle-io/trifle-stats)
3
+ [![Gem Version](https://badge.fury.io/rb/trifle-stats.svg)](https://rubygems.org/gems/trifle-stats)
4
+ [![Ruby](https://github.com/trifle-io/trifle-stats/workflows/Ruby/badge.svg?branch=main)](https://github.com/trifle-io/trifle-stats)
6
5
 
7
- Simple analytics backed by Redis, Postgres, MongoDB, Google Analytics, Segment, or whatever. [^1]
8
-
9
- `Trifle::Stats` is a _way too_ simple timeline analytics that helps you track custom metrics. Automatically increments counters for each enabled range. It supports timezones and different week beginning.
10
-
11
- [^1]: TBH only Redis, Postgres and MongoDB for now 💔.
6
+ Simple analytics backed by Redis, Postgres, MongoDB, Google Analytics, Segment, or whatever. It gets you from having a bunch of events occurring within a few minutes to being able to say what happened on 25th January 2021.
12
7
 
13
8
  ## Documentation
14
9
 
15
- You can find guides and documentation at https://trifle.io/trifle-stats
10
+ For comprehensive guides, API reference, and examples, visit [trifle.io/trifle-stats-rb](https://trifle.io/trifle-stats-rb)
16
11
 
17
12
  ## Installation
18
13
 
@@ -24,137 +19,119 @@ gem 'trifle-stats'
24
19
 
25
20
  And then execute:
26
21
 
27
- ```sh
22
+ ```bash
28
23
  $ bundle install
29
24
  ```
30
25
 
31
26
  Or install it yourself as:
32
27
 
33
- ```sh
28
+ ```bash
34
29
  $ gem install trifle-stats
35
30
  ```
36
31
 
37
- Depending on driver you would like to use, make sure you add required gems into your `Gemfile`.
38
- ```ruby
39
- gem 'mongo', '>= 2.14.0'
40
- gem 'pg', '>= 1.2'
41
- gem 'redis', '>= 4.2'
42
- ```
43
-
44
- ## Usage
32
+ ## Quick Start
45
33
 
46
- You don't need to use it with Rails, but you still need to run `Trifle::Stats.configure`. If youre running it with Rails, create `config/initializers/trifle-stats.rb` and configure the gem.
34
+ ### 1. Configure
47
35
 
48
36
  ```ruby
37
+ require 'trifle/stats'
38
+
49
39
  Trifle::Stats.configure do |config|
50
- config.driver = Trifle::Stats::Driver::Redis.new
51
- config.track_ranges = [:hour, :day]
52
- config.time_zone = 'Europe/Bratislava'
53
- config.beginning_of_week = :monday
40
+ config.driver = Trifle::Stats::Driver::Redis.new(Redis.new)
41
+ config.granularities = ['1m', '1h', '1d', '1w', '1mo', '1q', '1y']
54
42
  end
55
43
  ```
56
44
 
57
- ### Track values
58
-
59
- Track your first metrics
45
+ ### 2. Track events
60
46
 
61
47
  ```ruby
62
- Trifle::Stats.track(key: 'event::logs', at: Time.now, values: {count: 1, duration: 2, lines: 241})
63
- => [{2021-01-25 16:00:00 +0100=>{:count=>1, :duration=>2, :lines=>241}}, {2021-01-25 00:00:00 +0100=>{:count=>1, :duration=>2, :lines=>241}}]
48
+ Trifle::Stats.track(key: 'event::logs', at: Time.now, values: { count: 1, duration: 2.11 })
64
49
  ```
65
50
 
66
- Then do it few more times
51
+ ### 3. Retrieve values
67
52
 
68
53
  ```ruby
69
- Trifle::Stats.track(key: 'event::logs', at: Time.now, values: {count: 1, duration: 1, lines: 56})
70
- => [{2021-01-25 16:00:00 +0100=>{:count=>1, :duration=>1, :lines=>56}}, {2021-01-25 00:00:00 +0100=>{:count=>1, :duration=>1, :lines=>56}}]
71
- Trifle::Stats.track(key: 'event::logs', at: Time.now, values: {count: 1, duration: 5, lines: 361})
72
- => [{2021-01-25 16:00:00 +0100=>{:count=>1, :duration=>5, :lines=>361}}, {2021-01-25 00:00:00 +0100=>{:count=>1, :duration=>5, :lines=>361}}]
54
+ Trifle::Stats.values(key: 'event::logs', from: 1.month.ago, to: Time.now, granularity: :day)
55
+ #=> {:at=>[Wed, 25 Jan 2023 00:00:00 +0000], :values=>[{"count"=>1, "duration"=>2.11}]}
73
56
  ```
74
57
 
75
- You can also store nested counters like
58
+ ## Drivers
76
59
 
77
- ```ruby
78
- Trifle::Stats.track(key: 'event::logs', at: Time.now, values: {
79
- count: 1,
80
- duration: {
81
- parsing: 21,
82
- compression: 8,
83
- upload: 1
84
- },
85
- lines: 25432754
86
- })
87
- ```
60
+ Trifle::Stats supports multiple backends:
88
61
 
89
- #### Get values
62
+ - **Redis** - Fast, in-memory storage
63
+ - **Postgres** - SQL database with JSONB support
64
+ - **SQLite** - SQL database in a file
65
+ - **MongoDB** - Document database
66
+ - **Process** - Thread-safe in-memory storage (development/testing)
67
+ - **Dummy** - No-op driver for disabled analytics
90
68
 
91
- Retrieve your values for specific `range`. Adding increments above will return sum of all the values you've tracked.
69
+ ## Features
92
70
 
93
- ```ruby
94
- Trifle::Stats.values(key: 'event::logs', from: Time.now, to: Time.now, range: :day)
95
- => {:at=>[2021-01-25 00:00:00 +0200], :values=>[{"count"=>3, "duration"=>8, "lines"=>658}]}
96
- ```
71
+ - **Multiple time granularities** - Track data across different time periods
72
+ - **Custom aggregators** - Sum, average, min, max with custom logic
73
+ - **Series operations** - Advanced data manipulation and calculations
74
+ - **Performance optimized** - Efficient storage and retrieval patterns
75
+ - **Buffered writes** - Queue metrics locally before flushing to the driver
76
+ - **Driver flexibility** - Switch between storage backends easily
97
77
 
98
- ### Assert values
78
+ ## Buffered Persistence
99
79
 
100
- Asserting values works same way like incrementing, but instead of increment, it sets the value. Duh.
80
+ Every `track/assert/assort` call can be buffered before touching the driver. The buffer is enabled by
81
+ default and flushes on an interval, when the queue reaches a configurable size, and again on shutdown
82
+ (`SIGTERM`/`at_exit`).
101
83
 
102
- Set your first metrics
84
+ Available configuration options:
103
85
 
104
- ```ruby
105
- Trifle::Stats.assert(key: 'event::logs', at: Time.now, values: {count: 1, duration: 2, lines: 241})
106
- => [{2021-01-25 16:00:00 +0100=>{:count=>1, :duration=>2, :lines=>241}}, {2021-01-25 00:00:00 +0100=>{:count=>1, :duration=>2, :lines=>241}}]
107
- ```
86
+ - `buffer_enabled` (default: `true`) – Disable to write-through synchronously
87
+ - `buffer_duration` (default: `1` second) – Maximum time between automatic flushes
88
+ - `buffer_size` (default: `256`) – Maximum queued actions before forcing a flush
89
+ - `buffer_aggregate` (default: `true`) – Combine repeated operations on the same key set
108
90
 
109
- Then do it few more times
91
+ Example:
110
92
 
111
93
  ```ruby
112
- Trifle::Stats.assert(key: 'event::logs', at: Time.now, values: {count: 1, duration: 1, lines: 56})
113
- => [{2021-01-25 16:00:00 +0100=>{:count=>1, :duration=>1, :lines=>56}}, {2021-01-25 00:00:00 +0100=>{:count=>1, :duration=>1, :lines=>56}}]
114
- Trifle::Stats.assert(key: 'event::logs', at: Time.now, values: {count: 1, duration: 5, lines: 361})
115
- => [{2021-01-25 16:00:00 +0100=>{:count=>1, :duration=>5, :lines=>361}}, {2021-01-25 00:00:00 +0100=>{:count=>1, :duration=>5, :lines=>361}}]
94
+ Trifle::Stats.configure do |config|
95
+ config.driver = Trifle::Stats::Driver::Redis.new(Redis.new)
96
+ config.buffer_duration = 5 # flush every ~5 seconds
97
+ config.buffer_size = 100 # ...or sooner when 100 actions are enqueued
98
+ config.buffer_aggregate = true
99
+ end
116
100
  ```
117
101
 
118
- #### Get values
119
-
120
- Retrieve your values for specific `range`. As you just used `assert` above, it will return latest value you've asserted.
121
-
122
- ```ruby
123
- Trifle::Stats.values(key: 'event::logs', from: Time.now, to: Time.now, range: :day)
124
- => {:at=>[2021-01-25 00:00:00 +0200], :values=>[{"count"=>1, "duration"=>5, "lines"=>361}]}
125
- ```
102
+ If your application manages database connections manually (e.g. ActiveRecord with a pool size of 1),
103
+ increase the pool size or disable buffering to avoid starving other threads.
126
104
 
127
105
  ## Testing
128
106
 
129
- ### Testing Principles
107
+ Tests are run against all supported drivers. To run the test suite:
130
108
 
131
- Tests are structured to be simple, isolated, and mirror the class structure. Each test is independent and self-contained.
109
+ ```bash
110
+ $ bundle exec rspec
111
+ ```
132
112
 
133
- #### Key Rules:
113
+ Ensure Redis, Postgres, and MongoDB are running locally. The test suite will handle database setup automatically.
134
114
 
135
- 1. **Keep tests simple and isolated** - Each test should focus on a single class/method
136
- 2. **Independent tests** - Tests should not depend on each other and can be run in any order
137
- 3. **Self-contained setup** - Every test configures its own variables and dependencies
138
- 4. **Single layer testing** - Test only the specific class, not multiple layers of functionality
139
- 5. **Use appropriate stubbing** - When testing operations, stub driver methods. Let driver tests verify driver behavior
140
- 6. **Repeat yourself** - It's okay to repeat setup code for clarity and independence
115
+ Tests are meant to be **simple and isolated**. Every test should be **independent** and able to run in any order. Tests should be **self-contained** and set up their own configuration. This makes it easier to debug and maintain the test suite.
141
116
 
142
- #### Driver Testing:
117
+ Use **single layer testing** to focus on testing a specific class or module in isolation. Use **appropriate stubbing** for driver methods when testing higher-level operations.
143
118
 
144
- - Driver tests use **real database connections** (Redis, PostgreSQL, MongoDB, SQLite)
145
- - Clean data between tests to ensure isolation
146
- - Use appropriate test databases (e.g., Redis database 15, test-specific DB names)
147
- - The **Process driver** is ideal for testing environments as it uses in-memory storage
119
+ Driver tests use real database connections for accurate behavior validation. The `Process` driver is preferred for in-memory testing environments.
148
120
 
149
- #### Test Structure:
121
+ **Repeat yourself** in test setup for clarity rather than complex shared setups that can hide dependencies.
150
122
 
151
- Tests follow the same structure as the classes they test:
152
- - `spec/stats/driver/` - Driver class tests
153
- - `spec/stats/operations/` - Operation class tests
154
- - `spec/stats/mixins/` - Mixin tests
123
+ For performance testing:
155
124
 
156
- This approach makes it easier to see initial configuration and expected results for each test.
125
+ ```bash
126
+ $ cd spec/performance
127
+ $ bundle install
128
+ $ ruby run.rb 100 '{"a":1}'
129
+ ```
157
130
 
158
131
  ## Contributing
159
132
 
160
133
  Bug reports and pull requests are welcome on GitHub at https://github.com/trifle-io/trifle-stats.
134
+
135
+ ## License
136
+
137
+ The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
@@ -0,0 +1 @@
1
+ .devops/docker/local/docker-compose.yml
@@ -3,8 +3,8 @@
3
3
  module Trifle
4
4
  module Stats
5
5
  class Aggregator
6
- class Avg
7
- Trifle::Stats::Series.register_aggregator(:avg, self)
6
+ class Mean
7
+ Trifle::Stats::Series.register_aggregator(:mean, self)
8
8
 
9
9
  def aggregate(series:, path:, slices: 1)
10
10
  return [] if series[:at].empty?
@@ -0,0 +1,380 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Trifle
4
+ module Stats
5
+ module BufferRegistry
6
+ class << self
7
+ def register(buffer)
8
+ registry_mutex.synchronize do
9
+ registry << buffer
10
+ install_shutdown_hooks
11
+ end
12
+ end
13
+
14
+ def unregister(buffer)
15
+ registry_mutex.synchronize do
16
+ registry.delete(buffer)
17
+ end
18
+ pending_mutex.synchronize do
19
+ pending.delete(buffer)
20
+ end
21
+ end
22
+
23
+ def enqueue_pending(buffer)
24
+ pending_mutex.synchronize do
25
+ pending << buffer unless pending.include?(buffer)
26
+ end
27
+ end
28
+
29
+ def cancel_pending(buffer)
30
+ pending_mutex.synchronize do
31
+ pending.delete(buffer)
32
+ end
33
+ end
34
+
35
+ def run_pending!
36
+ snapshot = pending_mutex.synchronize do
37
+ buffers = pending.dup
38
+ pending.clear
39
+ buffers
40
+ end
41
+ snapshot.each(&:flush_from_registry!)
42
+ end
43
+
44
+ def pending?
45
+ pending_mutex.synchronize { pending.any? }
46
+ end
47
+
48
+ def flush_all
49
+ run_pending!
50
+ snapshot = registry_mutex.synchronize { registry.dup }
51
+ snapshot.each(&:shutdown!)
52
+ end
53
+
54
+ private
55
+
56
+ def registry
57
+ @registry ||= []
58
+ end
59
+
60
+ def registry_mutex
61
+ @registry_mutex ||= Mutex.new
62
+ end
63
+
64
+ def pending
65
+ @pending ||= []
66
+ end
67
+
68
+ def pending_mutex
69
+ @pending_mutex ||= Mutex.new
70
+ end
71
+
72
+ def install_shutdown_hooks
73
+ return if @shutdown_hooks_installed
74
+
75
+ at_exit { flush_all }
76
+ install_sigterm_trap if Signal.list.key?('TERM')
77
+ @shutdown_hooks_installed = true
78
+ end
79
+
80
+ def install_sigterm_trap
81
+ previous = Signal.trap('TERM') do
82
+ flush_all
83
+ invoke_previous_handler(previous)
84
+ end
85
+ @previous_sigterm_handler = previous
86
+ end
87
+
88
+ def invoke_previous_handler(previous)
89
+ case previous
90
+ when Proc
91
+ previous.call
92
+ when Symbol, String
93
+ Signal.trap('TERM', previous)
94
+ Process.kill('TERM', Process.pid)
95
+ end
96
+ rescue StandardError
97
+ nil
98
+ end
99
+ end
100
+ end
101
+
102
+ class BufferQueue
103
+ def initialize(aggregate:)
104
+ @aggregate = aggregate
105
+ reset!
106
+ end
107
+
108
+ def store(operation, keys, values)
109
+ aggregate? ? store_aggregate(operation, keys, values) : store_linear(operation, keys, values)
110
+ @operation_count += 1
111
+ end
112
+
113
+ def size
114
+ @operation_count
115
+ end
116
+
117
+ def empty?
118
+ size.zero?
119
+ end
120
+
121
+ def drain
122
+ drained = aggregate? ? @actions.values : @actions.dup
123
+ reset!
124
+ drained
125
+ end
126
+
127
+ private
128
+
129
+ def aggregate?
130
+ @aggregate
131
+ end
132
+
133
+ def reset!
134
+ @actions = aggregate? ? {} : []
135
+ @operation_count = 0
136
+ end
137
+
138
+ def store_linear(operation, keys, values)
139
+ @actions << { operation: operation, keys: keys, values: duplicate(values), count: 1 }
140
+ end
141
+
142
+ def store_aggregate(operation, keys, values)
143
+ signature = signature_for(operation, keys)
144
+ if (entry = @actions[signature])
145
+ entry[:values] = merge_values(operation, entry[:values], values)
146
+ entry[:count] += 1
147
+ else
148
+ @actions[signature] = { operation: operation, keys: keys, values: duplicate(values), count: 1 }
149
+ end
150
+ end
151
+
152
+ def merge_values(operation, current, incoming)
153
+ case operation
154
+ when :inc
155
+ merge_increment(current, incoming)
156
+ when :set
157
+ duplicate(incoming)
158
+ else
159
+ duplicate(incoming)
160
+ end
161
+ end
162
+
163
+ def merge_increment(current, incoming)
164
+ incoming.each do |key, value|
165
+ current[key] =
166
+ if value.is_a?(Hash)
167
+ merge_increment(current.fetch(key, {}), value)
168
+ else
169
+ current.fetch(key, 0).to_i + value.to_i
170
+ end
171
+ end
172
+ current
173
+ end
174
+
175
+ def signature_for(operation, keys)
176
+ identifiers = keys.map do |key|
177
+ [key.prefix, key.key, key.granularity, key.at&.to_i].join(':')
178
+ end
179
+ "#{operation}-#{identifiers.join('|')}"
180
+ end
181
+
182
+ def duplicate(value)
183
+ case value
184
+ when Hash
185
+ value.transform_values { |entry| duplicate(entry) }
186
+ when Array
187
+ value.map { |entry| duplicate(entry) }
188
+ else
189
+ value
190
+ end
191
+ end
192
+ end
193
+
194
+ class Buffer # rubocop:disable Metrics/ClassLength
195
+ DEFAULT_DURATION = 1
196
+ DEFAULT_SIZE = 256
197
+
198
+ class << self
199
+ def register(buffer)
200
+ BufferRegistry.register(buffer)
201
+ end
202
+
203
+ def unregister(buffer)
204
+ BufferRegistry.unregister(buffer)
205
+ end
206
+
207
+ def flush_all
208
+ BufferRegistry.flush_all
209
+ end
210
+
211
+ def run_pending!
212
+ BufferRegistry.run_pending!
213
+ end
214
+
215
+ def pending_flushes?
216
+ BufferRegistry.pending?
217
+ end
218
+ end
219
+
220
+ def initialize(driver:, duration: DEFAULT_DURATION, size: DEFAULT_SIZE, aggregate: true, async: true) # rubocop:disable Metrics/MethodLength
221
+ @driver = driver
222
+ @duration = duration.to_f
223
+ @size = size.to_i.positive? ? size.to_i : 1
224
+ @async = async
225
+ @queue = BufferQueue.new(aggregate: aggregate)
226
+ @mutex = Mutex.new
227
+ @stopped = false
228
+ @flush_pending = false
229
+ @pending_condition = ConditionVariable.new
230
+ @worker = start_worker if async && @duration.positive?
231
+ self.class.register(self)
232
+ end
233
+
234
+ def inc(keys:, values:)
235
+ enqueue(:inc, keys: keys, values: values)
236
+ end
237
+
238
+ def set(keys:, values:)
239
+ enqueue(:set, keys: keys, values: values)
240
+ end
241
+
242
+ def flush!
243
+ actions = drain_actions(reset_pending: true)
244
+ return if actions.nil?
245
+
246
+ process(actions)
247
+ end
248
+
249
+ def shutdown!
250
+ return if @shutdown
251
+
252
+ @shutdown = true
253
+ stop_worker
254
+ BufferRegistry.cancel_pending(self)
255
+ flush!
256
+ self.class.unregister(self)
257
+ end
258
+
259
+ def flush_from_registry!
260
+ actions = drain_pending_actions
261
+ process(actions) if actions
262
+ end
263
+
264
+ private
265
+
266
+ def enqueue(operation, keys:, values:)
267
+ should_flush = false
268
+ @mutex.synchronize do
269
+ @queue.store(operation, keys, values)
270
+ should_flush = @queue.size >= @size
271
+ end
272
+
273
+ flush! if should_flush
274
+ end
275
+
276
+ def request_async_flush
277
+ return unless mark_flush_pending
278
+
279
+ BufferRegistry.enqueue_pending(self)
280
+ wait_for_pending_flush
281
+ end
282
+
283
+ def mark_flush_pending
284
+ @mutex.synchronize do
285
+ return false if @queue.empty? || @flush_pending
286
+
287
+ @flush_pending = true
288
+ true
289
+ end
290
+ end
291
+
292
+ def drain_actions(reset_pending: false)
293
+ @mutex.synchronize do
294
+ return if @queue.empty?
295
+
296
+ mark_flush_serviced if reset_pending
297
+ @queue.drain
298
+ end
299
+ end
300
+
301
+ def drain_pending_actions
302
+ @mutex.synchronize do
303
+ return unless @flush_pending
304
+ return if @queue.empty?
305
+
306
+ mark_flush_serviced
307
+ @queue.drain
308
+ end
309
+ end
310
+
311
+ def wait_for_pending_flush # rubocop:disable Metrics/MethodLength
312
+ should_force = false
313
+ timeout = @duration.positive? ? @duration : DEFAULT_DURATION
314
+ @mutex.synchronize do
315
+ while @flush_pending && timeout.positive?
316
+ @pending_condition.wait(@mutex, timeout)
317
+ break unless @flush_pending
318
+
319
+ timeout = 0
320
+ end
321
+ should_force = @flush_pending
322
+ end
323
+
324
+ return unless should_force
325
+
326
+ BufferRegistry.cancel_pending(self)
327
+ flush!
328
+ end
329
+
330
+ def mark_flush_serviced
331
+ return unless @flush_pending
332
+
333
+ @flush_pending = false
334
+ BufferRegistry.cancel_pending(self)
335
+ @pending_condition.broadcast
336
+ end
337
+
338
+ def process(actions)
339
+ actions.each do |action|
340
+ @driver.public_send(
341
+ action[:operation], keys: action[:keys], values: action[:values], count: action[:count] || 1
342
+ )
343
+ end
344
+ ensure
345
+ release_active_record_connection
346
+ end
347
+
348
+ def start_worker
349
+ Thread.new do
350
+ loop do
351
+ break if @stopped
352
+
353
+ sleep(@duration)
354
+ request_async_flush
355
+ end
356
+ end
357
+ end
358
+
359
+ def stop_worker
360
+ return if @worker.nil?
361
+
362
+ @stopped = true
363
+ begin
364
+ @worker.wakeup
365
+ rescue ThreadError
366
+ nil
367
+ end
368
+ @worker.join
369
+ end
370
+
371
+ def release_active_record_connection
372
+ # Workers run on dedicated threads, so make sure ActiveRecord connections
373
+ # are released back to the shared pool once a flush finishes.
374
+ return unless defined?(::ActiveRecord::Base)
375
+
376
+ ::ActiveRecord::Base.clear_active_connections!
377
+ end
378
+ end
379
+ end
380
+ end