event_meter 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. checksums.yaml +7 -0
  2. data/LICENSE.txt +21 -0
  3. data/README.md +1081 -0
  4. data/exe/event_meter +5 -0
  5. data/lib/event_meter/auto_cleanup.rb +93 -0
  6. data/lib/event_meter/cli.rb +124 -0
  7. data/lib/event_meter/configuration.rb +244 -0
  8. data/lib/event_meter/errors.rb +9 -0
  9. data/lib/event_meter/event.rb +180 -0
  10. data/lib/event_meter/event_payload.rb +103 -0
  11. data/lib/event_meter/hash_input.rb +20 -0
  12. data/lib/event_meter/index_key.rb +19 -0
  13. data/lib/event_meter/keys.rb +63 -0
  14. data/lib/event_meter/path_name.rb +37 -0
  15. data/lib/event_meter/processor.rb +305 -0
  16. data/lib/event_meter/rails.rb +79 -0
  17. data/lib/event_meter/report_definition.rb +184 -0
  18. data/lib/event_meter/reports.rb +143 -0
  19. data/lib/event_meter/rollup.rb +148 -0
  20. data/lib/event_meter/stores/cleanup_helpers.rb +76 -0
  21. data/lib/event_meter/stores/file_helpers.rb +47 -0
  22. data/lib/event_meter/stores/lock_refresher.rb +75 -0
  23. data/lib/event_meter/stores/namespace.rb +14 -0
  24. data/lib/event_meter/stores/redis_lock.rb +77 -0
  25. data/lib/event_meter/stores/rollup/active_record_postgres.rb +135 -0
  26. data/lib/event_meter/stores/rollup/file.rb +736 -0
  27. data/lib/event_meter/stores/rollup/postgres.rb +813 -0
  28. data/lib/event_meter/stores/rollup/redis.rb +349 -0
  29. data/lib/event_meter/stores/stream/file.rb +98 -0
  30. data/lib/event_meter/stores/stream/redis.rb +79 -0
  31. data/lib/event_meter/time_buckets.rb +56 -0
  32. data/lib/event_meter/version.rb +3 -0
  33. data/lib/event_meter/write_result.rb +26 -0
  34. data/lib/event_meter.rb +150 -0
  35. data/lib/generators/event_meter/install_generator.rb +57 -0
  36. data/lib/generators/event_meter/templates/create_event_meter_tables.rb.erb +12 -0
  37. data/lib/generators/event_meter/templates/event_meter.rb.erb +12 -0
  38. metadata +156 -0
@@ -0,0 +1,184 @@
1
require "digest"
require "json"

require_relative "hash_input"

module EventMeter
  # Declarative description of a report: its name, version, the param
  # combinations it can be queried by (indexes), and the params whose
  # inter-event timing is measured (intervals). A SHA-256 fingerprint of
  # the canonical form detects definitions that changed after data was
  # already stored under them.
  class ReportDefinition
    # A queryable combination of params, stored normalized (symbolized,
    # de-duplicated, sorted).
    Index = Struct.new(:params, keyword_init: true) do
      # True when the query's `by` keys normalize to exactly these params.
      def matches?(by)
        params == ReportDefinition.normalize_params(by.keys)
      end

      # Builds the storage key fragment for the given param values.
      def key_for(values)
        IndexKey.build(params, values)
      end

      # NOTE: despite the name, returns an Array of param strings — this
      # is the shape the canonical/serialized form uses for indexes.
      def to_h
        params.map(&:to_s)
      end
    end

    # A measured interval: `param` identifies the param whose timing is
    # tracked, optionally grouped by further params.
    Interval = Struct.new(:param, :group_by, keyword_init: true) do
      def initialize(param:, group_by: [])
        super(
          param: ReportDefinition.normalize_param(param),
          group_by: ReportDefinition.normalize_params(group_by)
        )
      end

      # Index used to look up rollups for this interval's grouping.
      def group_index
        Index.new(params: group_by)
      end

      def to_h
        {
          "param" => param.to_s,
          "group_by" => group_by.map(&:to_s)
        }
      end
    end

    # Pairs a matched Index with the concrete key built from values.
    BuiltIndex = Struct.new(:index, :key, keyword_init: true)

    attr_reader :name, :version, :indexes, :intervals

    # DSL entry point: yields the new definition for configuration.
    def self.build(name, version:)
      definition = new(name: name, version: version)
      yield definition if block_given?
      definition
    end

    # Rehydrates a definition from stored metadata and verifies the
    # stored fingerprint still matches the reconstructed definition;
    # raises DefinitionChangedError otherwise.
    def self.from_h(hash)
      hash = HashInput.coerce(hash, "report definition")

      new(name: hash.fetch("name"), version: hash.fetch("version")).tap do |definition|
        definition.send(:replace_indexes, hash.fetch("indexes", []))
        definition.send(:replace_intervals, hash.fetch("intervals", []))
        definition.send(:validate_fingerprint!, hash.fetch("fingerprint"))
      end
    end

    def initialize(name:, version:)
      @name = normalize_name(name)
      @version = normalize_version(version)
      # Every definition always carries the empty index (the ungrouped
      # "all events" rollup).
      @indexes = [Index.new(params: [])]
      @intervals = []
    end

    # Registers an index over the given params (idempotent). Returns
    # self so calls can be chained in the build DSL.
    def index_by(*params)
      normalized = self.class.normalize_params(params)
      return self if indexes.any? { |index| index.params == normalized }

      indexes << Index.new(params: normalized)
      self
    end

    # Registers an interval measurement (idempotent) and ensures an
    # index exists for its group_by params. Returns self for chaining.
    def measure_interval_by(param, group_by: [])
      interval = Interval.new(param: param, group_by: group_by)
      intervals << interval unless intervals.any? { |existing| existing == interval }
      index_by(*interval.group_by) unless interval.group_by.empty?
      self
    end

    # All indexes whose params are fully present (and non-nil) in the
    # payload, each paired with its concrete key.
    def indexes_for(payload)
      indexes.filter_map do |index|
        next unless index.params.all? { |param| self.class.indexable_value?(payload.params, param) }

        BuiltIndex.new(index: index, key: index.key_for(payload.params))
      end
    end

    # Resolves the single index matching the query's `by` params; raises
    # UnsupportedQueryError when no such index was configured.
    def index_for!(by)
      normalized_by = normalize_by(by)
      index = indexes.find { |candidate| candidate.matches?(normalized_by) }

      unless index
        raise UnsupportedQueryError, "no index configured for #{name} v#{version} by #{normalized_by.keys.inspect}"
      end

      BuiltIndex.new(index: index, key: index.key_for(normalized_by))
    end

    # Stable content hash of the canonical form; changing indexes or
    # intervals changes the fingerprint.
    def fingerprint
      Digest::SHA256.hexdigest(JSON.generate(canonical_h))
    end

    def to_h
      canonical_h.merge("fingerprint" => fingerprint)
    end

    # Accepts either a flat param list or a single nested array (splat
    # passthrough from index_by); symbolizes, de-duplicates, and sorts.
    def self.normalize_params(params)
      params = params.first if params.is_a?(Array) && params.length == 1 && params.first.is_a?(Array)
      Array(params).map { |param| normalize_param(param) }.uniq.sort
    end

    def self.normalize_param(param)
      return param.to_sym if param.respond_to?(:to_sym) && !param.to_s.strip.empty?

      raise ArgumentError, "report params must be strings or symbols"
    end

    # True when the payload carries a non-nil value for the param
    # (payload params are string-keyed).
    def self.indexable_value?(values, param)
      values.key?(param.to_s) && !values[param.to_s].nil?
    end

    private

    # Rebuilds @indexes from serialized form; guarantees the empty
    # index is present even if the stored list omitted it.
    def replace_indexes(raw_indexes)
      @indexes = []
      Array(raw_indexes).each { |params| index_by(*Array(params)) }
      index_by if @indexes.empty?
    end

    def replace_intervals(raw_intervals)
      @intervals = []
      Array(raw_intervals).each do |raw_interval|
        interval = HashInput.coerce(raw_interval, "interval")
        measure_interval_by(
          interval.fetch("param"),
          group_by: interval.fetch("group_by", [])
        )
      end
    end

    def validate_fingerprint!(stored_fingerprint)
      return if stored_fingerprint == fingerprint

      raise DefinitionChangedError, "#{name} v#{version} definition fingerprint does not match stored metadata"
    end

    # Canonical, order-stable representation used for fingerprinting —
    # do not change its shape or ordering without a migration strategy.
    def canonical_h
      {
        "name" => name,
        "version" => version,
        "indexes" => indexes.map(&:to_h).sort,
        "intervals" => intervals.map(&:to_h).sort_by { |interval| [interval.fetch("param"), interval.fetch("group_by")] }
      }
    end

    def normalize_name(value)
      name = value.to_s
      raise ArgumentError, "report name cannot be blank" if name.strip.empty?

      name
    end

    # Coerces to a positive Integer; any failure is re-raised as one
    # uniform ArgumentError.
    def normalize_version(value)
      version = Integer(value)
      return version if version.positive?

      raise ArgumentError
    rescue ArgumentError, TypeError, RangeError
      raise ArgumentError, "report version must be a positive integer"
    end

    # Symbolizes `by` keys and rejects nil values so index matching and
    # key building operate on clean input.
    def normalize_by(by)
      HashInput.coerce(by, "by").each_with_object({}) do |(key, value), hash|
        raise ArgumentError, "by values cannot be nil" if value.nil?

        hash[self.class.normalize_param(key)] = value
      end
    end
  end
end
@@ -0,0 +1,143 @@
1
require "time"

module EventMeter
  # Read side of the metering pipeline: answers summary, series, and
  # compare queries by loading rollup hashes from storage and combining
  # them with Rollup.
  class Reports
    # Window used by #series when the caller gives no from:.
    DEFAULT_SERIES_SECONDS = 3600

    # configuration supplies the namespace and summary key limit;
    # rollup_storage must respond to hgetall_many, keys_matching, and
    # report_definition.
    def initialize(configuration:, rollup_storage:)
      @configuration = configuration
      @rollup_storage = rollup_storage
    end

    # Combined totals for a report — over an explicit [from, to) window
    # (minute rollups) or, with no window, over every stored hour
    # rollup. Raises ArgumentError if only one window edge is given.
    def summary(name, version:, from: nil, to: nil, by: {})
      raise ArgumentError, "pass both from: and to:, or neither" if from.nil? != to.nil?

      from &&= time_value(from)
      to &&= time_value(to)
      validate_window!(from, to) if from && to

      definition = report_definition(name, version)
      built_index = definition.index_for!(by)
      keys, window_seconds = summary_keys(definition, from: from, to: to, index: built_index)

      Rollup.combine(@rollup_storage.hgetall_many(keys)).to_h(seconds: window_seconds)
    end

    # Per-bucket time series. Defaults: to = end of the current bucket,
    # from = to - DEFAULT_SERIES_SECONDS. Each element is a rollup hash
    # tagged with its bucket's ISO-8601 timestamp.
    def series(name, version:, from: nil, to: nil, every: :minute, by: {})
      every = TimeBuckets.normalize(every)
      to = to ? time_value(to) : TimeBuckets.time(Time.now.utc, every) + TimeBuckets.seconds(every)
      from = from ? time_value(from) : to - DEFAULT_SERIES_SECONDS
      validate_window!(from, to)

      definition = report_definition(name, version)
      built_index = definition.index_for!(by)
      buckets = TimeBuckets.between(from, to, every)
      bucket_keys = buckets.map { |bucket| rollup_key(definition, every, bucket, built_index) }
      raw_rollups = @rollup_storage.hgetall_many(bucket_keys)
      bucket_seconds = TimeBuckets.seconds(every)

      buckets.zip(raw_rollups).map do |bucket, raw|
        Rollup.from_hash(raw).to_h(seconds: bucket_seconds).merge(bucket: bucket.iso8601)
      end
    end

    # Two summaries side by side. Both windows are validated up front so
    # a bad `after:` range fails before any storage reads happen.
    def compare(name, version:, before:, after:, by: {})
      before_window = comparison_window(before)
      after_window = comparison_window(after)

      {
        before: summary(name, version: version, from: before_window[0], to: before_window[1], by: by),
        after: summary(name, version: version, from: after_window[0], to: after_window[1], by: by)
      }
    end

    private

    # Chooses the rollup keys backing a summary. With a window: one key
    # per minute bucket plus the window length in seconds. Without one:
    # every stored hour bucket matching the index, with nil seconds so
    # rates are inferred from started_at bounds.
    def summary_keys(definition, from:, to:, index:)
      return windowed_summary_keys(definition, from, to, index) if from && to

      pattern = Keys.rollup_pattern(
        namespace: @configuration.namespace,
        name: definition.name,
        version: definition.version,
        every: :hour,
        index: index
      )

      matched = keys_matching(pattern)
      validate_summary_key_count!(matched)
      [matched, nil]
    end

    # Minute-bucket keys covering [from, to) plus the exact window size.
    def windowed_summary_keys(definition, from, to, index)
      buckets = TimeBuckets.between(from, to, :minute)
      [
        buckets.map { |bucket| rollup_key(definition, :minute, bucket, index) },
        buckets.length * TimeBuckets.seconds(:minute)
      ]
    end

    # Passes limit: through only when the storage adapter's
    # keys_matching accepts it (a parameter named :limit or a **rest).
    def keys_matching(pattern)
      limit = @configuration.summary_key_limit
      # Ask for one extra key so validate_summary_key_count! can tell
      # "exactly limit" apart from "more than limit".
      storage_key_limit = limit && limit + 1
      parameters = @rollup_storage.method(:keys_matching).parameters
      supports_limit = parameters.any? { |kind, param_name| kind == :keyrest || param_name == :limit }

      if supports_limit
        @rollup_storage.keys_matching(pattern, limit: storage_key_limit)
      else
        @rollup_storage.keys_matching(pattern)
      end
    end

    # Guards unbounded summaries against scanning huge key sets.
    def validate_summary_key_count!(keys)
      limit = @configuration.summary_key_limit
      return unless limit && keys.length > limit

      raise ArgumentError, "summary without a time window matched more than #{limit} rollup buckets; pass from: and to:"
    end

    def rollup_key(definition, every, bucket, index)
      Keys.rollup(
        namespace: @configuration.namespace,
        name: definition.name,
        version: definition.version,
        every: every,
        bucket: bucket,
        index: index
      )
    end

    # Loads and rehydrates the stored definition for name/version;
    # raises DefinitionNotFoundError when none is stored.
    def report_definition(name, version)
      stored = @rollup_storage.report_definition(name: name, version: version)
      raise DefinitionNotFoundError, "no definition stored for #{name} v#{version}" unless stored

      ReportDefinition.from_h(stored)
    end

    def validate_window!(from, to)
      raise ArgumentError, "to must be after from" unless to > from
    end

    # Accepts any range-like with both endpoints; returns [from, to].
    def comparison_window(window)
      unless window.respond_to?(:begin) && window.respond_to?(:end)
        raise ArgumentError, "comparison windows must be ranges"
      end

      edges = [window.begin, window.end]
      raise ArgumentError, "comparison windows must have a start and end" if edges.any?(&:nil?)

      edges
    end

    # Coerces a Time (anything responding to #utc) or parseable time
    # string to UTC; everything else raises ArgumentError.
    def time_value(value)
      return value.utc if value.respond_to?(:utc)
      raise ArgumentError unless value.respond_to?(:to_str)

      Time.parse(value.to_str).utc
    rescue ArgumentError, TypeError, RangeError
      raise ArgumentError, "time must be a Time or parseable time string"
    end
  end
end
@@ -0,0 +1,148 @@
1
require "time"

module EventMeter
  # Accumulates counters plus min/max/sum statistics for one rollup
  # bucket and renders them as a summary hash with derived averages and
  # per-second / per-minute rates.
  class Rollup
    # Fields merged by taking the minimum; MAX_FIELDS by the maximum.
    # Every other field is additive.
    MIN_FIELDS = %w[duration_ms_min interval_ms_min started_at_ms_min].freeze
    MAX_FIELDS = %w[duration_ms_max interval_ms_max started_at_ms_max].freeze
    # Largest epoch-milliseconds value rendered as ISO-8601
    # (9999-12-31T23:59:59.999Z); anything beyond renders as nil.
    MAX_RENDERABLE_TIMESTAMP_MS = 253_402_300_799_999

    attr_reader :fields

    # Wraps a raw storage hash (nil tolerated) in a Rollup.
    def self.from_hash(hash)
      new(hash || {})
    end

    # Folds many raw rollup hashes into one combined Rollup.
    def self.combine(raw_rollups)
      raw_rollups.reduce(new) { |combined, raw| combined.merge!(from_hash(raw)) }
    end

    def self.min_field?(field)
      MIN_FIELDS.include?(field.to_s)
    end

    def self.max_field?(field)
      MAX_FIELDS.include?(field.to_s)
    end

    # Normalizes keys to strings and values to integers (unparseable
    # values become 0).
    def initialize(fields = {})
      @fields = fields.each_with_object({}) do |(key, value), normalized|
        normalized[key.to_s] = numeric_value(value)
      end
    end

    # Adds value (default 1) to a counter field.
    def increment(field, value = 1)
      key = field.to_s
      fields[key] = fields.fetch(key, 0) + numeric_value(value)
    end

    def add_duration(duration_ms)
      add_metric("duration_ms", duration_ms) unless duration_ms.nil?
    end

    def add_interval(interval_ms)
      add_metric("interval_ms", interval_ms) unless interval_ms.nil?
    end

    # Tracks the earliest/latest observed event start (epoch ms).
    def add_started_at(started_ms)
      return if started_ms.nil?

      value = numeric_value(started_ms)
      track_min("started_at_ms_min", value)
      track_max("started_at_ms_max", value)
    end

    # Destructively folds another rollup into this one: min fields keep
    # the smaller value, max fields the larger, all others are summed.
    def merge!(other)
      other.fields.each do |field, value|
        if self.class.min_field?(field)
          track_min(field, value)
        elsif self.class.max_field?(field)
          track_max(field, value)
        else
          increment(field, value)
        end
      end

      self
    end

    # Renders the rollup as a summary hash. seconds, when given, is the
    # rate window; otherwise it is inferred from the started_at bounds.
    # Entries whose value is nil (unknown bounds, undefined rates or
    # averages) are omitted from the result.
    def to_h(seconds: nil)
      total = fields.fetch("count", 0)
      duration_count = fields.fetch("duration_ms_count", 0)
      duration_sum = fields.fetch("duration_ms_sum", 0)
      interval_count = fields.fetch("interval_ms_count", 0)
      interval_sum = fields.fetch("interval_ms_sum", 0)
      started_min_ms = fields["started_at_ms_min"]
      started_max_ms = fields["started_at_ms_max"]
      window = seconds || inferred_rate_window_seconds(started_min_ms, started_max_ms)

      {
        count: total,
        success_count: fields.fetch("success_count", 0),
        failure_count: fields.fetch("failure_count", 0),
        skipped_count: fields.fetch("skipped_count", 0),
        started_at_min: iso_time(started_min_ms),
        started_at_max: iso_time(started_max_ms),
        rate_window_seconds: window,
        per_second: rate(total, window),
        per_minute: rate(total * 60, window),
        duration_ms_count: duration_count,
        duration_ms_sum: duration_sum,
        duration_ms_avg: average(duration_sum, duration_count),
        duration_ms_min: fields["duration_ms_min"],
        duration_ms_max: fields["duration_ms_max"],
        interval_ms_count: interval_count,
        interval_ms_sum: interval_sum,
        interval_ms_avg: average(interval_sum, interval_count),
        interval_ms_min: fields["interval_ms_min"],
        interval_ms_max: fields["interval_ms_max"]
      }.compact
    end

    private

    # Keeps the smaller of the stored and new value (nil stored = new).
    def track_min(field, value)
      current = fields[field]
      fields[field] = current.nil? || value < current ? value : current
    end

    # Keeps the larger of the stored and new value (nil stored = new).
    def track_max(field, value)
      current = fields[field]
      fields[field] = current.nil? || value > current ? value : current
    end

    # Seconds between first and last observed start; nil when either
    # bound is missing.
    def inferred_rate_window_seconds(started_min_ms, started_max_ms)
      return nil if started_min_ms.nil? || started_max_ms.nil?

      (started_max_ms - started_min_ms) / 1000.0
    end

    # Events per `seconds`; nil when the window is missing or not
    # positive (avoids division by zero).
    def rate(count, seconds)
      count / seconds.to_f if seconds&.positive?
    end

    def average(sum, count)
      sum / count.to_f if count.positive?
    end

    # Epoch ms -> ISO-8601 UTC string; nil for missing or unrenderable
    # (out-of-range) timestamps.
    def iso_time(milliseconds)
      return nil if milliseconds.nil? || milliseconds.abs > MAX_RENDERABLE_TIMESTAMP_MS

      Time.at(milliseconds / 1000.0).utc.iso8601(6)
    rescue ArgumentError, RangeError
      nil
    end

    # Records one observation of a "#{prefix}" metric: count, sum, min,
    # max. Negative values are ignored.
    def add_metric(prefix, value)
      value = numeric_value(value)
      return if value.negative?

      increment("#{prefix}_count")
      increment("#{prefix}_sum", value)
      track_min("#{prefix}_min", value)
      track_max("#{prefix}_max", value)
    end

    # Coerces to Integer; anything unparseable counts as 0 so corrupt
    # storage values cannot crash reads.
    def numeric_value(value)
      Integer(value)
    rescue ArgumentError, TypeError, RangeError
      0
    end
  end
end
@@ -0,0 +1,76 @@
1
+ require "time"
2
+
3
+ require_relative "../index_key"
4
+
5
module EventMeter
  module Stores
    # Shared predicates for cleanup passes: decide whether rollup/state
    # keys are older than a cutoff, optionally restricted to a set of
    # event names. Including classes must provide #namespace.
    module CleanupHelpers
      private

      # Normalizes an event-name filter: nil/empty means "no filter";
      # otherwise IndexKey-escaped names (stored keys hold escaped
      # names, so comparisons are escaped-to-escaped).
      def event_filter(events)
        names = Array(events).compact
        return nil if names.empty?

        names.map { |event| IndexKey.escape(event) }
      end

      # True when a rollup key's bucket ended at or before `before`.
      # Malformed keys return false (kept, not deleted).
      def rollup_key_old?(key, before, filter)
        prefix = "#{namespace}:rollup:"
        return false unless key.start_with?(prefix)

        # Key layout after the prefix: name:version:every:bucket:index —
        # the index may itself contain colons, hence the 5-part split.
        event_name, _version, every, bucket_id = key.delete_prefix(prefix).split(":", 5)
        return false unless event_name && every && bucket_id
        return false if filter && !filter.include?(event_name)

        bucket_end_time(every, bucket_id) <= before
      rescue ArgumentError, TypeError
        false
      end

      # True when a state entry's timestamp value predates before_ms.
      # NOTE: unparseable values return true — corrupt state entries are
      # treated as old and become eligible for cleanup.
      def state_key_old?(key, before_ms, filter, value)
        prefix = "#{namespace}:state:"
        return false unless key.start_with?(prefix)

        event_name = key.delete_prefix(prefix).split(":", 3).first
        return false if filter && !filter.include?(event_name)

        Integer(value) < before_ms
      rescue ArgumentError, TypeError, RangeError
        true
      end

      # Exclusive end of a rollup bucket; raises ArgumentError for
      # unknown granularities or malformed bucket ids.
      def bucket_end_time(every, bucket_id)
        case every
        when "minute" then minute_bucket_time(bucket_id) + 60
        when "hour" then hour_bucket_time(bucket_id) + 3600
        else raise ArgumentError, "unsupported rollup bucket: #{every.inspect}"
        end
      end

      # Parses a YYYYMMDDHHMM bucket id into a UTC Time.
      def minute_bucket_time(bucket_id)
        digits = bucket_id.to_s
        raise ArgumentError, "malformed minute bucket" unless digits.match?(/\A\d{12}\z/)

        parts = [digits[0, 4], digits[4, 2], digits[6, 2], digits[8, 2], digits[10, 2]].map(&:to_i)
        Time.utc(*parts)
      end

      # Parses a YYYYMMDDHH bucket id into a UTC Time.
      def hour_bucket_time(bucket_id)
        digits = bucket_id.to_s
        raise ArgumentError, "malformed hour bucket" unless digits.match?(/\A\d{10}\z/)

        parts = [digits[0, 4], digits[4, 2], digits[6, 2], digits[8, 2]].map(&:to_i)
        Time.utc(*parts)
      end
    end
  end
end
@@ -0,0 +1,47 @@
1
require "fileutils"
require "json"
require "securerandom"

module EventMeter
  module Stores
    # Durable file-writing primitives shared by the file-backed stores.
    # Content goes to a unique temp file, is fsynced, and is renamed
    # into place so readers never observe a partially written file.
    module FileHelpers
      private

      # Expands a configured path; raises ArgumentError when blank.
      def normalize_file_store_path(value, name: "path")
        candidate = value.to_s
        raise ArgumentError, "#{name} cannot be blank" if candidate.strip.empty?

        ::File.expand_path(candidate)
      end

      # Serializes value as JSON and writes it atomically to path.
      def atomic_write_json(path, value)
        atomic_write(path, JSON.generate(value))
      end

      # Writes contents to path atomically: temp file (mode 0600) ->
      # flush -> fsync -> rename -> directory fsync. The temp file is
      # removed if anything fails before the rename.
      def atomic_write(path, contents)
        scratch = temporary_path_for(path)

        ::File.open(scratch, ::File::WRONLY | ::File::CREAT | ::File::TRUNC, 0o600) do |file|
          file.write(contents)
          file.flush
          file.fsync
        end

        ::File.rename(scratch, path)
        fsync_directory(::File.dirname(path))
      ensure
        FileUtils.rm_f(scratch) if scratch && ::File.exist?(scratch)
      end

      # Collision-resistant sibling temp path (pid + thread + random),
      # kept in the same directory so the rename stays atomic.
      def temporary_path_for(path)
        "#{path}.#{Process.pid}.#{Thread.current.object_id}.#{SecureRandom.hex(4)}.tmp"
      end

      # Best-effort directory fsync so the rename itself is durable;
      # silently skipped where the filesystem does not support it.
      def fsync_directory(path)
        ::File.open(path, ::File::RDONLY) { |directory| directory.fsync }
      rescue SystemCallError
        nil
      end
    end
  end
end
@@ -0,0 +1,75 @@
1
require_relative "../errors"

module EventMeter
  module Stores
    # Background thread that keeps a lock alive by calling a refresh
    # callable at a fixed interval. If a refresh fails or raises, the
    # owner thread is interrupted with LockLostError so it stops working
    # under a lock it may no longer hold.
    class LockRefresher
      # Bounded wait for the refresher thread to exit in #stop so a hung
      # refresh cannot block the caller indefinitely.
      JOIN_TIMEOUT = 1.0

      # interval: seconds between refreshes; refresh: callable returning
      # truthy while the lock is still held; failure_message: text for
      # the raised LockLostError; thread_name: label for the thread.
      def initialize(interval:, refresh:, failure_message:, thread_name:)
        @interval = interval
        @refresh = refresh
        @failure_message = failure_message
        @thread_name = thread_name
        @mutex = Mutex.new
        @condition = ConditionVariable.new
        @stopped = false
      end

      # Spawns the refresher thread. `owner` is the thread to interrupt
      # on failure (defaults to the calling thread). Returns self.
      def start(owner: Thread.current)
        @owner = owner
        @thread = Thread.new { run }
        self
      end

      # Signals the refresher to stop and waits up to JOIN_TIMEOUT for
      # the thread to finish.
      def stop
        @mutex.synchronize do
          @stopped = true
          @condition.broadcast
        end

        @thread&.join(JOIN_TIMEOUT)
      end

      private

      # Thread body: wait one interval (or until stopped), refresh, and
      # loop. Any failure is converted to LockLostError and delivered to
      # the owner via notify_owner.
      def run
        # Failures are reported to the owner directly, not to stderr.
        Thread.current.report_on_exception = false
        Thread.current.name = @thread_name if Thread.current.respond_to?(:name=)

        loop do
          break if stopped_after_wait?

          refreshed = @refresh.call
          raise LockLostError, @failure_message unless refreshed
        end
      rescue LockLostError => error
        notify_owner(error)
      rescue StandardError => error
        notify_owner(LockLostError.new("#{@failure_message}: #{error.class}: #{error.message}"))
      end

      # Sleeps up to @interval (woken early by #stop's broadcast) and
      # reports whether a stop was requested.
      def stopped_after_wait?
        @mutex.synchronize do
          @condition.wait(@mutex, @interval) unless @stopped
          @stopped
        end
      end

      # Marks the refresher stopped and — unless #stop already ran —
      # asynchronously raises `error` inside the owner thread.
      def notify_owner(error)
        owner = @mutex.synchronize do
          already_stopped = @stopped
          @stopped = true
          @condition.broadcast
          already_stopped ? nil : @owner
        end

        # Losing the refresh means another process can acquire this lock while
        # the owner still writes. Interrupt the owner immediately instead of
        # letting split-brain processing continue until the block returns.
        owner&.raise(error)
      rescue ThreadError
        nil
      end
    end
  end
end
@@ -0,0 +1,14 @@
1
module EventMeter
  module Stores
    # Validation shared by stores that prefix their keys with a
    # configurable namespace.
    module Namespace
      private

      # Coerces value to a String and rejects blank namespaces, which
      # would otherwise produce colliding keys like ":rollup:...".
      def normalize_namespace(value)
        candidate = value.to_s
        return candidate unless candidate.strip.empty?

        raise ArgumentError, "namespace cannot be blank"
      end
    end
  end
end