event_meter 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. checksums.yaml +7 -0
  2. data/LICENSE.txt +21 -0
  3. data/README.md +1081 -0
  4. data/exe/event_meter +5 -0
  5. data/lib/event_meter/auto_cleanup.rb +93 -0
  6. data/lib/event_meter/cli.rb +124 -0
  7. data/lib/event_meter/configuration.rb +244 -0
  8. data/lib/event_meter/errors.rb +9 -0
  9. data/lib/event_meter/event.rb +180 -0
  10. data/lib/event_meter/event_payload.rb +103 -0
  11. data/lib/event_meter/hash_input.rb +20 -0
  12. data/lib/event_meter/index_key.rb +19 -0
  13. data/lib/event_meter/keys.rb +63 -0
  14. data/lib/event_meter/path_name.rb +37 -0
  15. data/lib/event_meter/processor.rb +305 -0
  16. data/lib/event_meter/rails.rb +79 -0
  17. data/lib/event_meter/report_definition.rb +184 -0
  18. data/lib/event_meter/reports.rb +143 -0
  19. data/lib/event_meter/rollup.rb +148 -0
  20. data/lib/event_meter/stores/cleanup_helpers.rb +76 -0
  21. data/lib/event_meter/stores/file_helpers.rb +47 -0
  22. data/lib/event_meter/stores/lock_refresher.rb +75 -0
  23. data/lib/event_meter/stores/namespace.rb +14 -0
  24. data/lib/event_meter/stores/redis_lock.rb +77 -0
  25. data/lib/event_meter/stores/rollup/active_record_postgres.rb +135 -0
  26. data/lib/event_meter/stores/rollup/file.rb +736 -0
  27. data/lib/event_meter/stores/rollup/postgres.rb +813 -0
  28. data/lib/event_meter/stores/rollup/redis.rb +349 -0
  29. data/lib/event_meter/stores/stream/file.rb +98 -0
  30. data/lib/event_meter/stores/stream/redis.rb +79 -0
  31. data/lib/event_meter/time_buckets.rb +56 -0
  32. data/lib/event_meter/version.rb +3 -0
  33. data/lib/event_meter/write_result.rb +26 -0
  34. data/lib/event_meter.rb +150 -0
  35. data/lib/generators/event_meter/install_generator.rb +57 -0
  36. data/lib/generators/event_meter/templates/create_event_meter_tables.rb.erb +12 -0
  37. data/lib/generators/event_meter/templates/event_meter.rb.erb +12 -0
  38. metadata +156 -0
@@ -0,0 +1,349 @@
1
+ require "json"
2
+ require "time"
3
+
4
+ require_relative "../../index_key"
5
+ require_relative "../../rollup"
6
+ require_relative "../cleanup_helpers"
7
+ require_relative "../namespace"
8
+ require_relative "../redis_lock"
9
+
10
module EventMeter
  module Stores
    module Rollup
      # Redis-backed rollup storage.
      #
      # Persists report rollups as Redis hashes, interval state as plain
      # string keys, and processed-entry markers as string keys holding the
      # processing timestamp. min/max hash fields and state maxima are
      # maintained atomically through Lua scripts so concurrent processors
      # cannot clobber each other. All written keys receive +rollup_ttl+.
      #
      # An instance is either unscoped (definition bookkeeping only) or
      # scoped to a single report name/version via #for_report; the methods
      # that touch rollup/processed data require a scoped instance.
      class Redis
        include CleanupHelpers
        include Namespace
        include RedisLock

        # Default expiry for rollup, state, and processed keys: 31 days, in seconds.
        DEFAULT_ROLLUP_TTL = 31 * 24 * 60 * 60

        # Keeps the smallest numeric value ever written to hash field ARGV[1]
        # of KEYS[1]. HGET returns Lua +false+ when the field is absent, so
        # the field is (re)set when missing, non-numeric, or larger than the
        # incoming value. Errors out if the incoming value is not numeric.
        MIN_FIELD_SCRIPT = <<~LUA
          local current = redis.call("hget", KEYS[1], ARGV[1])
          local current_number = tonumber(current)
          local value = tonumber(ARGV[2])

          if value == nil then
            return redis.error_reply("ERR event_meter rollup min value must be numeric")
          end

          if current == false or current_number == nil or current_number > value then
            redis.call("hset", KEYS[1], ARGV[1], ARGV[2])
          end

          return 1
        LUA
        # Mirror of MIN_FIELD_SCRIPT that keeps the largest value instead.
        MAX_FIELD_SCRIPT = <<~LUA
          local current = redis.call("hget", KEYS[1], ARGV[1])
          local current_number = tonumber(current)
          local value = tonumber(ARGV[2])

          if value == nil then
            return redis.error_reply("ERR event_meter rollup max value must be numeric")
          end

          if current == false or current_number == nil or current_number < value then
            redis.call("hset", KEYS[1], ARGV[1], ARGV[2])
          end

          return 1
        LUA
        # Same max-keeping logic for a plain string key (interval state):
        # sets KEYS[1] to ARGV[1] only when it would increase the stored value.
        SET_MAX_SCRIPT = <<~LUA
          local current = redis.call("get", KEYS[1])
          local current_number = tonumber(current)
          local value = tonumber(ARGV[1])

          if value == nil then
            return redis.error_reply("ERR event_meter state value must be numeric")
          end

          if current == false or current_number == nil or current_number < value then
            redis.call("set", KEYS[1], ARGV[1])
          end

          return 1
        LUA

        attr_reader :redis, :lock_redis, :namespace, :report_name, :version, :lock_scope, :rollup_ttl

        # @param redis       data connection
        # @param namespace   key prefix, normalized by the Namespace mixin
        # @param lock_redis  optional separate connection for locks (defaults to +redis+)
        # @param report_name / version / lock_scope  scoping, normally set via #for_report
        # @param rollup_ttl  expiry in seconds for written keys; must be a positive integer
        # @raise [ArgumentError] when rollup_ttl is not a positive integer
        def initialize(redis:, namespace:, lock_redis: nil, report_name: nil, version: nil, lock_scope: nil, rollup_ttl: DEFAULT_ROLLUP_TTL)
          @redis = redis
          @lock_redis = lock_redis || redis
          @namespace = normalize_namespace(namespace)
          @report_name = report_name&.to_s
          @version = version&.to_i
          @lock_scope = lock_scope
          @rollup_ttl = positive_integer(rollup_ttl, "rollup_ttl")
        end

        # Returns a new store scoped to one report name/version, sharing the
        # same connections, namespace and TTL. The lock scope is derived from
        # the Keys helpers (presumably sanitized name + version segment —
        # Keys is defined elsewhere in the gem).
        def for_report(name:, version:)
          name = name.to_s
          version = version.to_i

          self.class.new(
            redis: redis,
            lock_redis: lock_redis,
            namespace: namespace,
            report_name: name,
            version: version,
            lock_scope: "#{Keys.event_name(name)}:#{Keys.version_key(version)}",
            rollup_ttl: rollup_ttl
          )
        end

        # Stores the report definition JSON if absent, then verifies the
        # stored definition matches the given one by fingerprint.
        # SET NX makes the write race-safe: whichever process wins, the
        # follow-up re-read compares against what actually landed in Redis.
        # @raise [DefinitionChangedError] when fingerprints differ (see ensure_same_definition!)
        def ensure_definition(definition)
          key = definition_key(definition.name, definition.version)
          payload = JSON.generate(definition.to_h)
          stored = redis.get(key)

          if stored
            ensure_same_definition!(stored, definition)
          else
            redis.set(key, payload, nx: true)
            ensure_same_definition!(redis.get(key), definition)
          end
        end

        # Returns the stored definition as a parsed Hash, or nil when the key
        # is missing or its payload is not valid JSON.
        def report_definition(name:, version:)
          payload = redis.get(definition_key(name, version))
          payload && JSON.parse(payload)
        rescue JSON::ParserError, TypeError
          nil
        end

        # Returns the subset of +ids+ that already have a processed marker,
        # preserving input order. Requires a scoped store.
        def processed_ids(ids)
          ensure_scoped!
          return [] if ids.empty?

          # One pipelined round trip; values align positionally with ids.
          values = redis.pipelined do |pipe|
            ids.each { |id| pipe.get(processed_key(id)) }
          end

          ids.zip(values).filter_map { |id, value| id if value }
        end

        # Deletes the processed markers for +ids+. Requires a scoped store.
        def forget_processed_ids(ids)
          ensure_scoped!
          keys = ids.map { |id| processed_key(id) }
          delete_keys(keys)
        end

        # Applies a processed batch inside one MULTI transaction:
        # rollup hash updates, max-only state updates (via Lua), and a
        # processed marker per entry id stamped with the current UTC time.
        # Every touched key gets the rollup TTL. Requires a scoped store.
        def apply(batch)
          ensure_scoped!

          redis.multi do |transaction|
            batch.rollups.each { |key, rollup| apply_rollup(transaction, key, rollup) }

            batch.state_updates.each do |key, value|
              transaction.eval(SET_MAX_SCRIPT, keys: [key], argv: [value])
              transaction.expire(key, rollup_ttl)
            end

            # Microsecond-precision ISO8601 so markers double as audit timestamps.
            processed_at = Time.now.utc.iso8601(6)
            batch.entry_ids.each do |id|
              key = processed_key(id)
              transaction.set(key, processed_at)
              transaction.expire(key, rollup_ttl)
            end
          end
        end

        # Pipelined HGETALL over +keys+; results align positionally with keys.
        def hgetall_many(keys)
          return [] if keys.empty?

          redis.pipelined do |pipe|
            keys.each { |key| pipe.hgetall(key) }
          end
        end

        # SCANs for keys matching +pattern+ (namespace portion glob-escaped),
        # re-filters with key_matches? because SCAN's MATCH is a server-side
        # glob on the raw key, and returns up to +limit+ keys sorted.
        # NOTE(review): the limit caps how many are collected, not which —
        # SCAN order is unspecified, so the result is "some matching keys".
        def keys_matching(pattern, limit: nil)
          limit = positive_integer(limit, "limit") if limit
          keys = []

          redis.scan_each(match: namespace_glob(pattern)) do |key|
            next unless key_matches?(key, pattern)

            keys << key
            break if limit && keys.length >= limit
          end

          keys.sort
        end

        # Raw string read of an arbitrary key.
        def get(key)
          redis.get(key)
        end

        # Reads the cleanup watermark value (same as #get; kept as a named
        # operation for the cleanup interface).
        def cleanup_watermark(key)
          redis.get(key)
        end

        # Persists the cleanup watermark value.
        def write_cleanup_watermark(key, value)
          redis.set(key, value)
        end

        # Runs the block under the scoped processing lock (RedisLock mixin).
        def with_lock(ttl:)
          with_redis_lock(lock_key, ttl: ttl) { yield }
        end

        # Deletes rollup keys, interval-state keys (optional) and processed
        # markers older than +before+, restricted to +events+ (filter built
        # by the CleanupHelpers mixin). Returns per-category deletion counts.
        def cleanup_history(before:, events:, interval_state:)
          filter = event_filter(events)

          {
            rollup_keys_deleted: cleanup_rollups(before, filter),
            interval_state_keys_deleted: interval_state ? cleanup_interval_state(before, filter) : 0,
            processed_entries_deleted: cleanup_processed_entries(before, filter)
          }
        end

        private

        # Writes one rollup's fields onto hash +key+ inside the transaction:
        # min/max fields (classified by EventMeter::Rollup) go through the
        # Lua scripts, everything else is a plain HINCRBY accumulation.
        def apply_rollup(transaction, key, rollup)
          rollup.fields.each do |field, value|
            if EventMeter::Rollup.min_field?(field)
              transaction.eval(MIN_FIELD_SCRIPT, keys: [key], argv: [field, value])
            elsif EventMeter::Rollup.max_field?(field)
              transaction.eval(MAX_FIELD_SCRIPT, keys: [key], argv: [field, value])
            else
              transaction.hincrby(key, field, value)
            end
          end

          transaction.expire(key, rollup_ttl)
        end

        # Coerces +value+ to a positive Integer or raises ArgumentError.
        # The rescue also converts Integer()'s TypeError/RangeError (nil,
        # floats out of range, non-numeric strings) into the same message.
        def positive_integer(value, name)
          integer = Integer(value)
          return integer if integer.positive?

          raise ArgumentError, "#{name} must be positive"
        rescue ArgumentError, TypeError, RangeError
          raise ArgumentError, "#{name} must be positive"
        end

        # Lock key: "<namespace>:process_lock[:<scope>]" — scope omitted when unscoped.
        def lock_key
          [namespace, "process_lock", lock_scope].compact.join(":")
        end

        # Processed-marker key for one entry id (layout owned by Keys).
        def processed_key(id)
          Keys.processed(
            namespace: namespace,
            name: report_name,
            version: version,
            id: id
          )
        end

        # Guards methods that need a report scope set via #for_report.
        def ensure_scoped!
          return if report_name && version&.positive?

          raise ConfigurationError, "redis rollup storage must be scoped with for_report"
        end

        def definition_key(name, version)
          Keys.definition(namespace: namespace, name: name, version: version)
        end

        # Compares the stored definition's fingerprint with the given one.
        # Unparseable stored JSON (including a nil read) is treated as a
        # changed/invalid definition rather than silently accepted.
        def ensure_same_definition!(stored, definition)
          stored_definition = ReportDefinition.from_h(JSON.parse(stored))
          return if stored_definition.fingerprint == definition.fingerprint

          raise DefinitionChangedError, "#{definition.name} v#{definition.version} changed; bump version"
        rescue JSON::ParserError, TypeError
          raise DefinitionChangedError, "#{definition.name} v#{definition.version} stored definition is invalid"
        end

        # Deletes rollup keys judged old by CleanupHelpers#rollup_key_old?.
        def cleanup_rollups(before, event_filter)
          keys = scan_keys(namespace_glob("#{namespace}:rollup:*")).select do |key|
            rollup_key_old?(key, before, event_filter)
          end
          delete_keys(keys)
          keys.length
        end

        # Deletes interval-state keys for matching events whose stored value
        # CleanupHelpers#state_key_old? deems older than +before+ (passed as
        # epoch milliseconds).
        def cleanup_interval_state(before, event_filter)
          before_ms = (before.to_f * 1000).to_i
          keys = scan_keys(namespace_glob("#{namespace}:state:*")).select do |key|
            state_key_matches_event?(key, event_filter)
          end
          return 0 if keys.empty?

          values = redis.pipelined { |pipe| keys.each { |key| pipe.get(key) } }
          expired = keys.zip(values).filter_map do |key, value|
            key if state_key_old?(key, before_ms, event_filter, value)
          end

          delete_keys(expired)
          expired.length
        end

        # Deletes processed markers whose stored timestamp is before +before+
        # (unparseable timestamps count as expired — see processed_entry_old?).
        def cleanup_processed_entries(before, event_filter)
          keys = scan_keys(namespace_glob("#{namespace}:processed:*")).select do |key|
            processed_key_matches_event?(key, event_filter)
          end
          return 0 if keys.empty?

          values = redis.pipelined { |pipe| keys.each { |key| pipe.get(key) } }
          expired = keys.zip(values).filter_map do |key, value|
            key if processed_entry_old?(value, before)
          end

          delete_keys(expired)
          expired.length
        end

        # True when +key+ is a processed marker and its event-name segment
        # (first segment after the prefix) passes the filter (nil filter = all).
        def processed_key_matches_event?(key, event_filter)
          prefix = "#{namespace}:processed:"
          return false unless key.start_with?(prefix)

          event_name = key.delete_prefix(prefix).split(":", 2).first
          event_name && (!event_filter || event_filter.include?(event_name))
        end

        # Same check for interval-state keys.
        def state_key_matches_event?(key, event_filter)
          prefix = "#{namespace}:state:"
          return false unless key.start_with?(prefix)

          event_name = key.delete_prefix(prefix).split(":", 3).first
          event_name && (!event_filter || event_filter.include?(event_name))
        end

        # Treats a nil/unparseable timestamp as old so corrupt markers are
        # swept rather than retained forever.
        def processed_entry_old?(timestamp, before)
          Time.parse(timestamp).utc < before
        rescue ArgumentError, TypeError, RangeError
          true
        end

        # Cursor-based SCAN that materializes all matches (non-blocking,
        # unlike KEYS).
        def scan_keys(pattern)
          keys = []
          redis.scan_each(match: pattern) { |key| keys << key }
          keys
        end

        # DEL in slices of 500 to keep individual commands bounded.
        def delete_keys(keys)
          keys.each_slice(500) do |slice|
            redis.del(*slice)
          end
        end

        # Escapes glob metacharacters in the namespace portion of +pattern+
        # so a namespace containing "*", "?" etc. cannot widen the SCAN match.
        def namespace_glob(pattern)
          prefix = "#{namespace}:"
          return pattern unless pattern.start_with?(prefix)

          "#{glob_escape(namespace)}:#{pattern.delete_prefix(prefix)}"
        end

        # Backslash-escapes Redis glob metacharacters: \ * ? [ ].
        def glob_escape(value)
          value.to_s.gsub(/[\\*\?\[\]]/) { |character| "\\#{character}" }
        end

        # Client-side re-check of SCAN results: when the pattern is
        # namespaced, match only the portions after the namespace prefix so
        # metacharacters in the namespace itself are compared literally.
        def key_matches?(key, pattern)
          prefix = "#{namespace}:"
          return ::File.fnmatch?(pattern, key) unless pattern.start_with?(prefix)
          return false unless key.start_with?(prefix)

          ::File.fnmatch?(pattern.delete_prefix(prefix), key.delete_prefix(prefix))
        end
      end
    end
  end
end
@@ -0,0 +1,98 @@
1
+ require "time_bucket_stream"
2
+
3
+ require_relative "../file_helpers"
4
+
5
module EventMeter
  module Stores
    module Stream
      # File-backed event stream. Each event name gets its own
      # TimeBucketStream directory under "<path>/streams/"; payloads are
      # appended there and read back as [entry_id, payload] batches.
      #
      # Fixes vs. previous revision:
      # - stream_for now reuses stream_path_for instead of duplicating the
      #   path construction (the two could silently drift apart).
      # - close drops the memoized stream handles so a later append reopens
      #   a fresh stream instead of reusing a closed one.
      class File
        include FileHelpers

        # Supported durability modes forwarded to TimeBucketStream.
        SYNC_MODES = %i[none flush fsync].freeze

        attr_reader :path, :stream_options, :sync

        # @param path  store root (normalized by FileHelpers)
        # @param sync  one of SYNC_MODES (symbol or symbol-coercible)
        # @param stream_options  extra options forwarded to TimeBucketStream
        # @raise [ArgumentError] for an unsupported sync mode
        def initialize(path:, sync: :flush, **stream_options)
          @path = normalize_file_store_path(path)
          @sync = sync.respond_to?(:to_sym) ? sync.to_sym : sync
          @stream_options = stream_options
          validate_sync!
          @streams = {}
          @read_ids = []
        end

        # Appends one event payload to the stream named by payload["name"].
        # @raise [KeyError] when the payload hash has no "name" key
        def append(payload)
          hash = payload.to_h
          stream_for(hash.fetch("name")).append(hash)
        end

        # Claims a batch from the named stream and returns its entries
        # ([id, payload] pairs). Any previously claimed batch is released
        # first so at most one batch is held at a time.
        def read(name:)
          release
          @read_name = name.to_s
          @batch = stream_for(name).read
          @read_ids = @batch.entries.map(&:first)
          @batch.entries
        end

        # Deletes the currently claimed batch. Returns false when nothing
        # was claimed; otherwise returns whether all claim files for the
        # batch are gone (see deleted_claims?). Always clears read state.
        def delete
          return false unless @batch

          @batch.delete
          deleted_claims?
        ensure
          @read_ids = []
          @read_name = nil
          @batch = nil
        end

        # Releases the currently claimed batch (if any) back to the stream
        # and clears read state.
        def release
          @batch&.release
        ensure
          @read_ids = []
          @read_name = nil
          @batch = nil
        end

        # Releases any claimed batch and closes all open streams.
        def close
          release
          @streams.each_value(&:close)
          # Drop closed handles so a subsequent append/read opens fresh
          # streams instead of hitting a closed TimeBucketStream.
          @streams = {}
        end

        private

        def validate_sync!
          return if SYNC_MODES.include?(sync)

          raise ArgumentError, "unsupported file sync mode: #{sync.inspect}"
        end

        # Memoized per-event stream handle, keyed by stringified name.
        def stream_for(name)
          @streams[name.to_s] ||= TimeBucketStream.new(
            path: stream_path_for(name),
            sync: sync,
            **stream_options
          )
        end

        # True when none of the batch's "processing" claim files still exist
        # on disk (i.e. the delete fully took effect). Vacuously true when
        # no entry id yields a log file name.
        def deleted_claims?
          log_names = @read_ids.filter_map { |id| stream_file_name(id) }.uniq
          return true if log_names.empty?

          log_names.none? do |log_name|
            ::File.exist?(::File.join(stream_path_for(@read_name), "processing", log_name))
          end
        end

        # Extracts the log file name from an entry id of the form
        # "<file>:<offset>"; nil when there is no ":" separator past index 0.
        def stream_file_name(entry_id)
          value = entry_id.to_s
          separator = value.index(":")
          value[0...separator] if separator&.positive?
        end

        # Filesystem directory for one event's stream.
        def stream_path_for(name)
          ::File.join(path, "streams", PathName.event(name))
        end
      end
    end
  end
end
@@ -0,0 +1,79 @@
1
+ require "json"
2
+
3
+ require_relative "../namespace"
4
+ require_relative "../redis_lock"
5
+
6
module EventMeter
  module Stores
    module Stream
      # Redis-Streams-backed event stream. Payloads are appended with XADD
      # as a single JSON-encoded "payload" field and read back with XRANGE.
      class Redis
        include Namespace
        include RedisLock

        attr_reader :redis, :lock_redis, :namespace, :redis_read_limit

        # @param redis  data connection
        # @param namespace  key prefix (normalized by the Namespace mixin)
        # @param lock_redis  optional separate lock connection (defaults to redis)
        # @param redis_read_limit  optional positive cap on XRANGE batch size
        # @raise [ArgumentError] when redis_read_limit is not a positive integer
        def initialize(redis:, namespace:, lock_redis: nil, redis_read_limit: nil)
          @redis = redis
          @lock_redis = lock_redis || redis
          @namespace = normalize_namespace(namespace)
          @redis_read_limit = normalize_redis_read_limit(redis_read_limit)
          @read_ids = []
        end

        # Appends one event payload to the stream for payload["name"].
        # @raise [KeyError] when the payload hash has no "name" key
        def append(payload)
          hash = payload.to_h
          redis.xadd(stream_key(hash.fetch("name")), { "payload" => hash.to_json })
        end

        # Reads entries from the named stream (capped by redis_read_limit
        # when configured) and remembers their ids so #delete can XDEL them.
        # Returns [id, parsed_payload] pairs; unparseable payloads map to nil.
        def read(name:)
          @read_key = stream_key(name)
          options = {}
          options[:count] = redis_read_limit if redis_read_limit
          entries = redis.xrange(@read_key, "-", "+", **options)
          @read_ids = entries.map { |entry| entry[0] }
          entries.map { |id, fields| [id, parse_payload(fields)] }
        end

        # Removes the most recently read entries from their stream, then
        # clears the read state regardless of whether XDEL ran.
        def delete
          if @read_key && !@read_ids.empty?
            redis.xdel(@read_key, *@read_ids)
          end
        ensure
          @read_ids = []
          @read_key = nil
        end

        # Runs the block while holding the stream lock (RedisLock mixin).
        def with_lock(ttl:)
          with_redis_lock(lock_key, ttl: ttl) { yield }
        end

        private

        # "<namespace>:stream:<sanitized event name>" (name via Keys helper).
        def stream_key(name)
          "#{namespace}:stream:#{Keys.event_name(name)}"
        end

        def lock_key
          "#{namespace}:stream_lock"
        end

        # JSON-decodes the "payload" field; nil on any decode/shape failure.
        def parse_payload(fields)
          JSON.parse(fields.fetch("payload"))
        rescue JSON::ParserError, KeyError, TypeError
          nil
        end

        # nil passes through; anything else must coerce to a positive Integer.
        def normalize_redis_read_limit(limit)
          return nil if limit.nil?

          begin
            parsed = Integer(limit)
          rescue ArgumentError, TypeError, RangeError
            raise ArgumentError, "redis_read_limit must be positive"
          end

          raise ArgumentError, "redis_read_limit must be positive" unless parsed.positive?

          parsed
        end
      end
    end
  end
end
@@ -0,0 +1,56 @@
1
module EventMeter
  # Utilities for aligning times to fixed-size UTC buckets (:minute, :hour).
  module TimeBuckets
    # Bucket size => duration in seconds.
    SIZES = {
      minute: 60,
      hour: 3600
    }.freeze

    module_function

    # Compact UTC identifier for the bucket containing +time+:
    # "YYYYMMDDHHMM" for :minute, "YYYYMMDDHH" for :hour.
    def id(time, size)
      stamp = time.utc

      case normalize(size)
      when :minute then stamp.strftime("%Y%m%d%H%M")
      when :hour then stamp.strftime("%Y%m%d%H")
      end
    end

    # Floors +value+ to the start of its bucket, as a UTC Time.
    def time(value, size)
      utc = value.utc
      parts = [utc.year, utc.month, utc.day, utc.hour]
      parts << utc.min if normalize(size) == :minute
      Time.utc(*parts)
    end

    # Bucket duration in seconds.
    # @raise [ArgumentError] for an unsupported size
    def seconds(size)
      SIZES.fetch(normalize(size))
    end

    # Bucket start times covering [from, to): begins at the floor of +from+
    # and steps by the bucket duration while strictly before +to+.
    def between(from, to, size)
      step = seconds(size)
      buckets = []
      cursor = time(from, size)

      until cursor >= to
        buckets << cursor
        cursor += step
      end

      buckets
    end

    # Coerces +size+ to a supported symbol or raises ArgumentError.
    def normalize(size)
      key = size.respond_to?(:to_sym) ? size.to_sym : size
      return key if SIZES.key?(key)

      raise ArgumentError, "unsupported bucket size: #{key.inspect}"
    end
  end
end
@@ -0,0 +1,3 @@
1
module EventMeter
  # Gem version, following Semantic Versioning (major.minor.patch).
  VERSION = "0.1.0"
end
@@ -0,0 +1,26 @@
1
module EventMeter
  # Outcome of a single event write: the payload that was (or would have
  # been) written plus the error that prevented it, if any. An instance is
  # "recorded" exactly when its error is nil.
  class WriteResult
    attr_reader :payload, :error

    class << self
      # Builds a successful result (no error).
      def recorded(payload)
        new(payload: payload, error: nil)
      end

      # Builds a failed result carrying the causing error.
      def failed(payload:, error:)
        new(payload: payload, error: error)
      end
    end

    def initialize(payload:, error:)
      @payload = payload
      @error = error
    end

    # True when the write succeeded (no error captured).
    def recorded?
      @error.nil?
    end

    # Inverse of #recorded?.
    def error?
      !recorded?
    end
  end
end