event_meter 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE.txt +21 -0
- data/README.md +1081 -0
- data/exe/event_meter +5 -0
- data/lib/event_meter/auto_cleanup.rb +93 -0
- data/lib/event_meter/cli.rb +124 -0
- data/lib/event_meter/configuration.rb +244 -0
- data/lib/event_meter/errors.rb +9 -0
- data/lib/event_meter/event.rb +180 -0
- data/lib/event_meter/event_payload.rb +103 -0
- data/lib/event_meter/hash_input.rb +20 -0
- data/lib/event_meter/index_key.rb +19 -0
- data/lib/event_meter/keys.rb +63 -0
- data/lib/event_meter/path_name.rb +37 -0
- data/lib/event_meter/processor.rb +305 -0
- data/lib/event_meter/rails.rb +79 -0
- data/lib/event_meter/report_definition.rb +184 -0
- data/lib/event_meter/reports.rb +143 -0
- data/lib/event_meter/rollup.rb +148 -0
- data/lib/event_meter/stores/cleanup_helpers.rb +76 -0
- data/lib/event_meter/stores/file_helpers.rb +47 -0
- data/lib/event_meter/stores/lock_refresher.rb +75 -0
- data/lib/event_meter/stores/namespace.rb +14 -0
- data/lib/event_meter/stores/redis_lock.rb +77 -0
- data/lib/event_meter/stores/rollup/active_record_postgres.rb +135 -0
- data/lib/event_meter/stores/rollup/file.rb +736 -0
- data/lib/event_meter/stores/rollup/postgres.rb +813 -0
- data/lib/event_meter/stores/rollup/redis.rb +349 -0
- data/lib/event_meter/stores/stream/file.rb +98 -0
- data/lib/event_meter/stores/stream/redis.rb +79 -0
- data/lib/event_meter/time_buckets.rb +56 -0
- data/lib/event_meter/version.rb +3 -0
- data/lib/event_meter/write_result.rb +26 -0
- data/lib/event_meter.rb +150 -0
- data/lib/generators/event_meter/install_generator.rb +57 -0
- data/lib/generators/event_meter/templates/create_event_meter_tables.rb.erb +12 -0
- data/lib/generators/event_meter/templates/event_meter.rb.erb +12 -0
- metadata +156 -0
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
require "json"
|
|
2
|
+
require "time"
|
|
3
|
+
|
|
4
|
+
require_relative "hash_input"
|
|
5
|
+
|
|
6
|
+
module EventMeter
  # Value object describing a single recorded event.
  #
  # Normalizes everything at construction time — string param keys, UTC start
  # time, non-negative integer duration, non-blank name/status — so the stream
  # and rollup stores can rely on a consistent shape.
  class EventPayload
    attr_reader :name, :status, :started_at, :duration_ms, :params

    # Convenience constructor mirroring .new but taking the name positionally.
    def self.build(name, params:, status: nil, started_at: nil, duration_ms: nil)
      new(
        name: name,
        params: params,
        status: status,
        started_at: started_at,
        duration_ms: duration_ms
      )
    end

    # Rehydrates a payload from a hash-like object (e.g. parsed JSON read back
    # from a stream store). Raises KeyError when "name" or "started_at" is
    # missing; "status" and "duration_ms" fall back to defaults in #initialize.
    def self.load(hash)
      attributes = stringify(hash)

      new(
        name: attributes.fetch("name"),
        params: attributes.fetch("params", {}),
        status: attributes["status"],
        started_at: attributes.fetch("started_at"),
        duration_ms: attributes["duration_ms"]
      )
    end

    # Coerces hash-like input (via HashInput) and converts all keys to strings.
    def self.stringify(hash)
      HashInput.coerce(hash, "event params").to_h { |key, value| [key.to_s, value] }
    end

    def initialize(name:, params:, status:, started_at:, duration_ms:)
      default_time = Time.now.utc

      @name = normalize_name(name)
      @status = normalize_status(status || "success")
      @started_at = parse_time(started_at || default_time)
      @duration_ms = integer_or_nil(duration_ms)
      @params = stringify(params)
    end

    # Serializable representation; nil values and empty params are dropped so
    # the stored record stays compact.
    def to_h
      serialized = {
        "name" => name,
        "status" => status,
        "started_at" => started_at.iso8601(6),
        "duration_ms" => duration_ms,
        "params" => params
      }
      serialized.reject { |_field, value| value.nil? || value == {} }
    end

    def to_json(*args)
      to_h.to_json(*args)
    end

    # Start time as integer milliseconds since the epoch (fraction truncated).
    def started_ms
      (started_at.to_f * 1000).truncate
    end

    private

    def stringify(hash)
      self.class.stringify(hash)
    end

    # Accepts a Time-like object (anything responding to #utc) or a parseable
    # time string; the result is always UTC.
    def parse_time(value)
      if value.respond_to?(:utc)
        value.utc
      elsif value.respond_to?(:to_str)
        Time.parse(value.to_str).utc
      else
        raise ArgumentError
      end
    rescue ArgumentError, TypeError, RangeError
      raise ArgumentError, "started_at must be a Time or parseable time string"
    end

    def normalize_name(value)
      text = value.to_s
      raise ArgumentError, "event name cannot be blank" if text.strip.empty?

      text
    end

    def normalize_status(value)
      text = value.to_s
      raise ArgumentError, "event status cannot be blank" if text.strip.empty?

      text
    end

    # Coerces to Integer and clamps negatives to zero; nil passes through.
    def integer_or_nil(value)
      return nil if value.nil?

      parsed = Integer(value)
      parsed.negative? ? 0 : parsed
    rescue ArgumentError, TypeError, RangeError
      raise ArgumentError, "duration_ms must be an integer"
    end
  end
end
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
module EventMeter
  # Normalizes arbitrary hash-like input into a detached plain Hash.
  module HashInput
    module_function

    # Coerces +value+ into a Hash copy.
    #
    # nil is treated as an empty hash. Anything else must respond to #to_h and
    # that #to_h must actually return a Hash; otherwise a TypeError naming
    # +label+ is raised. The result is duped so callers can mutate it freely.
    def coerce(value, label)
      return {} if value.nil?

      raise TypeError, "#{label} must respond to to_h" unless value.respond_to?(:to_h)

      coerced = value.to_h
      raise TypeError, "#{label}#to_h must return a Hash" unless coerced.is_a?(Hash)

      coerced.dup
    end
  end
end
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
require "cgi/escape"
|
|
2
|
+
|
|
3
|
+
module EventMeter
  # Builds the "param=value|param=value" key used to identify an index slice.
  module IndexKey
    module_function

    # Joins escaped "param=value" pairs with "|". With no params the special
    # key "all" is returned. Values are looked up by the param as given first,
    # then by its string form, so symbol- and string-keyed hashes both work;
    # a missing value raises KeyError.
    def build(params, values)
      return "all" if params.empty?

      pairs = params.map do |param|
        raw = values.fetch(param) { values.fetch(param.to_s) }
        "#{escape(param)}=#{escape(raw)}"
      end
      pairs.join("|")
    end

    # URL-style escaping keeps ":" and "|" out of key segments.
    def escape(value)
      CGI.escape(value.to_s)
    end
  end
end
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
module EventMeter
  # Builds the colon-separated, namespaced storage keys shared by every store
  # implementation. All variable segments are escaped via IndexKey so ":" can
  # never appear inside a segment.
  module Keys
    module_function

    # Key for one rollup bucket of one index within a report version.
    def rollup(namespace:, name:, version:, every:, bucket:, index:)
      parts = [
        namespace,
        "rollup",
        event_name(name),
        version_key(version),
        every,
        TimeBuckets.id(bucket, every),
        index.key
      ]
      parts.join(":")
    end

    # Glob pattern matching every bucket ("*" sits in the bucket position).
    def rollup_pattern(namespace:, name:, version:, every:, index:)
      parts = [
        namespace,
        "rollup",
        event_name(name),
        version_key(version),
        every,
        "*",
        index.key
      ]
      parts.join(":")
    end

    # Key holding the previous start time for an interval metric value.
    def interval_state(namespace:, name:, version:, definition:, value:)
      parts = [
        namespace,
        "state",
        event_name(name),
        version_key(version),
        "interval",
        IndexKey.escape(definition.param),
        IndexKey.escape(value)
      ]
      parts.join(":")
    end

    # Key under which a report definition snapshot is stored.
    def definition(namespace:, name:, version:)
      [namespace, "definition", event_name(name), version_key(version)].join(":")
    end

    # Dedup marker key for an already-processed stream entry id.
    def processed(namespace:, name:, version:, id:)
      parts = [
        namespace,
        "processed",
        event_name(name),
        version_key(version),
        IndexKey.escape(id)
      ]
      parts.join(":")
    end

    def event_name(name)
      IndexKey.escape(name)
    end

    def version_key(version)
      PathName.version(version)
    end
  end
end
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
require "digest"
|
|
2
|
+
|
|
3
|
+
module EventMeter
  # Derives filesystem-safe path segments from event names and versions.
  module PathName
    # Names Windows refuses as file names regardless of extension.
    WINDOWS_RESERVED_NAMES = %w[
      con prn aux nul
      com1 com2 com3 com4 com5 com6 com7 com8 com9
      lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9
    ].freeze

    module_function

    # Produces "<readable-slug>-<16-hex-digest>" for an event name. The slug
    # is lowercased, non [a-z0-9_-] runs become "-", dashes are collapsed and
    # trimmed, reserved Windows names get an "event-" prefix, and the slug is
    # capped at 80 chars. The SHA-256 suffix of the ORIGINAL name keeps
    # distinct names unique even when their slugs collide.
    def event(name)
      raw = name.to_s
      slug = raw
        .downcase
        .gsub(/[^a-z0-9_-]+/, "-")
        .gsub(/-+/, "-")
        .gsub(/\A-|-+\z/, "")

      slug = "event" if slug.empty?
      slug = "event-#{slug}" if WINDOWS_RESERVED_NAMES.include?(slug)
      slug = slug[0, 80].gsub(/-+\z/, "")

      "#{slug}-#{Digest::SHA256.hexdigest(raw)[0, 16]}"
    end

    # Returns "v<n>" for a positive integer (or integer-like) version;
    # anything else raises ArgumentError with a uniform message.
    def version(version)
      numeric = Integer(version)
      raise ArgumentError if numeric <= 0

      "v#{numeric}"
    rescue ArgumentError, TypeError, RangeError
      raise ArgumentError, "report version must be a positive integer"
    end
  end
end
|
|
@@ -0,0 +1,305 @@
|
|
|
1
|
+
module EventMeter
  # Drains a stream store into a rollup store for one report definition.
  #
  # #process reads all pending stream entries, skips ids the rollup store has
  # already seen, aggregates the rest into a Batch of rollup increments and
  # interval-state updates, applies the batch, then deletes the stream.
  # Stream-level and (for interval metrics only) rollup-level locks guard the
  # run; when either lock cannot be acquired a Result with locked: true is
  # returned instead of processing.
  class Processor
    # Granularities every event is rolled up at.
    ROLLUP_BUCKETS = %i[minute hour].freeze

    # Outcome summary of one #process run.
    Result = Struct.new(
      :event_name,
      :version,
      :processed,
      :skipped_already_processed,
      :malformed,
      :complete,
      :locked,
      keyword_init: true
    ) do
      # Nothing to do: stream was empty.
      def self.empty(definition)
        new(
          event_name: definition.name,
          version: definition.version,
          processed: 0,
          skipped_already_processed: 0,
          malformed: 0,
          complete: true,
          locked: false
        )
      end

      # Another worker holds the lock; nothing was processed.
      def self.locked(definition)
        new(
          event_name: definition.name,
          version: definition.version,
          processed: 0,
          skipped_already_processed: 0,
          malformed: 0,
          complete: false,
          locked: true
        )
      end

      # A run that actually consumed entries; complete: false means the
      # stream could not be deleted, so entries may be re-read next run.
      def self.processed(definition, processed:, skipped_already_processed:, malformed:, complete:)
        new(
          event_name: definition.name,
          version: definition.version,
          processed: processed,
          skipped_already_processed: skipped_already_processed,
          malformed: malformed,
          complete: complete,
          locked: false
        )
      end

      def to_h
        {
          event_name: event_name,
          version: version,
          processed: processed,
          skipped_already_processed: skipped_already_processed,
          malformed: malformed,
          complete: complete,
          locked: locked
        }
      end
    end

    # rollup_storage is re-scoped to this report's name/version when the
    # store supports it (see #rollup_storage_for).
    def initialize(configuration:, report_definition:, stream_storage:, rollup_storage:)
      @configuration = configuration
      @report_definition = report_definition
      @stream_storage = stream_storage
      @rollup_storage = rollup_storage_for(report_definition, rollup_storage)
    end

    # Entry point: runs the full drain under the stream lock (and, when
    # interval metrics exist, the rollup lock as well). Returns a Result.
    def process
      with_stream_lock do
        process_with_rollup_lock
      end
    end

    private

    # Runs the block under the stream store's lock when it offers one;
    # stores that don't expose with_lock run unguarded. A falsy lock return
    # means another worker holds it, so report Result.locked.
    def with_stream_lock
      return yield unless @stream_storage.respond_to?(:with_lock)

      result = nil
      lock_acquired = @stream_storage.with_lock(ttl: @configuration.lock_ttl) do
        result = yield
      end

      return result if lock_acquired

      Result.locked(@report_definition)
    end

    def process_with_rollup_lock
      # Counts and durations are merge-safe and use processed-entry retry
      # guards. Only interval metrics need the rollup-wide lock because they
      # advance shared "previous start time" state.
      return process_unlocked unless @report_definition.intervals.any?

      result = nil
      lock_acquired = @rollup_storage.with_lock(ttl: @configuration.lock_ttl) do
        result = process_unlocked
      end

      return result if lock_acquired

      Result.locked(@report_definition)
    end

    # Narrows a multi-report rollup store down to this report when possible:
    # already-scoped stores pass through, stores exposing for_report get
    # scoped, anything else is used as-is.
    def rollup_storage_for(definition, storage)
      return storage if scoped_for?(definition, storage)
      return storage.for_report(name: definition.name, version: definition.version) if storage.respond_to?(:for_report)

      storage
    end

    # True when the store is already bound to this report's name and version.
    def scoped_for?(definition, storage)
      return false unless storage.respond_to?(:report_name) && storage.respond_to?(:version)

      storage.report_name.to_s == definition.name.to_s && storage.version.to_i == definition.version.to_i
    end

    # Reads the stream and processes its entries. Any failure after the read
    # releases the stream (best effort) so entries are not stranded, then
    # re-raises.
    def process_unlocked
      @rollup_storage.ensure_definition(@report_definition)

      entries = @stream_storage.read(name: @report_definition.name)
      return Result.empty(@report_definition) if entries.empty?

      entry_ids = entries.map(&:first)
      process_entries(entries, entry_ids)
    rescue StandardError
      release_stream if entries
      raise
    end

    # Aggregates pending entries into a batch, applies it, deletes the
    # stream, and (only once the stream is truly gone) lets the rollup store
    # drop its processed-id markers for these entries.
    def process_entries(entries, entry_ids)
      pending_entries = unprocessed_entries(entries, entry_ids)
      skipped_count = entries.length - pending_entries.length
      batch = build_batch(pending_entries)

      @rollup_storage.apply(batch) unless batch.empty?
      stream_deleted = !!@stream_storage.delete
      @rollup_storage.forget_processed_ids(entry_ids) if stream_deleted && @rollup_storage.respond_to?(:forget_processed_ids)

      Result.processed(
        @report_definition,
        processed: pending_entries.length,
        skipped_already_processed: skipped_count,
        malformed: batch.malformed,
        complete: stream_deleted
      )
    end

    # Best-effort release of the stream store; errors here are swallowed so
    # they never mask the original failure being propagated.
    def release_stream
      @stream_storage.release if @stream_storage.respond_to?(:release)
    rescue StandardError
      nil
    end

    # Drops entries whose ids were already applied in a previous run, plus
    # duplicate ids within this read (only the first occurrence survives).
    def unprocessed_entries(entries, entry_ids)
      processed_ids = @rollup_storage.processed_ids(entry_ids)
      processed_lookup = processed_ids.to_h { |id| [id, true] }
      seen = {}

      entries.reject do |id, _raw_payload|
        duplicate = seen.key?(id)
        seen[id] = true

        duplicate || processed_lookup.key?(id)
      end
    end

    # Folds entries into a Batch. Every id lands in batch.entry_ids (so it is
    # marked processed either way); unparseable payloads and payloads for a
    # different event name only bump the malformed counter.
    def build_batch(entries)
      Batch.new.tap do |batch|
        entries.each do |id, raw_payload|
          payload = load_payload(raw_payload)
          unless payload
            batch.entry_ids << id
            batch.malformed += 1
            next
          end

          if payload.name == @report_definition.name
            record_event(batch, payload)
            record_intervals(batch, payload)
          else
            batch.malformed += 1
          end

          batch.entry_ids << id
        end
      end
    end

    # nil signals a malformed entry; the listed errors cover bad JSON-shaped
    # hashes (missing keys, wrong types, unparseable times).
    def load_payload(raw_payload)
      EventPayload.load(raw_payload)
    rescue ArgumentError, KeyError, TypeError
      nil
    end

    # Adds count/status/start/duration data to every index this payload
    # belongs to.
    def record_event(batch, payload)
      @report_definition.indexes_for(payload).each do |index|
        add_event_rollup(batch, payload, index)
      end
    end

    def add_event_rollup(batch, payload, index)
      ROLLUP_BUCKETS.each do |every|
        rollup = rollup_for(batch, payload, every, index)
        rollup.increment("count")
        rollup.increment("#{payload.status}_count")
        rollup.add_started_at(payload.started_ms)
        rollup.add_duration(payload.duration_ms)
      end
    end

    # Interval metrics: measures the gap between consecutive start times for
    # the same interval-param value. Out-of-order events (current <= previous)
    # are ignored; state writes are deferred until the end of the loop so a
    # single payload's intervals all compare against pre-payload state.
    def record_intervals(batch, payload)
      state_updates = {}

      @report_definition.intervals.each do |definition|
        value = payload.params[definition.param.to_s]
        next if value.nil?
        next unless definition.group_by.all? { |param| ReportDefinition.indexable_value?(payload.params, param) }

        state_key = interval_state_key(payload.name, definition, value)
        previous_ms = previous_interval_ms(batch, state_key)
        current_ms = payload.started_ms
        next if previous_ms && current_ms <= previous_ms

        state_updates[state_key] = current_ms
        next unless previous_ms

        index = definition.group_index.key_for(payload.params)
        built_index = ReportDefinition::BuiltIndex.new(index: definition.group_index, key: index)
        interval_ms = current_ms - previous_ms

        ROLLUP_BUCKETS.each do |every|
          rollup_for(batch, payload, every, built_index).add_interval(interval_ms)
        end
      end

      state_updates.each do |state_key, current_ms|
        batch.interval_state[state_key] = current_ms
        batch.state_updates[state_key] = current_ms
      end
    end

    # Batch-local cache over the rollup store's persisted previous timestamp;
    # a store miss is cached as nil so it is only fetched once per batch.
    def previous_interval_ms(batch, state_key)
      batch.interval_state.fetch(state_key) do
        stored_value = @rollup_storage.get(state_key)
        batch.interval_state[state_key] = interval_timestamp(stored_value)
      end
    end

    # Coerces a stored state value to a non-negative integer timestamp;
    # anything unparseable or negative is treated as "no previous value".
    def interval_timestamp(value)
      return nil if value.nil?

      timestamp = Integer(value)
      timestamp >= 0 ? timestamp : nil
    rescue ArgumentError, TypeError, RangeError
      nil
    end

    # Fetch-or-create the Rollup accumulator for this payload's time bucket
    # and index (batch.rollups default-constructs missing entries).
    def rollup_for(batch, payload, every, index)
      bucket = TimeBuckets.time(payload.started_at, every)
      batch.rollups[rollup_key(payload.name, every, bucket, index)]
    end

    def rollup_key(name, every, bucket, index)
      Keys.rollup(
        namespace: @configuration.namespace,
        name: name,
        version: @report_definition.version,
        every: every,
        bucket: bucket,
        index: index
      )
    end

    def interval_state_key(name, definition, value)
      Keys.interval_state(
        namespace: @configuration.namespace,
        name: name,
        version: @report_definition.version,
        definition: definition,
        value: value
      )
    end

    # Mutable accumulator handed to the rollup store's #apply:
    # - rollups: rollup key => Rollup (auto-created on first access)
    # - state_updates: interval state keys to persist
    # - interval_state: batch-local state cache (includes read-through misses)
    # - entry_ids: every id consumed this run
    # - malformed: count of unusable entries
    class Batch
      attr_reader :rollups, :state_updates, :entry_ids, :interval_state
      attr_accessor :malformed

      def initialize
        @rollups = Hash.new { |hash, key| hash[key] = Rollup.new }
        @state_updates = {}
        @entry_ids = []
        @interval_state = {}
        @malformed = 0
      end

      # A batch with no consumed ids needs no store writes at all.
      def empty?
        entry_ids.empty?
      end
    end
  end
end
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
require_relative "../event_meter"
|
|
2
|
+
require_relative "stores/rollup/active_record_postgres"
|
|
3
|
+
|
|
4
|
+
module EventMeter
  # Rails-flavoured convenience wiring: a file-backed event stream paired
  # with a Postgres (ActiveRecord) rollup store.
  module Rails
    module_function

    # One-call configuration for a Rails app.
    #
    # Only :file stream storage and :postgres rollup storage are supported
    # here; anything else raises ArgumentError. Optional cleanup/limit
    # settings are forwarded only when explicitly provided, and a logger (if
    # given) is wired up as the auto-cleanup error handler.
    def configure(namespace:, stream_path:, stream_storage: :file, rollup_storage: :postgres,
                  table_prefix: "event_meter", connection_class: nil, stream_sync: :flush,
                  auto_cleanup_history: false, cleanup_history_retention: nil,
                  cleanup_history_interval: nil, summary_key_limit: nil, logger: nil)

      connection_class ||= Stores::Rollup::ActiveRecordPostgres.default_connection_class

      EventMeter.configure do |config|
        config.namespace = namespace
        config.stream_storage = build_stream_storage(
          stream_storage,
          path: stream_path,
          sync: stream_sync
        )
        config.rollup_storage = build_rollup_storage(
          rollup_storage,
          connection_class: connection_class,
          namespace: namespace,
          table_prefix: table_prefix
        )
        config.auto_cleanup_history = auto_cleanup_history
        config.cleanup_history_retention = cleanup_history_retention if cleanup_history_retention
        config.cleanup_history_interval = cleanup_history_interval if cleanup_history_interval
        config.summary_key_limit = summary_key_limit if summary_key_limit
        config.auto_cleanup_error_handler = auto_cleanup_error_handler(logger) if logger
      end
    end

    # Raw DDL for the rollup tables (useful for hand-written migrations).
    def migration_sql(table_prefix: "event_meter")
      Stores::Rollup::Postgres.schema_sql(table_prefix: table_prefix)
    end

    # Creates the rollup tables immediately via ActiveRecord.
    def install_postgres!(connection_class: nil, table_prefix: "event_meter")
      Stores::Rollup::ActiveRecordPostgres.install!(
        connection_class: connection_class || Stores::Rollup::ActiveRecordPostgres.default_connection_class,
        table_prefix: table_prefix
      )
    end

    # Wraps a logger so auto-cleanup failures are warned about, not raised.
    def auto_cleanup_error_handler(logger)
      lambda do |error|
        logger.warn "EventMeter auto cleanup failed: #{error.class}: #{error.message}"
      end
    end

    def build_stream_storage(storage, path:, sync:)
      validate_storage!(storage, expected: :file, name: "stream_storage")

      Stores::Stream::File.new(path: path, sync: sync)
    end

    def build_rollup_storage(storage, connection_class:, namespace:, table_prefix:)
      validate_storage!(storage, expected: :postgres, name: "rollup_storage")

      Stores::Rollup::ActiveRecordPostgres.new(
        connection_class: connection_class,
        namespace: namespace,
        table_prefix: table_prefix
      )
    end

    # Raises unless the requested storage matches the single supported kind.
    def validate_storage!(storage, expected:, name:)
      actual = storage_key(storage)
      return if actual == expected

      raise ArgumentError, "#{name} must be #{expected.inspect}"
    end

    # Symbols and strings normalize to a symbol; other objects pass through
    # untouched (and will fail validation above).
    def storage_key(storage)
      return storage unless storage.respond_to?(:to_sym)

      storage.to_sym
    end
  end
end
|