chronicle-rails 0.1.0
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- checksums.yaml +7 -0
- data/MIT-LICENSE +20 -0
- data/README.md +28 -0
- data/Rakefile +6 -0
- data/app/controllers/chronicle/api_logs_controller.rb +61 -0
- data/app/controllers/chronicle/api_routes_controller.rb +35 -0
- data/app/controllers/chronicle/application_controller.rb +23 -0
- data/app/controllers/chronicle/auth_controller.rb +17 -0
- data/app/controllers/chronicle/error_groups_controller.rb +58 -0
- data/app/controllers/chronicle/error_logs_controller.rb +47 -0
- data/app/controllers/chronicle/resource_controller.rb +41 -0
- data/app/controllers/concerns/chronicle/filterable.rb +57 -0
- data/app/controllers/concerns/chronicle/pagination.rb +41 -0
- data/app/errors/chronicle/authentication_error.rb +7 -0
- data/app/errors/chronicle/bad_request_error.rb +7 -0
- data/app/errors/chronicle/base_error.rb +10 -0
- data/app/errors/chronicle/forbidden_error.rb +7 -0
- data/app/errors/chronicle/not_acceptable_error.rb +7 -0
- data/app/errors/chronicle/not_found_error.rb +7 -0
- data/app/errors/chronicle/resource_busy_error.rb +7 -0
- data/app/errors/chronicle/validation_error.rb +7 -0
- data/app/jobs/chronicle/application_job.rb +4 -0
- data/app/jobs/chronicle/flush_api_logs_job.rb +9 -0
- data/app/mailers/chronicle/application_mailer.rb +6 -0
- data/app/models/chronicle/admin_user.rb +16 -0
- data/app/models/chronicle/api_log.rb +25 -0
- data/app/models/chronicle/api_route.rb +5 -0
- data/app/models/chronicle/application_record.rb +5 -0
- data/app/models/chronicle/error_group.rb +53 -0
- data/app/models/chronicle/error_log.rb +69 -0
- data/app/services/chronicle/api_logs/buffer.rb +62 -0
- data/app/services/chronicle/api_logs/flusher.rb +98 -0
- data/app/services/chronicle/api_logs/metrics.rb +285 -0
- data/app/services/chronicle/api_logs/updater.rb +19 -0
- data/app/services/chronicle/api_routes/stats.rb +131 -0
- data/app/services/chronicle/error_logs/group_resolver.rb +66 -0
- data/config/routes.rb +28 -0
- data/db/migrate/20260101000001_create_chronicle_admin_users.rb +16 -0
- data/db/migrate/20260101000002_create_chronicle_api_logs.rb +32 -0
- data/db/migrate/20260101000003_create_chronicle_api_routes.rb +13 -0
- data/db/migrate/20260101000004_create_chronicle_error_groups.rb +26 -0
- data/db/migrate/20260101000005_create_chronicle_error_logs.rb +19 -0
- data/lib/chronicle/configuration.rb +56 -0
- data/lib/chronicle/engine.rb +12 -0
- data/lib/chronicle/util.rb +26 -0
- data/lib/chronicle/version.rb +3 -0
- data/lib/chronicle-rails.rb +1 -0
- data/lib/chronicle.rb +70 -0
- data/lib/tasks/chronicle_tasks.rake +4 -0
- metadata +127 -0

data/app/models/chronicle/error_log.rb
@@ -0,0 +1,69 @@
module Chronicle
  class ErrorLog < ApplicationRecord
    belongs_to :error_group

    # Constants are re-exported so callers can reference ErrorLog::OPEN etc.
    # without knowing about the group model.
    OPEN = ErrorGroup::OPEN
    RESOLVED = ErrorGroup::RESOLVED
    IGNORED = ErrorGroup::IGNORED
    VALID_STATUSES = ErrorGroup::VALID_STATUSES

    # These fields do not live in error_logs; they are provided at creation time
    # and forwarded to the ErrorGroup by GroupResolver. After the group is
    # resolved the reader methods below return the persisted group values.
    attr_writer :project, :source_type, :source_name, :error_message,
                :error_fingerprint, :original_backtrace, :cleaned_backtrace

    validates :error_group, presence: true
    validates :source_type,
              :source_name,
              :error_message, presence: true, on: :create

    before_validation :resolve_error_group, on: :create

    delegate :status, :jira_link, to: :error_group, allow_nil: true

    # Reader methods: prefer the persisted group value once the log is saved;
    # fall back to the in-memory virtual attribute during the create flow.
    def project = error_group&.project || @project
    def source_type = error_group&.source_type || @source_type
    def source_name = error_group&.source_name || @source_name
    def error_message = error_group&.error_message || @error_message
    def error_fingerprint = error_group&.fingerprint || @error_fingerprint
    def original_backtrace = error_group&.original_backtrace || @original_backtrace
    def cleaned_backtrace = error_group&.cleaned_backtrace || @cleaned_backtrace

    # Returns a flat hash that combines the log's own columns with the key
    # group-level fields inlined for API convenience, plus the full nested
    # group for clients that need it.
    def get_hash
      group = error_group
      attributes.symbolize_keys.merge(
        error_fingerprint: group&.fingerprint,
        project: group&.project,
        source_type: group&.source_type,
        source_name: group&.source_name,
        error_message: group&.error_message,
        original_backtrace: group&.original_backtrace,
        cleaned_backtrace: group&.cleaned_backtrace,
        status: group&.status,
        jira_link: group&.jira_link,
        error_group: group&.get_hash,
        user: (Chronicle.config.user_model&.find_by(id: user_id)&.try(:basic_info) if user_id.present?)
      )
    end

    def as_json(_options = {})
      get_hash
    end

    private

    def resolve_error_group
      return if error_group_id.present? || error_group&.persisted?

      self.error_group = ErrorLogs::GroupResolver.new(self).call
    end
  end
end
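
For orientation, a minimal create-flow sketch (not part of the package; all values are illustrative). The virtual writers capture the group-level fields, then the before_validation hook assigns the ErrorGroup via ErrorLogs::GroupResolver:

# Hypothetical values; the writers come from attr_writer above.
log = Chronicle::ErrorLog.new(
  source_type: 'controller',
  source_name: 'OrdersController#create',
  error_message: "undefined method `total' for nil",
  error_fingerprint: 'a1b2c3',
  original_backtrace: ["app/models/order.rb:42:in `total'"],
  cleaned_backtrace: ['app/models/order.rb:42']
)
log.save!      # resolve_error_group finds or creates the ErrorGroup
log.status     # delegated to the group, e.g. Chronicle::ErrorLog::OPEN
log.as_json    # flat hash from get_hash, nested error_group included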

data/app/services/chronicle/api_logs/buffer.rb
@@ -0,0 +1,62 @@
require 'fileutils'
require 'json'
require 'tmpdir' # Dir.tmpdir is used in the non-Rails fallback below

module Chronicle
  module ApiLogs
    # Append-only, per-process file buffer for API log payloads.
    #
    # Each Puma/SolidQueue worker writes to its own `api_logs.{pid}.jsonl`
    # file, so no cross-process locking is needed. The Flusher reads all
    # PID files when it runs.
    class Buffer
      FILE_PREFIX = 'api_logs'.freeze
      FILE_EXT = 'jsonl'.freeze

      class << self
        def append(payload)
          dir = buffer_dir
          FileUtils.mkdir_p(dir) unless File.directory?(dir)

          path = current_file_path
          File.open(path, 'a') { |f| f.puts(payload.to_json) }

          maybe_flush(path)
        end

        def buffer_dir
          configured = Chronicle.config.api_log_buffer_dir
          return configured if configured.present?

          if defined?(Rails) && Rails.respond_to?(:root) && Rails.root
            Rails.root.join('tmp/chronicle').to_s
          else
            File.join(Dir.tmpdir, 'chronicle')
          end
        end

        def current_file_path
          File.join(buffer_dir, "#{FILE_PREFIX}.#{Process.pid}.#{FILE_EXT}")
        end

        def line_count(path)
          return 0 unless File.exist?(path)
          count = 0
          File.foreach(path) { count += 1 }
          count
        end

        private

        def maybe_flush(path)
          threshold = Chronicle.config.api_log_flush_size
          return if threshold.nil? || threshold <= 0
          return if line_count(path) < threshold

          Chronicle::FlushApiLogsJob.perform_later
        rescue StandardError
          # Never let a flush enqueue failure break the request path.
        end
      end
    end
  end
end
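
A usage sketch (payload keys are illustrative; the engine builds the real payload from the request, and the flusher keeps only keys that match chronicle_api_logs columns):

# Appends one JSON line to <buffer_dir>/api_logs.<pid>.jsonl.
Chronicle::ApiLogs::Buffer.append(
  request_id: 'req-123',
  api_endpoint: '/api/v1/orders',
  http_method: 'POST',
  http_status_code: 201,
  response_time_ms: 42.5
)
# Once the file reaches config.api_log_flush_size lines, FlushApiLogsJob
# is enqueued; enqueue failures are swallowed so the request never breaks.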

data/app/services/chronicle/api_logs/flusher.rb
@@ -0,0 +1,98 @@
require 'fileutils' # FileUtils.rm_f is used in cleanup_and_return below
require 'json'

module Chronicle
  module ApiLogs
    # Reads all per-PID buffer files, bulk-inserts their contents into
    # chronicle_api_logs, upserts api_routes, and deletes the consumed files.
    #
    # Atomic-rename strategy: each buffer file is renamed to a `.flushing-*`
    # name before reading, so concurrent writers (same PID) recreate the
    # original file and never lose entries.
    class Flusher
      INSERT_BATCH_SIZE = 1000

      class << self
        def call
          paths = claim_files
          return { files: 0, records: 0 } if paths.empty?

          records = paths.flat_map { |p| read_jsonl(p) }
          return cleanup_and_return(paths, 0) if records.empty?

          insert_logs(records)
          sync_routes(records)

          cleanup_and_return(paths, records.size)
        end

        def sync_routes(records)
          now = Time.current
          pairs = records.filter_map do |r|
            path = r['api_endpoint']
            method = r['http_method']
            next if path.blank? || method.blank?
            [path, method]
          end.uniq

          return if pairs.empty?

          rows = pairs.map do |path, method|
            { path: path, http_method: method, first_seen_at: now, created_at: now, updated_at: now }
          end

          ApiRoute.insert_all(rows, unique_by: 'index_chronicle_api_routes_on_path_and_method') # rubocop:disable Rails/SkipsModelValidations
        end

        private

        def claim_files
          dir = Buffer.buffer_dir
          return [] unless File.directory?(dir)

          glob = File.join(dir, "#{Buffer::FILE_PREFIX}.*.#{Buffer::FILE_EXT}")
          stamp = Time.now.utc.strftime('%Y%m%d%H%M%S%N')

          Dir.glob(glob).reject { |p| p.include?('.flushing-') }.filter_map do |path|
            target = path.sub(/\.#{Buffer::FILE_EXT}\z/o, ".flushing-#{stamp}.#{Buffer::FILE_EXT}")
            begin
              File.rename(path, target)
              target
            rescue Errno::ENOENT
              nil
            end
          end
        end

        def read_jsonl(path)
          File.foreach(path).filter_map do |line|
            line = line.strip
            next if line.empty?
            JSON.parse(line)
          rescue JSON::ParserError
            nil
          end
        end

        def insert_logs(records)
          now = Time.current
          allowed = ApiLog.column_names - ['id']

          records.each_slice(INSERT_BATCH_SIZE) do |batch|
            rows = batch.map do |raw|
              row = raw.slice(*allowed)
              row['created_at'] ||= now
              row['updated_at'] ||= now
              row
            end
            ApiLog.insert_all(rows) # rubocop:disable Rails/SkipsModelValidations
          end
        end

        def cleanup_and_return(paths, count)
          paths.each { |p| FileUtils.rm_f(p) }
          { files: paths.size, records: count }
        end
      end
    end
  end
end
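
The flusher is normally triggered by Chronicle::FlushApiLogsJob (see data/app/jobs/chronicle/flush_api_logs_job.rb), but it is safe to run by hand; a sketch with illustrative counts:

# Claims api_logs.*.jsonl files via rename, bulk-inserts the parsed rows,
# upserts routes, then deletes the claimed files.
Chronicle::ApiLogs::Flusher.call
# => { files: 3, records: 1500 }   (shape per cleanup_and_return above)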

data/app/services/chronicle/api_logs/metrics.rb
@@ -0,0 +1,285 @@
module Chronicle
  module ApiLogs
    class Metrics
      include Filterable

      FILTER_DEFINITION = {
        client_version: :exact,
        backend_version: :exact,
        device_os: :exact,
        time_zone: :exact,
        start_date: :date_range,
        end_date: :date_range,
        http_method: :exact,
        api_endpoint: :exact,
      }.freeze

      def initialize(filters: {})
        @filters = filters
        start_time, end_time = normalize_date_range
        @filters[:start_date] = start_time.to_date.to_s
        @filters[:end_date] = end_time.to_date.to_s
      end

      def kpi_cards
        scope = build_query(ApiLog, filter_definition: FILTER_DEFINITION, filters: @filters, date_column: :timestamp)
        aggregates = fetch_aggregates(scope)

        {
          total_api_calls: aggregates[:total_count],
          unique_users: aggregates[:unique_users],
          unique_devices: aggregates[:unique_devices],
          average_response_time: aggregates[:avg_response_time],
          p50_response_time: aggregates[:p50_response_time],
          p95_response_time: aggregates[:p95_response_time],
          p99_response_time: aggregates[:p99_response_time],
          error_rate_percentage: calculate_error_rate(aggregates[:total_count], aggregates[:error_count]),
          requests_per_hour: calculate_requests_per_hour(aggregates[:total_count]),
        }
      end

      def distribution_metrics
        start_time, end_time = normalize_date_range
        scope = build_query(ApiLog, filter_definition: FILTER_DEFINITION, filters: @filters, date_column: :timestamp)

        {
          status_code_distribution: fetch_status_code_distribution(scope),
          traffic_over_time: fetch_traffic_over_time(scope, start_time, end_time),
          response_time_trend: fetch_response_time_trend(scope, start_time, end_time),
        }
      end

      class << self
        def kpi_cards(filters: {})
          new(filters: filters).kpi_cards
        end

        def distribution_metrics(filters: {})
          new(filters: filters).distribution_metrics
        end
      end

      private

      def fetch_aggregates(scope)
        result = scope.pick(
          Arel.sql('COUNT(*)'),
          Arel.sql('COUNT(DISTINCT user_id)'),
          Arel.sql('COUNT(DISTINCT device_id)'),
          Arel.sql('AVG(response_time_ms)'),
          Arel.sql('COUNT(CASE WHEN http_status_code BETWEEN 400 AND 599 THEN 1 END)'),
          Arel.sql('PERCENTILE_CONT(0.50) WITHIN GROUP (ORDER BY response_time_ms)'),
          Arel.sql('PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY response_time_ms)'),
          Arel.sql('PERCENTILE_CONT(0.99) WITHIN GROUP (ORDER BY response_time_ms)')
        )

        return default_aggregates if result.nil?

        {
          total_count: result[0] || 0,
          unique_users: result[1] || 0,
          unique_devices: result[2] || 0,
          avg_response_time: result[3]&.round(2) || 0.0,
          error_count: result[4] || 0,
          p50_response_time: result[5]&.round(2) || 0.0,
          p95_response_time: result[6]&.round(2) || 0.0,
          p99_response_time: result[7]&.round(2) || 0.0,
        }
      end

      def default_aggregates
        {
          total_count: 0,
          unique_users: 0,
          unique_devices: 0,
          avg_response_time: 0.0,
          error_count: 0,
          p50_response_time: 0.0,
          p95_response_time: 0.0,
          p99_response_time: 0.0,
        }
      end

      def calculate_error_rate(total_count, error_count)
        return 0.0 if total_count == 0

        ((error_count.to_f / total_count) * 100).round(2)
      end

      def calculate_requests_per_hour(total_count)
        return 0.0 if total_count == 0

        # Use filter dates if provided
        start_date = @filters[:start_date] || @filters['start_date']
        end_date = @filters[:end_date] || @filters['end_date']

        if start_date.present? && end_date.present?
          start_time = Util.parse_date(start_date).beginning_of_day
          end_time = Util.parse_date(end_date).end_of_day

          duration_hours = ((end_time - start_time) / 3600.0)
          return 0.0 if duration_hours == 0

          (total_count / duration_hours).round(2)
        else
          # No date range provided, return 0.0
          0.0
        end
      end

      def normalize_date_range
        @normalize_date_range ||= begin
          start_date = @filters[:start_date] || @filters['start_date']
          end_date = @filters[:end_date] || @filters['end_date']

          if start_date.present? && end_date.present?
            start_time = Util.parse_date(start_date).beginning_of_day
            end_time = Util.parse_date(end_date).end_of_day
          else
            # Default to last 6 months
            end_time = Time.current
            start_time = 6.months.ago
          end

          # Ensure max range is 6 months
          start_time = end_time - 6.months if (end_time - start_time) > 6.months

          [start_time, end_time]
        end
      end

      def calculate_time_interval(start_time, end_time)
        duration_hours = ((end_time - start_time) / 3600.0)

        if duration_hours <= 24
          { interval: 1.hour, format: '%H:%M' }
        elsif duration_hours <= 720 # 30 days
          { interval: 1.day, format: '%b %d' }
        else # More than 30 days
          { interval: 1.month, format: '%b %Y' }
        end
      end

      def fetch_status_code_distribution(scope)
        results = scope
                  .group(:http_status_code)
                  .order(Arel.sql('COUNT(*) DESC'))
                  .count

        total = 0

        status_code_frequency = results.map do |status_code, count|
          total += count
          {
            status_code: status_code,
            count: count,
          }
        end

        {
          total_count: total,
          status_codes: status_code_frequency,
        }
      end

      def fetch_traffic_over_time(scope, start_time, end_time)
        interval_config = calculate_time_interval(start_time, end_time)
        interval = interval_config[:interval]
        format = interval_config[:format]

        time_buckets = generate_time_buckets(start_time, end_time, interval)

        # Fetch actual data grouped by interval
        data = fetch_grouped_data(scope, interval_config, 'COUNT(*)')

        # Fill in missing buckets with 0
        time_buckets.map do |bucket_time|
          {
            label: bucket_time.strftime(format),
            timestamp: bucket_time.to_i,
            count: data[bucket_time] || 0,
          }
        end
      end

      def fetch_response_time_trend(scope, start_time, end_time)
        interval_config = calculate_time_interval(start_time, end_time)
        interval = interval_config[:interval]
        format = interval_config[:format]

        time_buckets = generate_time_buckets(start_time, end_time, interval)

        # Fetch avg and p95 data
        avg_data = fetch_grouped_data(scope, interval_config, 'AVG(response_time_ms)')
        p95_data = fetch_grouped_data(scope, interval_config,
                                      'PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY response_time_ms)')

        time_buckets.map do |bucket_time|
          {
            label: bucket_time.strftime(format),
            timestamp: bucket_time.to_i,
            average_response_time: (avg_data[bucket_time] || 0).round(2),
            p95_response_time: (p95_data[bucket_time] || 0).round(2),
          }
        end
      end

      def generate_time_buckets(start_time, end_time, interval)
        buckets = []

        if interval == 1.month
          # For monthly intervals, start at beginning of month
          current_time = start_time.beginning_of_month
          while current_time <= end_time
            buckets << current_time
            current_time = (current_time + 1.month).beginning_of_month
          end
        else
          # For hour/day intervals, use regular increments
          current_time = start_time
          while current_time <= end_time
            buckets << current_time
            current_time += interval
          end
        end

        buckets
      end

      def fetch_grouped_data(scope, interval_config, aggregate_function)
        interval = interval_config[:interval]

        # Validate aggregate function to prevent SQL injection
        validate_aggregate_function!(aggregate_function)

        # Determine PostgreSQL date truncation function based on interval
        trunc_func = if interval == 1.hour
                       "date_trunc('hour', timestamp)"
                     elsif interval == 1.month
                       "date_trunc('month', timestamp)"
                     else
                       "date_trunc('day', timestamp)"
                     end

        results = scope
                  .group(Arel.sql(trunc_func))
                  .pluck(Arel.sql("#{trunc_func} as time_bucket, #{aggregate_function}"))

        results.to_h { |time_str, value| [Time.zone.parse(time_str.to_s), value] }
      end

      def validate_aggregate_function!(function)
        # Whitelist of allowed aggregate functions
        allowed_functions = [
          'COUNT(*)',
          'AVG(response_time_ms)',
          'PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY response_time_ms)',
        ]

        return if allowed_functions.include?(function)

        raise ArgumentError, "Invalid aggregate function: #{function}"
      end
    end
  end
end
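
A sketch of how a caller might use the class-level helpers (filter values are illustrative; the PERCENTILE_CONT and date_trunc SQL assumes PostgreSQL):

filters = { start_date: '2026-01-01', end_date: '2026-01-31', http_method: 'GET' }

Chronicle::ApiLogs::Metrics.kpi_cards(filters: filters)
# => { total_api_calls: ..., unique_users: ..., unique_devices: ...,
#      average_response_time: ..., p50_response_time: ..., p95_response_time: ...,
#      p99_response_time: ..., error_rate_percentage: ..., requests_per_hour: ... }

Chronicle::ApiLogs::Metrics.distribution_metrics(filters: filters)
# => { status_code_distribution: {...}, traffic_over_time: [...], response_time_trend: [...] }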

data/app/services/chronicle/api_logs/updater.rb
@@ -0,0 +1,19 @@
module Chronicle
  module ApiLogs
    class Updater
      def initialize(request_id, frontend_response_time_ms)
        @request_id = request_id
        @frontend_response_time_ms = frontend_response_time_ms
      end

      def call
        api_log = ApiLog.find_by(request_id: @request_id)

        raise NotFoundError, "API log not found with request_id: #{@request_id}" if api_log.nil?

        api_log.update!(frontend_response_time_ms: @frontend_response_time_ms)
        api_log
      end
    end
  end
end
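
A sketch of the intended call (values illustrative; presumably invoked from the api_logs controller when the client reports its measured timing):

# Raises Chronicle::NotFoundError when no log matches the request_id.
log = Chronicle::ApiLogs::Updater.new('req-123', 318).call
log.frontend_response_time_ms # => 318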

data/app/services/chronicle/api_routes/stats.rb
@@ -0,0 +1,131 @@
module Chronicle
  module ApiRoutes
    class Stats
      SORTABLE_COLUMNS = %w[
        avg_response_time_ms
        p95_response_time_ms
        p99_response_time_ms
        error_rate_percentage
        total_requests
        unique_users
        requests_per_hour
      ].freeze

      DEFAULT_SORT_BY = 'avg_response_time_ms'.freeze
      DEFAULT_SORT_DIRECTION = 'desc'.freeze
      DEFAULT_PER_PAGE = 25
      MAX_PER_PAGE = 100

      # Aliases that PostgreSQL exposes after the GROUP BY + SELECT; safe to
      # interpolate because they are taken from the SORTABLE_COLUMNS whitelist above.
      SORT_COLUMN_ALIAS = {
        'avg_response_time_ms' => 'avg_response_time_ms',
        'p95_response_time_ms' => 'p95_response_time_ms',
        'p99_response_time_ms' => 'p99_response_time_ms',
        'error_rate_percentage' => 'error_rate_percentage',
        'total_requests' => 'total_requests',
        'unique_users' => 'unique_users',
        # duration is constant per request, so ordering by total_requests is equivalent
        'requests_per_hour' => 'total_requests',
      }.freeze

      AGGREGATE_SELECT = [
        'api_endpoint AS path',
        'http_method',
        'COUNT(*) AS total_requests',
        'COUNT(DISTINCT user_id) AS unique_users',
        'ROUND(AVG(response_time_ms)::numeric, 2) AS avg_response_time_ms',
        'ROUND(PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY response_time_ms)::numeric, 2) AS p95_response_time_ms',
        'ROUND(PERCENTILE_CONT(0.99) WITHIN GROUP (ORDER BY response_time_ms)::numeric, 2) AS p99_response_time_ms',
        'ROUND(100.0 * COUNT(CASE WHEN http_status_code BETWEEN 400 AND 599 THEN 1 END) / COUNT(*), 2)
         AS error_rate_percentage',
      ].freeze

      def initialize(filters: {}, sort_by: nil, sort_direction: nil, page: 1, per_page: DEFAULT_PER_PAGE)
        filters = filters.to_unsafe_h.symbolize_keys if filters.respond_to?(:to_unsafe_h)
        @filters = filters.symbolize_keys
        @sort_by = SORTABLE_COLUMNS.include?(sort_by.to_s) ? sort_by.to_s : DEFAULT_SORT_BY
        @sort_direction = if %w[asc desc].include?(sort_direction.to_s.downcase)
                            sort_direction.to_s.downcase
                          else
                            DEFAULT_SORT_DIRECTION
                          end
        @page = [page.to_i, 1].max
        @per_page = per_page.to_i.clamp(1, MAX_PER_PAGE)
        @start_time, @end_time = normalize_date_range
      end

      def call
        base = filtered_scope
        total = base.group(:api_endpoint, :http_method).count.size
        records = stats_scope(base)
                  .order(Arel.sql("#{SORT_COLUMN_ALIAS[@sort_by]} #{@sort_direction.upcase} NULLS LAST"))
                  .limit(@per_page)
                  .offset((@page - 1) * @per_page)

        duration_hours = [(@end_time - @start_time) / 3600.0, 1].max

        {
          data: records.map { |row| serialize(row, duration_hours) },
          pagination: {
            total_count: total,
            page: @page,
            per_page: @per_page,
            total_pages: (total.to_f / @per_page).ceil,
          },
        }
      end

      private

      def filtered_scope
        scope = ApiLog.where(timestamp: @start_time..@end_time)

        scope = scope.where(http_method: @filters[:http_method]) if @filters[:http_method].present?

        if @filters[:api_endpoint].present?
          pattern = "%#{ActiveRecord::Base.sanitize_sql_like(@filters[:api_endpoint])}%"
          scope = scope.where(ApiLog.arel_table[:api_endpoint].matches(pattern))
        end

        scope
      end

      def stats_scope(base)
        base.group(:api_endpoint, :http_method).select(*AGGREGATE_SELECT)
      end

      def serialize(row, duration_hours)
        {
          path: row.path,
          http_method: row.http_method,
          total_requests: row.total_requests.to_i,
          unique_users: row.unique_users.to_i,
          requests_per_hour: (row.total_requests.to_f / duration_hours).round(2),
          avg_response_time_ms: row.avg_response_time_ms.to_f,
          p95_response_time_ms: row.p95_response_time_ms.to_f,
          p99_response_time_ms: row.p99_response_time_ms.to_f,
          error_rate_percentage: row.error_rate_percentage.to_f,
        }
      end

      def normalize_date_range
        start_date = @filters[:start_date]
        end_date = @filters[:end_date]

        if start_date.present? && end_date.present?
          start_time = Util.parse_date(start_date).beginning_of_day
          end_time = Util.parse_date(end_date).end_of_day
        else
          end_time = Time.current
          start_time = 6.months.ago
        end

        start_time = end_time - 6.months if (end_time - start_time) > 6.months

        [start_time, end_time]
      end
    end
  end
end
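
A sketch of the per-route stats query with sorting and pagination (argument values are illustrative):

Chronicle::ApiRoutes::Stats.new(
  filters: { start_date: '2026-01-01', end_date: '2026-01-31' },
  sort_by: 'p95_response_time_ms',   # must appear in SORTABLE_COLUMNS, else the default applies
  sort_direction: 'desc',
  page: 1,
  per_page: 25
).call
# => { data: [{ path: '/api/v1/orders', http_method: 'POST',
#               total_requests: ..., p95_response_time_ms: ..., ... }],
#      pagination: { total_count: ..., page: 1, per_page: 25, total_pages: ... } }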