whodunit-chronicles 0.1.0.pre → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.codeclimate.yml +50 -0
- data/.rubocop.yml +2 -1
- data/.yardopts +7 -5
- data/CHANGELOG.md +76 -1
- data/README.md +408 -22
- data/examples/images/campaign-performance-analytics.png +0 -0
- data/examples/images/candidate-journey-analytics.png +0 -0
- data/examples/images/recruitment-funnel-analytics.png +0 -0
- data/lib/whodunit/chronicles/adapters/mysql.rb +261 -0
- data/lib/whodunit/chronicles/configuration.rb +23 -12
- data/lib/whodunit/chronicles/connection.rb +88 -0
- data/lib/whodunit/chronicles/persistence.rb +129 -0
- data/lib/whodunit/chronicles/processor.rb +127 -0
- data/lib/whodunit/chronicles/service.rb +23 -21
- data/lib/whodunit/chronicles/table.rb +120 -0
- data/lib/whodunit/chronicles/version.rb +1 -1
- data/lib/whodunit/chronicles.rb +11 -1
- data/whodunit-chronicles.gemspec +6 -2
- metadata +68 -4
- data/lib/whodunit/chronicles/audit_processor.rb +0 -270
data/lib/whodunit/chronicles/adapters/mysql.rb

```diff
@@ -0,0 +1,261 @@
+# frozen_string_literal: true
+
+require 'trilogy'
+require 'uri'
+
+module Whodunit
+  module Chronicles
+    module Adapters
+      # MySQL/MariaDB binary log streaming adapter
+      #
+      # Uses MySQL's binary log replication to stream database changes
+      # without impacting application performance.
+      class MySQL < StreamAdapter
+        DEFAULT_SERVER_ID = 1001
+
+        attr_reader :connection, :database_url, :server_id, :binlog_file, :binlog_position
+
+        def initialize(
+          database_url: Chronicles.config.database_url,
+          server_id: DEFAULT_SERVER_ID,
+          logger: Chronicles.logger
+        )
+          super(logger: logger)
+          @database_url = database_url
+          @server_id = server_id
+          @connection = nil
+          @binlog_file = nil
+          @binlog_position = nil
+          @binlog_checksum = true
+        end
+
+        # Start streaming binary log changes
+        def start_streaming(&)
+          raise ArgumentError, 'Block required for processing events' unless block_given?
+
+          log(:info, 'Starting MySQL binary log streaming')
+
+          establish_connection
+          ensure_setup
+
+          self.running = true
+          fetch_current_position
+
+          log(:info, 'Starting replication from position',
+              file: @binlog_file, position: @binlog_position)
+
+          begin
+            stream_binlog_events(&)
+          rescue StandardError => e
+            log(:error, 'Streaming error', error: e.message, backtrace: e.backtrace.first(5))
+            raise ReplicationError, "Failed to stream changes: #{e.message}"
+          ensure
+            self.running = false
+          end
+        end
+
+        # Stop streaming
+        def stop_streaming
+          log(:info, 'Stopping MySQL binary log streaming')
+          self.running = false
+          close_connection
+        end
+
+        # Get current replication position
+        def current_position
+          return "#{@binlog_file}:#{@binlog_position}" if @binlog_file && @binlog_position
+
+          fetch_current_position
+          "#{@binlog_file}:#{@binlog_position}"
+        end
+
+        # Set up binary log replication
+        def setup
+          log(:info, 'Setting up MySQL binary log replication')
+
+          establish_connection
+          validate_binlog_format
+          validate_server_id
+          enable_binlog_checksum
+
+          log(:info, 'MySQL setup completed successfully')
+        end
+
+        # Remove binary log replication setup (minimal cleanup needed)
+        def teardown
+          log(:info, 'Tearing down MySQL binary log replication')
+          close_connection
+          log(:info, 'MySQL teardown completed')
+        end
+
+        # Test database connection
+        def test_connection
+          establish_connection
+          result = @connection.query('SELECT @@hostname, @@version, @@server_id')
+          info = result.first
+
+          log(:info, 'Connection test successful',
+              hostname: info['@@hostname'],
+              version: info['@@version'],
+              server_id: info['@@server_id'])
+
+          true
+        rescue StandardError => e
+          log(:error, 'Connection test failed', error: e.message)
+          false
+        end
+
+        private
+
+        def establish_connection
+          return if @connection&.ping
+
+          parsed_url = parse_database_url(@database_url)
+
+          @connection = Trilogy.new(
+            host: parsed_url[:host],
+            port: parsed_url[:port] || 3306,
+            username: parsed_url[:username],
+            password: parsed_url[:password],
+            database: parsed_url[:database],
+            ssl: parsed_url[:ssl],
+          )
+
+          log(:debug, 'Established MySQL connection',
+              host: parsed_url[:host],
+              database: parsed_url[:database])
+        rescue StandardError => e
+          log(:error, 'Failed to establish connection', error: e.message)
+          raise AdapterError, "Connection failed: #{e.message}"
+        end
+
+        def close_connection
+          @connection&.close
+          @connection = nil
+        end
+
+        def parse_database_url(url)
+          uri = URI.parse(url)
+          {
+            host: uri.host,
+            port: uri.port,
+            username: uri.user,
+            password: uri.password,
+            database: uri.path&.sub('/', ''),
+            ssl: uri.query&.include?('ssl=true'),
+          }
+        end
+
+        def ensure_setup
+          validate_binlog_format
+          validate_server_id
+        end
+
+        def validate_binlog_format
+          result = @connection.query('SELECT @@binlog_format')
+          format = result.first['@@binlog_format']
+
+          unless %w[ROW MIXED].include?(format)
+            raise ReplicationError,
+                  "Binary log format must be ROW or MIXED, currently: #{format}"
+          end
+
+          log(:debug, 'Binary log format validated', format: format)
+        end
+
+        def validate_server_id
+          result = @connection.query('SELECT @@server_id')
+          current_server_id = result.first['@@server_id'].to_i
+
+          if current_server_id == @server_id
+            raise ReplicationError,
+                  "Server ID conflict: #{@server_id} is already in use"
+          end
+
+          log(:debug, 'Server ID validated',
+              current: current_server_id,
+              replication: @server_id)
+        end
+
+        def enable_binlog_checksum
+          @connection.query('SET @master_binlog_checksum = @@global.binlog_checksum')
+          log(:debug, 'Binary log checksum enabled')
+        end
+
+        def fetch_current_position
+          result = @connection.query('SHOW MASTER STATUS')
+          status = result.first
+
+          raise ReplicationError, 'Unable to fetch master status - binary logging may be disabled' unless status
+
+          @binlog_file = status['File']
+          @binlog_position = status['Position']
+          log(:debug, 'Fetched master position',
+              file: @binlog_file,
+              position: @binlog_position)
+        end
+
+        def stream_binlog_events(&)
+          # Register as replica server
+          register_replica_server
+
+          # Request binary log dump
+          request_binlog_dump
+
+          # Process binary log events
+          process_binlog_stream(&)
+        rescue StandardError => e
+          log(:error, 'Binary log streaming error', error: e.message)
+          raise
+        end
+
+        def register_replica_server
+          # This would typically use COM_REGISTER_SLAVE MySQL protocol command
+          # For now, we'll use a simplified approach
+          log(:debug, 'Registering as replica server', server_id: @server_id)
+
+          # NOTE: Full implementation would require low-level MySQL protocol handling
+          # This is a placeholder for the binary log streaming setup
+        end
+
+        def request_binlog_dump
+          log(:debug, 'Requesting binary log dump',
+              file: @binlog_file,
+              position: @binlog_position)
+
+          # This would use COM_BINLOG_DUMP MySQL protocol command
+          # Full implementation requires binary protocol handling
+        end
+
+        def process_binlog_stream(&)
+          # This would process the binary log event stream
+          # Each event would be parsed and converted to a ChangeEvent
+
+          log(:info, 'Processing binary log stream (placeholder implementation)')
+
+          # Placeholder: In a real implementation, this would:
+          # 1. Read binary log events from the stream
+          # 2. Parse event headers and data
+          # 3. Convert to ChangeEvent objects
+          # 4. Yield each event to the block
+
+          # For now, we'll simulate with a warning
+          log(:warn, 'MySQL binary log streaming requires full protocol implementation')
+
+          # Yield a placeholder change event to demonstrate the interface
+          change_event = ChangeEvent.new(
+            table_name: 'example_table',
+            action: 'INSERT',
+            primary_key: { id: 1 },
+            new_data: { id: 1, name: 'test' },
+            old_data: nil,
+            timestamp: Time.now,
+            metadata: { position: current_position },
+          )
+
+          yield(change_event) if block_given?
+        end
+      end
+    end
+  end
+end
```
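The adapter mirrors the lifecycle of the existing PostgreSQL adapter: `setup`, `start_streaming`, `stop_streaming`, `teardown`, plus `test_connection`. A minimal usage sketch, assuming a `Whodunit::Chronicles.configure` entry point that yields the `Configuration` object validated in the next hunk (the diff itself only shows `Chronicles.config` reads; the URL and server ID below are illustrative):

```ruby
require 'whodunit/chronicles'

# Assumption: a configure block that yields the Configuration object.
Whodunit::Chronicles.configure do |config|
  config.adapter      = :mysql
  config.database_url = 'mysql://audit:s3cret@localhost:3306/app_db' # illustrative
end

adapter = Whodunit::Chronicles::Adapters::MySQL.new(server_id: 1002)
adapter.setup            # validates binlog_format (ROW/MIXED) and server ID
adapter.test_connection  # => true when the server is reachable

adapter.start_streaming do |event|
  # In 0.2.0 this yields a single placeholder ChangeEvent: the
  # COM_REGISTER_SLAVE / COM_BINLOG_DUMP protocol steps are still stubs.
  puts "#{event.action} on #{event.table_name} (#{event.metadata[:position]})"
end
```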
data/lib/whodunit/chronicles/configuration.rb

```diff
@@ -30,21 +30,20 @@ module Whodunit
       # @raise [ConfigurationError] if configuration is invalid
       def validate!
         raise ConfigurationError, 'database_url is required' if database_url.nil?
-        raise ConfigurationError, 'adapter must be :postgresql' unless adapter == :postgresql
+        raise ConfigurationError, 'adapter must be :postgresql or :mysql' unless %i[postgresql mysql].include?(adapter)
         raise ConfigurationError, 'batch_size must be positive' unless batch_size.positive?
         raise ConfigurationError, 'max_retry_attempts must be positive' unless max_retry_attempts.positive?
         raise ConfigurationError, 'retry_delay must be positive' unless retry_delay.positive?
 
-
-        validate_slot_name!
+        validate_adapter_specific_settings!
       end
 
-      # Check if a table should be
+      # Check if a table should be chronicled based on filters
       #
       # @param table_name [String] The table name to check
       # @param schema_name [String] The schema name to check
-      # @return [Boolean] true if the table should be
-      def
+      # @return [Boolean] true if the table should be chronicled
+      def chronicle_table?(table_name, schema_name = 'public')
         return false if filtered_by_schema?(schema_name)
         return false if filtered_by_table?(table_name)
 
@@ -53,18 +52,30 @@ module Whodunit
 
       private
 
-      def
-
-
-
+      def validate_adapter_specific_settings!
+        case adapter
+        when :postgresql
+          validate_postgresql_settings!
+        when :mysql
+          validate_mysql_settings!
+        end
       end
 
-      def
-
+      def validate_postgresql_settings!
+        if publication_name && !/\A[a-zA-Z_][a-zA-Z0-9_]*\z/.match?(publication_name)
+          raise ConfigurationError, 'publication_name must be a valid PostgreSQL identifier'
+        end
+
+        return unless replication_slot_name && !/\A[a-zA-Z_][a-zA-Z0-9_]*\z/.match?(replication_slot_name)
 
         raise ConfigurationError, 'replication_slot_name must be a valid PostgreSQL identifier'
       end
 
+      def validate_mysql_settings!
+        # MySQL-specific validations can be added here in the future
+        # For now, MySQL settings are less restrictive
+      end
+
       def filtered_by_schema?(schema_name)
         return false unless schema_filter
 
```
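The PostgreSQL checks hinge on a single anchored identifier regex; a quick look at what it accepts and rejects:

```ruby
IDENTIFIER = /\A[a-zA-Z_][a-zA-Z0-9_]*\z/ # same pattern as above

IDENTIFIER.match?('chronicles_pub') # => true
IDENTIFIER.match?('_slot_1')        # => true  (leading underscore is allowed)
IDENTIFIER.match?('9slot')          # => false (must not start with a digit)
IDENTIFIER.match?('my-slot')        # => false (hyphens are rejected)
```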
data/lib/whodunit/chronicles/connection.rb

```diff
@@ -0,0 +1,88 @@
+# frozen_string_literal: true
+
+require 'uri'
+
+module Whodunit
+  module Chronicles
+    # Handles database connections for chronicles processing
+    #
+    # Provides adapter-agnostic connection management for both PostgreSQL and MySQL
+    module Connection
+      private
+
+      def create_connection
+        audit_url = @audit_database_url || Chronicles.config.database_url
+
+        case detect_database_type(audit_url)
+        when :postgresql
+          require 'pg'
+          PG.connect(audit_url)
+        when :mysql
+          require 'trilogy'
+          parsed = parse_mysql_url(audit_url)
+          Trilogy.new(
+            host: parsed[:host],
+            port: parsed[:port] || 3306,
+            username: parsed[:username],
+            password: parsed[:password],
+            database: parsed[:database],
+            ssl: parsed[:ssl],
+          )
+        else
+          raise ConfigurationError, 'Unsupported database type for connection'
+        end
+      end
+
+      def detect_database_type(url)
+        return Chronicles.config.adapter unless url
+        return :postgresql if url.start_with?('postgres://', 'postgresql://')
+        return :mysql if url.start_with?('mysql://', 'mysql2://')
+
+        # Fallback to configured adapter
+        Chronicles.config.adapter
+      end
+
+      def parse_mysql_url(url)
+        return {} if url.nil? || url.empty?
+
+        uri = URI.parse(url)
+        {
+          host: uri.host,
+          port: uri.port,
+          username: uri.user,
+          password: uri.password,
+          database: uri.path&.sub('/', ''),
+          ssl: uri.query&.include?('ssl=true'),
+        }
+      end
+
+      def connection_active?
+        case detect_database_type(@audit_database_url || Chronicles.config.database_url)
+        when :postgresql
+          @connection && !@connection.finished?
+        when :mysql
+          @connection&.ping
+        else
+          false
+        end
+      end
+
+      def setup_connection_specifics
+        case detect_database_type(@audit_database_url || Chronicles.config.database_url)
+        when :postgresql
+          @connection.type_map_for_results = PG::BasicTypeMapForResults.new(@connection)
+        when :mysql
+          # MySQL/Trilogy doesn't need special setup
+        end
+      end
+
+      def ensure_connection
+        return if @connection && connection_active?
+
+        @connection = create_connection
+        setup_connection_specifics
+        ensure_table_exists
+      end
+    end
+  end
+end
```
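Both helpers are private, but their behavior is easy to pin down. Calling them directly (for illustration only; inputs and values below are made up):

```ruby
# Scheme sniffing in detect_database_type:
detect_database_type('postgresql://app@db/app') # => :postgresql
detect_database_type('mysql2://app@db/app')     # => :mysql
detect_database_type('oracle://app@db/app')     # => Chronicles.config.adapter (fallback)

# parse_mysql_url on a typical URL:
parse_mysql_url('mysql://audit:s3cret@db.internal:3307/audit_db?ssl=true')
# => { host: 'db.internal', port: 3307, username: 'audit',
#      password: 's3cret', database: 'audit_db', ssl: true }
```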
data/lib/whodunit/chronicles/persistence.rb

```diff
@@ -0,0 +1,129 @@
+# frozen_string_literal: true
+
+module Whodunit
+  module Chronicles
+    # Handles record persistence for different database adapters
+    #
+    # Provides adapter-specific SQL for inserting chronicle records
+    module Persistence
+      private
+
+      def persist_record(record)
+        db_type = detect_database_type(@audit_database_url || Chronicles.config.database_url)
+
+        case db_type
+        when :postgresql
+          persist_record_postgresql(record)
+        when :mysql
+          persist_record_mysql(record)
+        end
+      end
+
+      def persist_record_postgresql(record)
+        sql = <<~SQL
+          INSERT INTO whodunit_chronicles_audits (
+            table_name, schema_name, record_id, action, old_data, new_data, changes,
+            user_id, user_type, transaction_id, sequence_number, occurred_at, created_at, metadata
+          ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
+          RETURNING id
+        SQL
+
+        params = build_record_params(record)
+        result = @connection.exec_params(sql, params)
+        record[:id] = result.first['id'].to_i
+        result.clear
+
+        record
+      end
+
+      def persist_record_mysql(record)
+        sql = <<~SQL
+          INSERT INTO whodunit_chronicles_audits (
+            table_name, schema_name, record_id, action, old_data, new_data, changes,
+            user_id, user_type, transaction_id, sequence_number, occurred_at, created_at, metadata
+          ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+        SQL
+
+        params = build_record_params(record)
+        @connection.execute(sql, *params)
+        record[:id] = @connection.last_insert_id
+
+        record
+      end
+
+      def persist_records_batch(records)
+        return records if records.empty?
+
+        db_type = detect_database_type(@audit_database_url || Chronicles.config.database_url)
+
+        case db_type
+        when :postgresql
+          persist_records_batch_postgresql(records)
+        when :mysql
+          persist_records_batch_mysql(records)
+        end
+      end
+
+      def persist_records_batch_postgresql(records)
+        # Use multi-row INSERT for better performance
+        values_clauses = []
+        all_params = []
+        param_index = 1
+
+        records.each do |record|
+          param_positions = (param_index..(param_index + 13)).map { |i| "$#{i}" }.join(', ')
+          values_clauses << "(#{param_positions})"
+          all_params.concat(build_record_params(record))
+          param_index += 14
+        end
+
+        sql = <<~SQL
+          INSERT INTO whodunit_chronicles_audits (
+            table_name, schema_name, record_id, action, old_data, new_data, changes,
+            user_id, user_type, transaction_id, sequence_number, occurred_at, created_at, metadata
+          ) VALUES #{values_clauses.join(', ')}
+          RETURNING id
+        SQL
+
+        result = @connection.exec_params(sql, all_params)
+
+        # Set IDs on the records
+        result.each_with_index do |row, index|
+          records[index][:id] = row['id'].to_i
+        end
+
+        result.clear
+        records
+      end
+
+      def persist_records_batch_mysql(records)
+        # For MySQL, we'll use individual inserts in a transaction for simplicity
+        # A more optimized version could use VALUES() with multiple rows
+        records.each do |record|
+          persist_record_mysql(record)
+        end
+
+        records
+      end
+
+      def build_record_params(record)
+        [
+          record[:table_name],
+          record[:schema_name],
+          record[:record_id].to_json,
+          record[:action],
+          record[:old_data]&.to_json,
+          record[:new_data]&.to_json,
+          record[:changes].to_json,
+          record[:user_id],
+          record[:user_type],
+          record[:transaction_id],
+          record[:sequence_number],
+          record[:occurred_at],
+          record[:created_at],
+          record[:metadata].to_json,
+        ]
+      end
+    end
+  end
+end
```
data/lib/whodunit/chronicles/processor.rb

```diff
@@ -0,0 +1,127 @@
+# frozen_string_literal: true
+
+module Whodunit
+  module Chronicles
+    # Processes database change events and creates chronicle records
+    #
+    # Transforms ChangeEvent objects into structured chronicle records
+    # with complete object serialization and metadata.
+    class Processor
+      include Connection
+      include Table
+      include Persistence
+
+      attr_reader :logger, :connection
+
+      def initialize(
+        audit_database_url: Chronicles.config.audit_database_url,
+        logger: Chronicles.logger
+      )
+        @audit_database_url = audit_database_url
+        @logger = logger
+        @connection = nil
+      end
+
+      # Process a change event and create chronicle record
+      #
+      # @param change_event [ChangeEvent] The database change to chronicle
+      # @return [Hash] The created chronicle record
+      def process(change_event)
+        ensure_connection
+
+        record = build_record(change_event)
+        persist_record(record)
+
+        log(:debug, 'Processed change event',
+            table: change_event.qualified_table_name,
+            action: change_event.action,
+            id: record[:id])
+
+        record
+      rescue StandardError => e
+        log(:error, 'Failed to process change event',
+            error: e.message,
+            event: change_event.to_s)
+        raise
+      end
+
+      # Process multiple change events in a batch
+      #
+      # @param change_events [Array<ChangeEvent>] Array of change events
+      # @return [Array<Hash>] Array of created chronicle records
+      def process_batch(change_events)
+        return [] if change_events.empty?
+
+        ensure_connection
+
+        records = change_events.map { |event| build_record(event) }
+        persist_records_batch(records)
+
+        log(:info, 'Processed batch of change events', count: change_events.size)
+
+        records
+      rescue StandardError => e
+        log(:error, 'Failed to process batch',
+            error: e.message,
+            count: change_events.size)
+        raise
+      end
+
+      # Close database connection
+      def close
+        @connection&.close
+        @connection = nil
+      end
+
+      private
+
+      def build_record(change_event)
+        user_info = extract_user_info(change_event)
+
+        {
+          id: nil, # Will be set by database
+          table_name: change_event.table_name,
+          schema_name: change_event.schema_name,
+          record_id: change_event.primary_key,
+          action: change_event.action,
+          old_data: change_event.old_data,
+          new_data: change_event.new_data,
+          changes: change_event.changes,
+          user_id: user_info[:user_id],
+          user_type: user_info[:user_type],
+          transaction_id: change_event.transaction_id,
+          sequence_number: change_event.sequence_number,
+          occurred_at: change_event.timestamp,
+          created_at: Time.now,
+          metadata: build_metadata(change_event),
+        }
+      end
+
+      def extract_user_info(change_event)
+        data = change_event.current_data || {}
+
+        # Look for Whodunit user attribution fields
+        user_id = data['creator_id'] || data['updater_id'] || data['deleter_id']
+
+        {
+          user_id: user_id,
+          user_type: user_id ? 'User' : nil,
+        }
+      end
+
+      def build_metadata(change_event)
+        {
+          table_schema: change_event.schema_name,
+          qualified_table_name: change_event.qualified_table_name,
+          changed_columns: change_event.changed_columns,
+          adapter_metadata: change_event.metadata,
+          chronicles_version: Chronicles::VERSION,
+        }
+      end
+
+      def log(level, message, context = {})
+        logger.public_send(level, message, processor: 'Processor', **context)
+      end
+    end
+  end
+end
```
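End to end, the processor is the sink for whichever adapter is streaming; a wiring sketch using only the interfaces shown in this diff:

```ruby
processor = Whodunit::Chronicles::Processor.new
adapter   = Whodunit::Chronicles::Adapters::MySQL.new

# start_streaming blocks, yielding each ChangeEvent as it arrives;
# process returns the persisted record Hash with its new :id filled in.
adapter.start_streaming do |change_event|
  processor.process(change_event)
end
```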