familia 2.0.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.rst +45 -0
  3. data/Gemfile +2 -0
  4. data/Gemfile.lock +11 -1
  5. data/docs/guides/writing-migrations.md +345 -0
  6. data/examples/migrations/v1_to_v2_serialization_migration.rb +374 -0
  7. data/examples/schemas/customer.json +33 -0
  8. data/examples/schemas/session.json +27 -0
  9. data/familia.gemspec +2 -0
  10. data/lib/familia/data_type/types/hashkey.rb +0 -238
  11. data/lib/familia/data_type/types/listkey.rb +4 -110
  12. data/lib/familia/data_type/types/sorted_set.rb +0 -365
  13. data/lib/familia/data_type/types/stringkey.rb +0 -139
  14. data/lib/familia/data_type/types/unsorted_set.rb +2 -122
  15. data/lib/familia/features/schema_validation.rb +139 -0
  16. data/lib/familia/migration/base.rb +447 -0
  17. data/lib/familia/migration/errors.rb +31 -0
  18. data/lib/familia/migration/model.rb +418 -0
  19. data/lib/familia/migration/pipeline.rb +226 -0
  20. data/lib/familia/migration/rake_tasks.rake +3 -0
  21. data/lib/familia/migration/rake_tasks.rb +160 -0
  22. data/lib/familia/migration/registry.rb +364 -0
  23. data/lib/familia/migration/runner.rb +311 -0
  24. data/lib/familia/migration/script.rb +234 -0
  25. data/lib/familia/migration.rb +43 -0
  26. data/lib/familia/schema_registry.rb +173 -0
  27. data/lib/familia/settings.rb +63 -1
  28. data/lib/familia/version.rb +1 -1
  29. data/lib/familia.rb +1 -0
  30. data/try/features/schema_registry_try.rb +193 -0
  31. data/try/features/schema_validation_feature_try.rb +218 -0
  32. data/try/migration/base_try.rb +226 -0
  33. data/try/migration/errors_try.rb +67 -0
  34. data/try/migration/integration_try.rb +451 -0
  35. data/try/migration/model_try.rb +431 -0
  36. data/try/migration/pipeline_try.rb +460 -0
  37. data/try/migration/rake_tasks_try.rb +61 -0
  38. data/try/migration/registry_try.rb +199 -0
  39. data/try/migration/runner_try.rb +311 -0
  40. data/try/migration/schema_validation_try.rb +201 -0
  41. data/try/migration/script_try.rb +192 -0
  42. data/try/migration/v1_to_v2_serialization_try.rb +513 -0
  43. data/try/performance/benchmarks_try.rb +11 -12
  44. metadata +44 -1
data/lib/familia/migration/model.rb
@@ -0,0 +1,418 @@
+ # lib/familia/migration/model.rb
+ #
+ # frozen_string_literal: true
+
+ require_relative 'base'
+
+ module Familia
+   module Migration
+     # Base class for individual record migrations on Familia::Horreum models
+     #
+     # Provides Redis SCAN-based iteration with progress tracking, error handling,
+     # and dry-run/actual-run modes for processing records one at a time.
+     #
+     # ## When to Use Model vs Pipeline
+     #
+     # Use **Model** when:
+     # - Complex logic is needed for each record
+     # - Error handling per record is important
+     # - Records need individual validation
+     # - Updates vary significantly between records
+     #
+     # Use **Pipeline** when:
+     # - Simple bulk updates across many records
+     # - Performance is critical for large datasets
+     # - All records get similar field updates
+     # - Redis pipelining can be utilized effectively
+     #
+     # ## Subclassing Requirements
+     #
+     # Subclasses must implement:
+     # - {#prepare} - Set @model_class and optionally @batch_size
+     # - {#process_record} - Handle individual record processing
+     #
+     # Subclasses may override:
+     # - {#migration_needed?} - Default returns true (always migrate)
+     # - {#load_from_key} - Custom object loading from database keys
+     #
+     # ## Usage Example
+     #
+     #   class CustomerEmailMigration < Familia::Migration::Model
+     #     def prepare
+     #       @model_class = Customer
+     #       @batch_size = 1000 # optional, defaults to config
+     #     end
+     #
+     #     def process_record(obj, key)
+     #       return unless obj.email.blank?
+     #
+     #       for_realsies_this_time? do
+     #         obj.email = "#{obj.custid}@example.com"
+     #         obj.save
+     #       end
+     #       track_stat(:emails_updated)
+     #     end
+     #   end
+     #
+     # ## Development Rule
+     #
+     # **IMPORTANT**: Deploy schema changes and logic changes separately.
+     # This prevents new model logic from breaking migration logic and
+     # reduces debugging complexity.
+     #
+     # @abstract Subclass and implement {#prepare} and {#process_record}
+     # @see Pipeline For bulk processing with Redis pipelining
+     class Model < Base
+       # Model class being migrated
+       # @return [Class] Familia::Horreum subclass
+       attr_reader :model_class
+
+       # Number of keys to scan per Redis SCAN operation
+       # @return [Integer] batch size for scanning
+       attr_reader :batch_size
+
+       # Total number of indexed records in the model
+       # @return [Integer] count from model_class.instances
+       attr_reader :total_records
+
+       # Number of keys found by Redis SCAN
+       # @return [Integer] actual keys discovered
+       attr_reader :total_scanned
+
+       # Records that passed through process_record
+       # @return [Integer] count of records needing updates
+       attr_reader :records_needing_update
+
+       # Records successfully updated
+       # @return [Integer] count of records modified
+       attr_reader :records_updated
+
+       # Number of processing errors encountered
+       # @return [Integer] error count
+       attr_reader :error_count
+
+       # Interactive debugging mode flag
+       # @return [Boolean] whether to drop into pry on errors
+       attr_reader :interactive
+
+       # Redis SCAN pattern for finding records
+       # @return [String] pattern like "customer:*:object"
+       attr_reader :scan_pattern
+
+       def initialize(options = {})
+         super
+         reset_counters
+         set_defaults
+       end
+
+       # Main migration entry point
+       #
+       # Validates configuration, displays run mode information,
+       # executes the SCAN-based record processing, and displays
+       # a comprehensive summary.
+       #
+       # @return [Boolean] true if no errors occurred
+       def migrate
+         validate_model_class!
+
+         # Set `@interactive = true` in the implementing migration class
+         # for an interactive debug session on a per-record basis.
+         require 'pry-byebug' if interactive
+
+         print_database_details
+         run_mode_banner
+
+         info("[#{self.class.name.split('::').last}] Starting #{model_class.name} migration")
+         info("Processing up to #{total_records} records")
+         info('Will show progress every 100 records and log each update')
+
+         scan_and_process_records
+         print_database_details
+         print_migration_summary
+
+         @error_count == 0
+       end
+
+       # Default migration check - always returns true
+       #
+       # Always return true to allow re-running for error recovery.
+       # The migration should be idempotent - it won't overwrite existing values.
+       # Override if you need conditional migration logic.
+       #
+       # @return [Boolean] true to proceed with migration
+       def migration_needed?
+         debug("[#{self.class.name.split('::').last}] Checking if migration is needed...")
+         true
+       end
+
+       # Load Familia::Horreum object instance from database key
+       #
+       # Override this method to customize loading behavior. For example,
+       # with a custom @scan_pattern, the migration might loop through
+       # relation keys of a horreum model (e.g. customer:ID:custom_domain).
+       #
+       # Typically migrations iterate over objects themselves, but this
+       # won't work if there are dangling "orphan" keys without corresponding
+       # objects. Override this method to handle such cases.
+       #
+       # @param key [String] database key to load from
+       # @return [Familia::Horreum, Familia::DataType] loaded object instance
+       def load_from_key(key)
+         model_class.find_by_key(key)
+       end
+
+       protected
+
+       # Set @model_class and optionally @batch_size
+       #
+       # **Required for subclasses** - must set @model_class to a
+       # Familia::Horreum subclass. Can optionally set @batch_size
+       # to override the default.
+       #
+       # @abstract Subclasses must implement this method
+       # @return [void]
+       # @raise [NotImplementedError] if not implemented
+       def prepare
+         raise NotImplementedError, "#{self.class} must set @model_class in #prepare"
+       end
+
+       # Process a single record
+       #
+       # **Required for subclasses** - implement the core logic for
+       # processing each record. Use {#track_stat} to count operations
+       # and {#for_realsies_this_time?} to wrap actual changes.
+       #
+       # @abstract Subclasses must implement this method
+       # @param obj [Familia::Horreum, Familia::DataType] The familia class instance to process
+       # @param key [String] The dbkey of the record
+       # @return [void]
+       # @raise [NotImplementedError] if not implemented
+       def process_record(obj, key)
+         raise NotImplementedError, "#{self.class} must implement #process_record"
+       end
+
+       # Track statistics and auto-increment records_updated counter
+       #
+       # Automatically increments @records_updated when statname is :records_updated.
+       # Use this to maintain consistent counting across migrations.
+       #
+       # @param statname [Symbol] The name of the statistic to track
+       # @param increment [Integer] The amount to increment by
+       # @return [void]
+       def track_stat(statname, increment = 1)
+         super
+         @records_updated += increment if statname == :records_updated
+       end
+
+       # Track stat and log decision reason in one call
+       #
+       # Convenience method for logging migration decisions with consistent
+       # formatting and automatic statistic tracking.
+       #
+       # @param obj [Familia::Horreum] object being processed
+       # @param decision [String] decision made (e.g., 'skipped', 'updated')
+       # @param field [String] field name involved in decision
+       # @return [nil]
+       def track_stat_and_log_reason(obj, decision, field)
+         track_stat(:decision)
+         track_stat("#{decision}_#{field}")
+         info("#{decision} objid=#{obj.respond_to?(:objid) ? obj.objid : 'N/A'} #{field}=#{obj.send(field)}")
+         nil
+       end
+
+       # === Schema Validation Hooks ===
+
+       # Override in subclass to enable pre-transform validation
+       #
+       # When enabled, validates each record against its schema before
+       # {#process_record} is called. Validation failures are tracked
+       # via the :schema_errors_before stat.
+       #
+       # @return [Boolean] true to validate before transform
+       def validate_before_transform?
+         false
+       end
+
+       # Override in subclass to enable post-transform validation
+       #
+       # When enabled, validates each record against its schema after
+       # {#process_record} completes. Validation failures are tracked
+       # via the :schema_errors_after stat.
+       #
+       # @return [Boolean] true to validate after transform
+       def validate_after_transform?
+         false
+       end
+
+       # Wrapper that applies validation hooks around process_record
+       #
+       # Called internally by {#process_single_record} when validation
+       # is enabled. Validates the object before and/or after the transform
+       # based on {#validate_before_transform?} and {#validate_after_transform?}.
+       #
+       # @param obj [Familia::Horreum] the object to process
+       # @param key [String] the database key of the record
+       # @return [void]
+       def process_record_with_validation(obj, key)
+         if validate_before_transform?
+           result = validate_schema(obj, context: 'before transform')
+           track_stat(:schema_errors_before) unless result[:valid]
+         end
+
+         process_record(obj, key)
+
+         if validate_after_transform?
+           result = validate_schema(obj, context: 'after transform')
+           track_stat(:schema_errors_after) unless result[:valid]
+         end
+       end
+
+       private
+
+       def reset_counters
+         @total_scanned = 0
+         @records_needing_update = 0
+         @records_updated = 0
+         @error_count = 0
+       end
+
+       def set_defaults
+         @batch_size = Familia::Migration.config.batch_size
+         @model_class = nil
+         @scan_pattern = nil
+         @interactive = false
+         @total_records = 0
+       end
+
+       def validate_model_class!
+         unless @model_class
+           raise Errors::PreconditionFailed, 'Model class not set. Define @model_class in your #prepare method'
+         end
+
+         unless familia_horreum_class?
+           raise Errors::PreconditionFailed, "Model class must be a Familia::Horreum subclass #{@model_class}"
+         end
+
+         @total_records = @model_class.respond_to?(:instances) ? @model_class.instances.size : 0
+         @dbclient ||= @model_class.respond_to?(:dbclient) ? @model_class.dbclient : Familia.dbclient
+         @scan_pattern ||= "#{@model_class.prefix}:*:object"
+         nil
+       end
+
+       def familia_horreum_class?
+         @model_class.ancestors.include?(Familia::Horreum) ||
+           (@model_class.respond_to?(:ancestors) && @model_class < Familia::Base)
+       rescue StandardError
+         false
+       end
+
+       def scan_and_process_records
+         cursor = '0'
+
+         loop do
+           cursor, keys = dbclient.scan(cursor, match: @scan_pattern, count: @batch_size)
+           @total_scanned += keys.size
+
+           show_progress if should_show_progress?
+           info("Processing batch of #{keys.size} keys...") unless keys.empty?
+
+           keys.each { |key| process_single_record(key) }
+           break if cursor == '0'
+         end
+       end
+
+       def should_show_progress?
+         @total_scanned <= 500 || @total_scanned % 100 == 0
+       end
+
+       def show_progress
+         progress(@total_scanned, @total_records, "Scanning #{model_class.name.split('::').last} records")
+       end
+
+       def process_single_record(key)
+         obj = load_from_key(key)
+
+         # Every record that gets processed is considered as needing update. The
+         # idempotent operations in process_record determine whether changes are
+         # actually made.
+         @records_needing_update += 1
+
+         # Call the subclass implementation, with optional schema validation
+         if validate_before_transform? || validate_after_transform?
+           process_record_with_validation(obj, key)
+         else
+           process_record(obj, key)
+         end
+       rescue StandardError => ex
+         handle_record_error(key, ex)
+       end
+
+       def handle_record_error(key, ex)
+         @error_count += 1
+         error("Error processing #{key}: #{ex.message}")
+         debug("Stack trace: #{ex.backtrace.first(10).join('; ')}")
+         track_stat(:errors)
+
+         binding.pry if interactive # rubocop:disable Lint/Debugger
+       end
+
+       def print_migration_summary
+         print_summary do
+           info("Redis SCAN found: #{@total_scanned} #{model_class} records")
+           info("Passed migration filter: #{@records_needing_update} records")
+           info("#{actual_run? ? 'Processed' : 'Would be processed'}: #{@records_updated} records")
+           info("Errors: #{@error_count}")
+
+           print_custom_stats
+           print_error_guidance
+           print_dry_run_guidance
+         end
+       end
+
+       def print_custom_stats
+         return unless @stats.any?
+
+         info('')
+         info('Additional statistics:')
+         @stats.each do |key, value|
+           next if [:errors, :records_updated].include?(key)
+
+           info(" #{key}: #{value}")
+         end
+       end
+
+       def print_error_guidance
+         return unless @error_count > 0
+
+         info('')
+         info('Check logs for error details')
+       end
+
+       def print_dry_run_guidance
+         return unless dry_run? && @records_needing_update > 0
+
+         info ''
+         info 'Run with --run to apply these updates'
+       end
+
+       def print_database_details
+         print_summary('Redis Details') do
+           info("Model class: #{@model_class.name}")
+           info("Redis connection: #{dbclient.respond_to?(:id) ? dbclient.id : dbclient.class}")
+           info("Scan pattern: #{@scan_pattern}")
+           info("Indexed records: #{@total_records} (#{@model_class.name}.instances)")
+           info("Batch size: #{@batch_size}")
+           verify_database_connection
+         end
+       end
+
+       def verify_database_connection
+         dbclient.ping
+         debug('Redis connection: verified')
+       rescue StandardError => ex
+         error("Cannot connect to the database: #{ex.message}")
+         raise ex
+       end
+     end
+   end
+ end
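
The schema validation hooks above default to false, and this hunk includes no example that turns them on. As a minimal sketch of how a subclass might opt in, assuming a hypothetical Session model (the gem ships an example session.json schema) with a hypothetical ttl field:

    class SessionSchemaMigration < Familia::Migration::Model
      def prepare
        @model_class = Session   # must be a Familia::Horreum subclass
        @batch_size  = 500       # optional; defaults to Familia::Migration.config.batch_size
      end

      # Opt in to both hooks: each record is validated against its schema
      # before and after process_record, with failures counted under the
      # :schema_errors_before / :schema_errors_after stats.
      def validate_before_transform?
        true
      end

      def validate_after_transform?
        true
      end

      def process_record(obj, key)
        # `ttl` is a hypothetical field used for illustration.
        return track_stat_and_log_reason(obj, 'skipped', :ttl) if obj.ttl.to_i.positive?

        for_realsies_this_time? do   # per the docs above, wraps the actual change
          obj.ttl = 3600
          obj.save
        end
        track_stat(:records_updated) # also increments @records_updated
      end
    end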
data/lib/familia/migration/pipeline.rb
@@ -0,0 +1,226 @@
+ # lib/familia/migration/pipeline.rb
+ #
+ # frozen_string_literal: true
+
+ require_relative 'model'
+
+ module Familia
+   module Migration
+     # Pipeline-based migration for batch Redis operations with improved performance
+     #
+     # Inherits all Model functionality but processes records in batches
+     # using Redis pipelining instead of individual operations. This provides
+     # significant performance improvements for large datasets with simple updates.
+     #
+     # ## When to Use Pipeline vs Model
+     #
+     # Use **Pipeline** when:
+     # - Processing thousands+ records with simple field updates
+     # - All records get similar field modifications
+     # - Performance is more important than per-record error handling
+     # - Updates can be expressed as Hash field assignments
+     #
+     # Use **Model** when:
+     # - Complex logic needed per record
+     # - Individual error handling is important
+     # - Records need different processing logic
+     # - Updates involve method calls beyond simple field assignment
+     #
+     # ## Subclassing Requirements
+     #
+     # Subclasses must implement:
+     # - {#prepare} - Set @model_class and @batch_size (inherited)
+     # - {#should_process?} - Return true/false for each record
+     # - {#build_update_fields} - Return Hash of field updates
+     #
+     # Subclasses may override:
+     # - {#execute_update} - Customize the pipeline update operation
+     #
+     # ## Usage Example
+     #
+     #   class CustomerObjidMigration < Familia::Migration::Pipeline
+     #     def prepare
+     #       @model_class = Customer
+     #       @batch_size = 100 # Smaller batches for pipelines
+     #     end
+     #
+     #     def should_process?(obj)
+     #       return track_stat(:skipped_empty_custid) && false if obj.custid.empty?
+     #       true
+     #     end
+     #
+     #     def build_update_fields(obj)
+     #       {
+     #         objid: obj.objid || SecureRandom.uuid_v7_from(obj.created),
+     #         user_type: 'authenticated'
+     #       }
+     #     end
+     #   end
+     #
+     # ## Performance Notes
+     #
+     # - Use smaller batch sizes (50-200) compared to Model
+     # - Pipeline operations are atomic per batch, not per record
+     # - Error handling is less granular than Model
+     #
+     # @abstract Subclass and implement {#should_process?} and {#build_update_fields}
+     # @see Model For individual record processing
+     class Pipeline < Model
+       # Main batch processor - executes Redis operations in pipeline
+       #
+       # Processes an array of objects using Redis pipelining for improved
+       # performance. Each object is checked via {#should_process?} and
+       # updated via {#execute_update} if processing is needed.
+       #
+       # @param objects [Array<Array>] Array of tuples: [obj, original_dbkey]
+       #   The original database key is preserved because records with missing/empty
+       #   identifier fields cannot reconstitute their database key via obj.dbkey.
+       #   Only the original key from SCAN guarantees we can operate on the record.
+       # @return [void]
+       def process_batch(objects)
+         dbclient.pipelined do |pipe|
+           objects.each do |obj, original_key|
+             next unless should_process?(obj)
+
+             fields = build_update_fields(obj)
+
+             # Previously we skipped here when the migration returned no fields
+             # to update. We're not always here to update though. Sometimes we
+             # delete or update expirations or do other stuff. If we skip ahead
+             # here, we never get to the execute_update method which migrations
+             # can override to do whatever they want.
+             #
+             # Now, we simply return inside the default execute_update. The end
+             # result is the same but it gives us the opportunity to perform
+             # additional operations on the record.
+
+             execute_update(pipe, obj, fields, original_key)
+
+             track_stat(:records_updated)
+           end
+         end
+       end
+
+       # Override scanning to collect batches instead of individual processing
+       private
+
+       def scan_and_process_records
+         cursor = '0'
+         batch_objects = []
+
+         loop do
+           cursor, keys = dbclient.scan(cursor, match: @scan_pattern, count: @batch_size)
+           @total_scanned += keys.size
+
+           # Progress reporting
+           if @total_scanned <= 500 || @total_scanned % 100 == 0
+             progress(@total_scanned, @total_records, "Scanning #{model_class.name.split('::').last} records")
+           end
+
+           # Collect objects for batch processing
+           keys.each do |key|
+             obj = load_from_key(key)
+             @records_needing_update += 1
+             batch_objects << [obj, key]
+
+             # Process when batch is full
+             if batch_objects.size >= @batch_size
+               process_batch_safely(batch_objects)
+               batch_objects.clear
+             end
+           end
+
+           break if cursor == '0'
+         end
+
+         # Process remaining objects
+         process_batch_safely(batch_objects) if batch_objects.any?
+       end
+
+       def process_batch_safely(objects)
+         return if objects.empty?
+
+         info("Processing batch of #{objects.size} objects...")
+         for_realsies_this_time? do
+           process_batch(objects)
+         end
+       rescue StandardError => ex
+         @error_count += objects.size
+         error("Error processing batch of #{objects.size}: #{ex.message}")
+         debug("Stack trace: #{ex.backtrace.first(10).join('; ')}")
+         objects.each { track_stat(:errors) }
+       end
+
+       protected
+
+       # Determine if object should be processed in this batch
+       #
+       # **Required for subclasses** - implement filtering logic to determine
+       # which records should be included in the pipeline update. Use
+       # {#track_stat} to count skipped records.
+       #
+       # @abstract Subclasses must implement this method
+       # @param obj [Familia::Horreum] The model instance to evaluate
+       # @return [Boolean] true to process, false to skip
+       # @raise [NotImplementedError] if not implemented
+       def should_process?(obj)
+         raise NotImplementedError, "#{self.class} must implement #should_process?"
+       end
+
+       # Build fields hash for Redis HMSET operation
+       #
+       # **Required for subclasses** - return a hash of field names to values
+       # that will be applied via Redis HMSET in the pipeline. Return an empty
+       # hash or nil to skip the default HMSET operation.
+       #
+       # @abstract Subclasses must implement this method
+       # @param obj [Familia::Horreum] The model instance to update
+       # @return [Hash] field_name => value pairs for Redis HMSET
+       # @raise [NotImplementedError] if not implemented
+       def build_update_fields(obj)
+         raise NotImplementedError, "#{self.class} must implement #build_update_fields"
+       end
+
+       # Execute pipeline update operation
+       #
+       # Override this method to customize pipeline operations beyond simple
+       # HMSET field updates. The default implementation handles HMSET with
+       # dry-run support.
+       #
+       # **Important**: Use the provided `pipe` parameter, not the regular
+       # Redis connection, to ensure operations are pipelined.
+       #
+       # NOTE: The `track_stat(:records_updated)` method should not be called here
+       # (or anywhere else in a pipeline migration actually) as it is called by the
+       # pipeline migration framework itself.
+       #
+       # @param pipe [Redis::Pipeline] Redis pipeline instance
+       # @param obj [Familia::Horreum] object being updated
+       # @param fields [Hash] field updates from {#build_update_fields}
+       # @param original_key [String] original database key from SCAN
+       # @return [void]
+       def execute_update(pipe, obj, fields, original_key = nil)
+         klass_name = obj.class.name.split('::').last
+
+         unless fields&.any?
+           return debug("Would skip #{klass_name} b/c empty fields (#{original_key})")
+         end
+
+         # Use original_key for records that can't generate valid keys
+         dbkey = original_key || obj.dbkey
+
+         # USE THE PIPELINE AND NOT THE regular redis connection.
+         pipe.hmset(dbkey, *fields.flatten)
+
+         dry_run_only? do
+           debug("Would update #{klass_name}: #{fields}")
+         end
+       end
+
+       # Not used in Pipeline - batch processing instead
+       def process_record(obj, key)
+         # No-op: Pipeline uses batch processing
+       end
+     end
+   end
+ end
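
The comment inside process_batch notes that migrations sometimes delete keys or adjust expirations rather than set fields, and that execute_update exists to be overridden for exactly that. A minimal sketch of such an override, assuming a hypothetical Session model, a hypothetical expires? predicate, and an arbitrary 24-hour TTL:

    class SessionExpiryMigration < Familia::Migration::Pipeline
      def prepare
        @model_class = Session
        @batch_size  = 100 # smaller batches, per the Performance Notes
      end

      def should_process?(obj)
        # `expires?` is a hypothetical predicate; count skips for the summary.
        return track_stat(:skipped_already_expiring) && false if obj.expires?
        true
      end

      def build_update_fields(obj)
        {} # nothing to HMSET; the override below queues an EXPIRE instead
      end

      # Do not call track_stat(:records_updated) here; process_batch does.
      def execute_update(pipe, obj, fields, original_key = nil)
        dbkey = original_key || obj.dbkey
        pipe.expire(dbkey, 86_400) # use `pipe`, never the plain connection
      end
    end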
data/lib/familia/migration/rake_tasks.rake
@@ -0,0 +1,3 @@
+ # frozen_string_literal: true
+
+ require_relative 'rake_tasks'
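
Neither class in this excerpt shows how a migration is invoked. Based on what the diff does show (Model#initialize takes an options hash, #migrate returns true when @error_count is zero, and the dry-run summary prints "Run with --run to apply these updates"), the workflow might look like the sketch below. The option key for leaving dry-run mode lives in Familia::Migration::Base, which is not part of this excerpt, so `run: true` is an assumption:

    # Dry-run first (assumed default): scans, counts, prints the summary,
    # and suggests re-running with --run. CustomerEmailMigration is the
    # example subclass from the model.rb docs above.
    migration = CustomerEmailMigration.new
    migration.migrate

    # Actual run; `run: true` is a hypothetical option key.
    migration = CustomerEmailMigration.new(run: true)
    ok = migration.migrate # true when no record raised an error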