familia 2.7.0 → 2.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. checksums.yaml +4 -4
  2. data/.pre-commit-config.yaml +0 -2
  3. data/CHANGELOG.rst +175 -0
  4. data/Gemfile.lock +1 -1
  5. data/docs/guides/feature-housekeeping.md +66 -36
  6. data/lib/familia/connection/handlers.rb +131 -47
  7. data/lib/familia/connection/operations.rb +5 -4
  8. data/lib/familia/connection/pipelined_core.rb +14 -7
  9. data/lib/familia/connection/transaction_core.rb +9 -0
  10. data/lib/familia/connection.rb +2 -1
  11. data/lib/familia/data_type/connection.rb +21 -52
  12. data/lib/familia/data_type/types/hashkey.rb +247 -0
  13. data/lib/familia/data_type/types/listkey.rb +117 -4
  14. data/lib/familia/data_type/types/sorted_set.rb +385 -1
  15. data/lib/familia/data_type/types/stringkey.rb +164 -0
  16. data/lib/familia/data_type/types/unsorted_set.rb +121 -3
  17. data/lib/familia/data_type.rb +1 -0
  18. data/lib/familia/errors.rb +4 -0
  19. data/lib/familia/features/encrypted_fields/encrypted_field_type.rb +18 -0
  20. data/lib/familia/features/housekeeping.rb +112 -18
  21. data/lib/familia/field_type.rb +18 -0
  22. data/lib/familia/horreum/connection.rb +15 -10
  23. data/lib/familia/horreum/definition.rb +18 -0
  24. data/lib/familia/horreum/management/audit.rb +37 -39
  25. data/lib/familia/horreum/persistence.rb +23 -19
  26. data/lib/familia/horreum.rb +9 -6
  27. data/lib/familia/version.rb +1 -1
  28. data/try/edge_cases/fast_writer_transaction_guard_try.rb +130 -0
  29. data/try/edge_cases/pipeline_handler_edge_cases_try.rb +214 -0
  30. data/try/features/atomic_write_try.rb +8 -6
  31. data/try/features/housekeeping/housekeeping_try.rb +200 -21
  32. data/try/integration/connection/handler_constraints_try.rb +8 -8
  33. data/try/integration/connection/pipeline_handler_integration_try.rb +175 -0
  34. data/try/integration/connection/pipeline_horreum_routing_try.rb +147 -0
  35. data/try/integration/data_types/datatype_pipelines_try.rb +26 -6
  36. data/try/integration/data_types/datatype_transactions_try.rb +14 -12
  37. data/try/thread_safety/fiber_pipeline_isolation_try.rb +11 -19
  38. data/try/unit/data_types/hashkey_operations_try.rb +269 -0
  39. data/try/unit/data_types/list_commands_try.rb +314 -0
  40. data/try/unit/data_types/sortedset_operations_try.rb +467 -0
  41. data/try/unit/data_types/stringkey_extended_try.rb +239 -0
  42. data/try/unit/data_types/unsortedset_operations_try.rb +174 -0
  43. data/try/unit/fiber_pipeline_handler_try.rb +147 -0
  44. data/try/unit/horreum/destroy_related_fields_cleanup_try.rb +1 -1
  45. metadata +11 -2
  46. data/changelog.d/20260514_034522_claude_review_familia_issue_217.rst +0 -46
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 7b0138946cb9d48258b9f43b8a8a81aa895bfded213543feee354b3d383dd98a
4
- data.tar.gz: bd5b6ee397c2ef19cef85fe7638ead34b5ed40a81c8362f539ed757d0d713ca2
3
+ metadata.gz: c6cb1ccd59d4290c1d75b70e114945180fc5dbb03aaeb405d841c250cd504179
4
+ data.tar.gz: c2bd7946e7e024c8322d0bf48bea39c22cd61c7690589007f9ab32e6f12e614d
5
5
  SHA512:
6
- metadata.gz: 8108ee045d1f4f1bdc722a1b56a6518a32edebd18ae028dfdcd793a62044cc8bea509e06d8399179c11b1eeef971dd980d1bcd18654cd3a375af99b641c514a6
7
- data.tar.gz: 395bcb27503bb30734d0d6e16fa8e95b33fa072dfaca56c2b9b06d33859c4cb915f766923a492cbdaae5407e45db2c76ee074a6c6d7aac245b822332f44d082c
6
+ metadata.gz: 54d3c3020c53fe01de9baf6af78fbcc1ffbea72a671b6e6269c08c9c5eaab786bb267514aa64212ff8ec854e0363cfb1c1de9b9c66674e57cb067ed419a8944b
7
+ data.tar.gz: d8b8ecd85ed1273c64ae75cab7559dee0ddb92fe5d552b60690c3a66ee9df93b3829aa32deef5438677ad9fb8d9acebb0bdbf071f3bccdb63161c4a772b1c021
@@ -62,8 +62,6 @@ repos:
62
62
  rev: v1.32.2
63
63
  hooks:
64
64
  - id: talisman-commit
65
- env:
66
- TALISMAN_SCAN_MODE: CI
67
65
 
68
66
  # Commit message issue tracking integration
69
67
  - repo: https://github.com/delano/add-msg-issue-prefix-hook
data/CHANGELOG.rst CHANGED
@@ -7,6 +7,181 @@ The format is based on `Keep a Changelog <https://keepachangelog.com/en/1.1.0/>`
7
7
 
8
8
  <!--scriv-insert-here-->
9
9
 
10
+ .. _changelog-2.8.0:
11
+
12
+ 2.8.0 — 2026-05-15
13
+ ==================
14
+
15
+ Added
16
+ -----
17
+
18
+ - Expanded Redis 7 command coverage across all DataType classes:
19
+
20
+ - **StringKey**: ``incrbyfloat``, ``getex``, ``getdel``, ``setex``, ``psetex``,
21
+ ``bitcount``, ``bitpos``, ``bitfield``, plus class methods ``mget``, ``mset``,
22
+ ``msetnx``, ``bitop`` for multi-key and bitwise operations.
23
+
24
+ - **List**: ``trim``/``ltrim``, ``set``/``lset``, ``insert``/``linsert``,
25
+ ``move``/``lmove``, ``pushx``/``rpushx``, ``unshiftx``/``lpushx``.
26
+ Updated ``pop`` and ``shift`` to support optional count parameter for batch operations.
27
+
28
+ - **UnsortedSet**: ``intersection``/``inter``, ``union``, ``difference``/``diff``,
29
+ ``member_any?``/``members?``, ``scan``, ``intercard``/``intersection_cardinality``,
30
+ ``interstore``/``intersection_store``, ``unionstore``/``union_store``,
31
+ ``diffstore``/``difference_store``.
32
+
33
+ - **SortedSet**: ``popmin``, ``popmax``, ``score_count``/``zcount``, ``mscore``,
34
+ ``union``, ``inter``, ``rangebylex``, ``revrangebylex``, ``remrangebylex``,
35
+ ``lexcount``, ``randmember``, ``scan``, ``unionstore``, ``interstore``,
36
+ ``diff``, ``diffstore``.
37
+
38
+ - **HashKey**: ``scan``/``hscan``, ``incrbyfloat``/``incrfloat``,
39
+ ``strlen``/``hstrlen``, ``randfield``/``hrandfield``, plus field-level TTL
40
+ commands (Redis 7.4+): ``expire_fields``, ``pexpire_fields``, ``expireat_fields``,
41
+ ``pexpireat_fields``, ``ttl_fields``, ``pttl_fields``, ``persist_fields``,
42
+ ``expiretime_fields``, ``pexpiretime_fields``.
43
+
44
+ - Added 158 new tests across 5 test files covering all new methods.
45
+
46
+ - Instance-scoped ``audit_multi_indexes`` is now fully implemented.
47
+ It discovers per-scope bucket keys via SCAN, partitions them by scope
48
+ instance, and reports stale members, orphaned buckets, and missing
49
+ entries in the same shape as the class-level audit. Orphan entries
50
+ carry a ``:reason`` (``:scope_missing`` or ``:field_value_unheld``)
51
+ and a ``:scope_id``. Missing entries are detected via the indexed
52
+ class's ``participates_in`` relationship to the scope class; when
53
+ absent, the result carries ``missing_status: :not_audited``.
54
+ Resolves the ``:not_implemented`` follow-up from #217.
55
+
56
+ - ``repair_multi_indexes!`` class method that invokes the existing
57
+ ``rebuild_<index_name>`` methods for both class-level (one call on
58
+ the indexed class) and instance-scoped (one call per scope
59
+ instance) multi-indexes. Indexes whose audit status is ``:ok`` are
60
+ skipped; rebuild methods that don't exist or scope classes
61
+ without an ``instances`` collection are recorded in ``:skipped``
62
+ with a reason.
63
+
64
+ - ``housekeeping`` feature gains a class-level bulk runner,
65
+ ``Klass.run_chores!(chore_name:, limit:, batch_size:)``. It iterates
66
+ the class's ``instances`` collection in pipelined batches via
67
+ ``load_multi``, runs all registered chores (or one named chore)
68
+ against each record, and returns a stats hash:
69
+ ``{ model:, scanned:, chores: { name => { modified:, errors: } } }``.
70
+ Truthy chore returns increment ``modified``; raised exceptions are
71
+ isolated per-record, logged via ``Familia.warn``, and counted as
72
+ ``errors`` so a single failure doesn't halt the run. Lifted from the
73
+ shape proven out in OneTime Secret's ``HousekeepingJob``.
74
+
75
+ - Trace events for connection-mode conflicts. ``Familia.trace`` now
76
+ emits ``CONFLICTING_CONTEXT`` when pipeline and transaction
77
+ contexts collide (in both ``FiberPipelineHandler``/
78
+ ``FiberTransactionHandler`` and the
79
+ ``execute_transaction``/``execute_pipeline`` entry points), and
80
+ ``FAST_WRITER_BLOCKED`` when a fast writer (``field!``) is called
81
+ inside a transaction or pipeline. These fire just before the
82
+ corresponding ``ConflictingContextError`` /
83
+ ``OperationModeError`` is raised, so operators can pinpoint where
84
+ blocked operations originate when ``FAMILIA_TRACE=1``.
85
+
86
+ Changed
87
+ -------
88
+
89
+ - ``repair_all!`` now runs each repair stage inside its own rescue
90
+ boundary; a failure in one dimension no longer prevents the others
91
+ from running. The return hash gains ``:status`` (``:ok`` or
92
+ ``:partial_failure``), ``:errors`` (per-stage exception details
93
+ when raised), and ``:multi_indexes`` (results from the new
94
+ ``repair_multi_indexes!``). An opt-in ``verify: true`` kwarg
95
+ re-runs ``health_check`` after repair and exposes the result as
96
+ ``:post_audit`` / ``:verified`` so callers can confirm the run
97
+ actually drove the model back to a healthy state.
98
+
99
+ - ``AuditReport#complete?`` no longer reports a false positive due to
100
+ ``:not_implemented`` stubs in ``multi_indexes`` -- instance-scoped
101
+ indexes return ``:ok`` or ``:issues_found`` like class-level ones.
102
+
103
+ - ``housekeeping`` feature: split the dual-purpose ``tidy!`` into two
104
+ explicit instance methods. ``do_chore!(name)`` runs a single named
105
+ chore and returns the block's raw return value (no longer wrapped
106
+ in a ``{name => result}`` hash). ``do_chores!`` runs every
107
+ registered chore and returns the ``{name => result}`` hash.
108
+ ``tidy!`` is preserved as an alias of ``do_chores!`` for backwards
109
+ compatibility with the 2.7.0 no-arg call site; the single-arg form
110
+ ``tidy!(:name)`` now raises ``ArgumentError``.
111
+
112
+ - The connection handler hierarchy has been refactored from class
113
+ inheritance (``BaseConnectionHandler``) to module composition.
114
+ Handlers now ``include Familia::Connection::Handler`` and declare
115
+ their operation-mode capabilities with a small DSL:
116
+ ``supports transaction: true, pipelined: false``. The
117
+ ``BaseConnectionHandler`` constant is gone. This is only relevant if
118
+ you have custom handlers in application code — the public
119
+ ``allows_transaction`` / ``allows_pipelined`` class methods continue
120
+ to work, and the singleton ``.instance`` accessors on
121
+ ``FiberPipelineHandler`` / ``FiberTransactionHandler`` are
122
+ unchanged. The previous default of "allow all operations" when
123
+ capability flags were not set has been removed; every handler is now
124
+ expected to declare its capabilities explicitly via ``supports``.
125
+ - ``Familia.dbclient`` and ``Familia::DataType#dbclient`` now route through ``FiberPipelineHandler`` before ``FiberTransactionHandler``, matching ``Horreum#dbclient``. With both handlers in the chain, attempting to mix pipeline and transaction contexts raises ``Familia::ConflictingContextError`` uniformly from every call site.
126
+
127
+ Removed
128
+ -------
129
+
130
+ - ``Familia::DataType#direct_access`` has been removed. The method
131
+ was a legacy escape hatch for issuing raw Redis commands from
132
+ inside a DataType wrapper; it predates the chain-based routing of
133
+ ``Fiber[:familia_transaction]`` and ``Fiber[:familia_pipeline]``.
134
+ All in-tree call sites now go through the wrapper's own mutating
135
+ methods (which auto-route through the active transaction or
136
+ pipeline) or through the wrapper's ``transaction`` / ``pipelined``
137
+ blocks. If you were calling ``direct_access do |conn, key| ... end``,
138
+ replace it with either the DataType's own mutator or the
139
+ corresponding block API.
140
+
141
+ Fixed
142
+ -----
143
+
144
+ - ``SortedSet#popmin`` and ``SortedSet#popmax`` now normalize an explicitly
145
+ passed ``nil`` count to the default of ``1``. Previously, calling
146
+ ``zset.popmin(nil)`` or ``zset.popmax(nil)`` would bypass the ``count == 1``
147
+ branch of the structural dispatch added in the prior commit, causing
148
+ redis-rb's flat ``[member, score]`` return shape to be iterated as if it
149
+ were a nested result — yielding a malformed pair. Calls that omit the
150
+ argument entirely were never affected and remain correct.
151
+
152
+ - Restored ``require 'set'`` in ``lib/familia/horreum/management/audit.rb``. ``Set`` is autoloaded as a core class only on Ruby 3.4+; on Ruby 3.2/3.3 (the gem's supported floor) the require is mandatory for the five ``Set.new`` usages in that file.
153
+
154
+ AI Assistance
155
+ -------------
156
+
157
+ - Claude Opus 4.5 analyzed Redis 7 command documentation and compared coverage
158
+ against existing Familia DataType implementations using parallel Explore agents.
159
+ - Implementation performed by 5 parallel backend-dev agents, one per DataType.
160
+ - Test coverage written by 5 parallel qa-automation-engineer agents focusing on
161
+ Familia-specific behavior (serialization, deserialization, aliases) rather than
162
+ re-testing redis-rb gem functionality.
163
+
164
+ - Edge case identified by the Claude Code Review GitHub Action
165
+ (``.github/workflows/claude-code-review.yml``) when reviewing the
166
+ structural-dispatch change in commit ``010d5be``. Fix drafted and verified
167
+ by Claude Opus 4.5 under supervision.
168
+
169
+ - Instance-scoped multi-index audit algorithm (bucket discovery,
170
+ scope existence batching, participation-driven missing detection),
171
+ ``repair_multi_indexes!``, the ``repair_all!`` robustness
172
+ refactor, and the accompanying tryouts coverage were authored
173
+ with Claude Code assistance against the #217 review branch.
174
+
175
+ - Method split, alias wiring, bulk runner port from OTS, doc updates,
176
+ and expanded tryouts coverage (25 → 48 testcases) authored with
177
+ Claude Code.
178
+
179
+ - Added the trace instrumentation in response to PR #263 review
180
+ feedback (Claude Code review bot) recommending tracing for
181
+ conflict detection events.
182
+
183
+ - The handler refactor, ``direct_access`` removal, and changelog drafting were performed with Claude Code assistance while resolving review feedback on PR #263.
184
+
10
185
  .. _changelog-2.7.0:
11
186
 
12
187
  2.7.0 — 2026-05-13
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
1
1
  PATH
2
2
  remote: .
3
3
  specs:
4
- familia (2.7.0)
4
+ familia (2.8.0)
5
5
  concurrent-ruby (~> 1.3)
6
6
  connection_pool (>= 2.4, < 4.0)
7
7
  csv (~> 3.3)
@@ -3,7 +3,7 @@
3
3
  The Housekeeping feature provides a declarative DSL for registering named cleanup chores on Horreum models. It is designed for short-lived, repeated tidying against fields whose values have drifted over time -- not for versioned, one-shot migrations.
4
4
 
5
5
  > [!TIP]
6
- > Enable with `feature :housekeeping` and register cleanup blocks with `chore :name do |obj| ... end`. Run them with `obj.tidy!`. Iteration and persistence are the caller's responsibility.
6
+ > Enable with `feature :housekeeping` and register cleanup blocks with `chore :name do |obj| ... end`. Run all of them with `obj.do_chores!` (aliased `tidy!`), or one with `obj.do_chore!(:name)`. Iteration and persistence are the caller's responsibility.
7
7
 
8
8
  ## Quick Start
9
9
 
@@ -27,8 +27,12 @@ class Organization < Familia::Horreum
27
27
  end
28
28
 
29
29
  org = Organization.from_identifier("acme-corp")
30
- org.tidy!
30
+ org.do_chores!
31
31
  # => { standardize_planid: true }
32
+
33
+ # Or run a single chore by name (returns the block's raw value):
34
+ org.do_chore!(:standardize_planid)
35
+ # => true
32
36
  ```
33
37
 
34
38
  ## When to Use
@@ -79,35 +83,43 @@ Run all registered chores, or one by name:
79
83
  ```ruby
80
84
  user = User.from_identifier("alice@example.com")
81
85
 
82
- user.tidy!
86
+ user.do_chores!
83
87
  # => { downcase_email: true, default_timezone: nil }
84
88
 
85
- user.tidy!(:downcase_email)
86
- # => { downcase_email: true }
89
+ user.tidy! # alias for do_chores!
90
+ # => { downcase_email: nil, default_timezone: nil }
91
+
92
+ user.do_chore!(:downcase_email)
93
+ # => true
87
94
  ```
88
95
 
89
- The return value is a hash mapping chore name to the block's return value. A truthy result signals "modified"; `nil` or `false` signals "no-op". The feature does not interpret these values -- they are passed through for the caller's stats collection.
96
+ `do_chores!` returns a hash mapping chore name to the block's return value. `do_chore!` returns the block's raw return value (not wrapped in a hash). A truthy result signals "modified"; `nil` or `false` signals "no-op". The feature does not interpret these values -- they are passed through for the caller's stats collection.
90
97
 
91
- ### Iteration -- Caller's Responsibility
98
+ ### Iteration -- Bulk Runner
92
99
 
93
- The feature operates on a single instance. Bulk runs live in the consumer app:
100
+ For running chores across every record, the feature ships a class-level `run_chores!` that iterates the `instances` collection in pipelined batches (via `load_multi`), executes each chore per record with error isolation, and returns a stats hash:
94
101
 
95
102
  ```ruby
96
- # nightly rake task
97
- namespace :data do
98
- task tidy_orgs: :environment do
99
- stats = Hash.new(0)
100
- Organization.instances.each do |id|
101
- org = Organization.find_by_id(id) or next
102
- results = org.tidy!
103
- results.each { |name, result| stats[name] += 1 if result }
104
- end
105
- puts stats.inspect
106
- end
107
- end
103
+ Organization.run_chores!
104
+ # => {
105
+ # model: "Organization",
106
+ # scanned: 4200,
107
+ # chores: {
108
+ # standardize_planid: { modified: 37, errors: 0 },
109
+ # uppercase_country: { modified: 102, errors: 1 },
110
+ # },
111
+ # }
112
+
113
+ Organization.run_chores!(chore_name: :standardize_planid, limit: 500)
114
+ # Filter to one chore and cap records scanned.
115
+
116
+ Organization.run_chores!(batch_size: 50)
117
+ # Tune the load_multi pipeline batch size (default: 100).
108
118
  ```
109
119
 
110
- The feature has no opinion about batching, SCAN vs KEYS, error aggregation, or scheduling -- the consumer app owns all of that.
120
+ A truthy chore return increments `modified`; a raised exception increments `errors` (logged via `Familia.warn`) and iteration continues. The runner requires the class to expose `instances` (Horreum's default class-level sorted set) and `load_multi`.
121
+
122
+ For scheduling, cross-model orchestration, custom logging, or non-default iteration (e.g. a configured allowlist of model classes), wrap `run_chores!` in your own job. The feature deliberately stays out of cron, multi-model discovery, and project-specific logging layers.
111
123
 
112
124
  ## Generated Method Reference
113
125
 
@@ -117,15 +129,18 @@ The feature has no opinion about batching, SCAN vs KEYS, error aggregation, or s
117
129
  |-------|--------|---------|
118
130
  | **Class** | `chore(name, &block)` | Register a chore |
119
131
  | | `chores` | Hash of registered chores |
120
- | **Instance** | `tidy!(name = nil)` | Run all (or one) chore; returns Hash |
132
+ | | `run_chores!(chore_name:, limit:, batch_size:)` | Bulk runner across `instances`; returns stats hash |
133
+ | **Instance** | `do_chore!(name)` | Run a single chore by name; returns the block's raw value |
134
+ | | `do_chores!` | Run every registered chore; returns Hash |
135
+ | | `tidy!` | Alias for `do_chores!` |
121
136
 
122
137
  ## Design Constraints
123
138
 
124
139
  1. **No implicit saves.** The block must call `save` (or `commit_fields`) itself. The feature does not auto-persist.
125
- 2. **No iteration.** Operates on a single instance. There is no class-level `tidy_all!`.
140
+ 2. **Bulk via `run_chores!` only.** The feature operates on a single instance (`do_chore!`/`do_chores!`) plus one bulk runner (`run_chores!`) that iterates `instances`. Scheduling, multi-model orchestration, and custom logging stay in the consumer app.
126
141
  3. **No ordering.** Chores run in registration order, but should not depend on each other. If order matters, write one chore with sequential steps.
127
142
  4. **Idempotent by convention.** Use the conditional pattern (`if canonical && canonical != org.planid`) so a second run is a no-op.
128
- 5. **Errors propagate.** The block can raise; the iteration code in the consumer app decides whether to rescue.
143
+ 5. **Errors are isolated in `run_chores!`, but propagate in `do_chore!`/`do_chores!`.** Single-instance methods let exceptions propagate; the bulk runner rescues per-record and increments the chore's `errors` counter so one failure doesn't halt the run.
129
144
 
130
145
  ## Common Patterns
131
146
 
@@ -150,7 +165,7 @@ class Customer < Familia::Horreum
150
165
  end
151
166
  end
152
167
 
153
- customer.tidy!
168
+ customer.do_chores!
154
169
  # => { trim_whitespace: true, uppercase_country: nil }
155
170
  ```
156
171
 
@@ -176,28 +191,43 @@ chore :reconcile_billing do |account|
176
191
  end
177
192
  ```
178
193
 
179
- ### Tracking Modified Records
194
+ ### Tracking Modified Records (Bulk)
195
+
196
+ `run_chores!` already aggregates `modified` and `errors` counts per chore. Use it directly:
197
+
198
+ ```ruby
199
+ report = Organization.run_chores!
200
+ report[:chores].each do |name, counts|
201
+ puts "#{name}: #{counts[:modified]} modified, #{counts[:errors]} errors"
202
+ end
203
+ ```
204
+
205
+ ### Custom Iteration (e.g. SCAN-Based)
206
+
207
+ If `instances`-driven iteration isn't suitable (sharded data, custom scoping), drop down to `do_chore!`/`do_chores!`:
180
208
 
181
209
  ```ruby
182
210
  modified = []
183
211
  Organization.instances.each do |id|
184
- org = Organization.find_by_id(id) or next
185
- results = org.tidy!
212
+ org = Organization.find_by_identifier(id) or next
213
+ results = org.do_chores!
186
214
  modified << id if results.values.any?
187
215
  end
188
216
  puts "Modified #{modified.size} records: #{modified.inspect}"
189
217
  ```
190
218
 
191
- ### Error Aggregation
219
+ ### Wrapping `run_chores!` for a Job Framework
192
220
 
193
221
  ```ruby
194
- errors = {}
195
- Organization.instances.each do |id|
196
- org = Organization.find_by_id(id) or next
197
- begin
198
- org.tidy!
199
- rescue => e
200
- errors[id] = e.message
222
+ class HousekeepingJob
223
+ def self.perform_for(klass)
224
+ report = klass.run_chores!(batch_size: 50)
225
+ StatsD.gauge("housekeeping.#{klass.name}.scanned", report[:scanned])
226
+ report[:chores].each do |chore, counts|
227
+ StatsD.increment("housekeeping.#{klass.name}.#{chore}.modified", counts[:modified])
228
+ StatsD.increment("housekeeping.#{klass.name}.#{chore}.errors", counts[:errors])
229
+ end
230
+ report
201
231
  end
202
232
  end
203
233
  ```
@@ -38,37 +38,54 @@ module Familia
38
38
  end
39
39
  end
40
40
 
41
- # Connection handler base class for Chain of Responsibility pattern.
42
- # When no arguments are passed, all behaviour is based on the top
43
- # Familia module itself. e.g. Familia.create_dbclient.
41
+ # Shared interface for connection handlers in the Chain of Responsibility.
44
42
  #
45
- # Summary of Behaviors
43
+ # Including this module gives a handler class:
44
+ # * an instance-side `handle(uri)` stub that raises NotImplementedError, and
45
+ # * a small class-level DSL (`supports`) for declaring capability flags
46
+ # (`allows_transaction`, `allows_pipelined`) read by the operation
47
+ # guards in {Familia::Connection::TransactionCore} and
48
+ # {Familia::Connection::PipelinedCore}.
46
49
  #
47
- # | Handler | Transaction | Pipeline | Ad-hoc Commands |
48
- # |---------|------------|----------|-----------------|
49
- # | **FiberTransaction** | Reentrant (same conn) | Error | Use transaction conn |
50
- # | **FiberConnection** | Error | Error | ✓ Allowed |
51
- # | **Provider** | ✓ New checkout | ✓ New checkout | ✓ New checkout |
52
- # | **Default** | ✓ With guards | ✓ With guards | ✓ Check mode |
53
- # | **Create** | ✓ Fresh conn | ✓ Fresh conn | ✓ Fresh conn |
50
+ # Capability flags default to `nil` when `supports` is not called — there
51
+ # is no implicit "allow all". Every handler is expected to declare its
52
+ # capabilities explicitly.
54
53
  #
55
- # NOTE: Every subclass must provide values for the @allows_transaction
56
- # and @allows_pipelined attributes.
54
+ # Summary of behaviours of the in-tree handlers:
57
55
  #
58
- class BaseConnectionHandler
59
- @allows_transaction = true
60
- @allows_pipelined = true
61
-
62
- class << self
63
- attr_reader :allows_transaction, :allows_pipelined
56
+ # | Handler | Transaction | Pipeline | Ad-hoc Commands |
57
+ # |--------------------|-------------------|----------------------|----------------------|
58
+ # | FiberPipeline | Error | Reentrant (same conn)| Use pipeline conn |
59
+ # | FiberTransaction | Reentrant | Error | Use transaction conn |
60
+ # | FiberConnection | Error | Error | Allowed |
61
+ # | Provider | New checkout | New checkout | New checkout |
62
+ # | Cached | Error | Error | Allowed |
63
+ # | Create / Default | Fresh conn | Fresh conn | Fresh conn |
64
+ # | ParentDelegation | Delegated | Delegated | Delegated |
65
+ # | Standalone | Allowed | Allowed | Allowed |
66
+ #
67
+ module Handler
68
+ def self.included(base)
69
+ base.extend(ClassMethods)
64
70
  end
65
71
 
66
- def initialize(familia_module = nil)
67
- @familia_module = familia_module || Familia
72
+ def handle(_uri)
73
+ raise NotImplementedError, 'Subclasses must implement handle'
68
74
  end
69
75
 
70
- def handle(uri)
71
- raise NotImplementedError, 'Subclasses must implement handle'
76
+ # Class-level DSL injected into every handler class that includes Handler.
77
+ # Holds the capability flag readers and the `supports` declarator.
78
+ module ClassMethods
79
+ attr_reader :allows_transaction, :allows_pipelined
80
+
81
+ # Declare the operation modes this handler supports.
82
+ #
83
+ # @param transaction [Boolean, Symbol] true/false or :reentrant
84
+ # @param pipelined [Boolean, Symbol] true/false or :reentrant
85
+ def supports(transaction: false, pipelined: false)
86
+ @allows_transaction = transaction
87
+ @allows_pipelined = pipelined
88
+ end
72
89
  end
73
90
  end
74
91
 
@@ -79,9 +96,14 @@ module Familia
79
96
  # Fresh connection each time - all operations safe (transactions,
80
97
  # pipelined, ad-hoc)
81
98
  #
82
- class CreateConnectionHandler < BaseConnectionHandler
83
- @allows_transaction = true
84
- @allows_pipelined = true
99
+ class CreateConnectionHandler
100
+ include Handler
101
+
102
+ supports transaction: true, pipelined: true
103
+
104
+ def initialize(familia_module = nil)
105
+ @familia_module = familia_module || Familia
106
+ end
85
107
 
86
108
  def handle(uri)
87
109
  # Create new connection (no module-level caching)
@@ -101,9 +123,14 @@ module Familia
101
123
  # and also expected how they are to be used.
102
124
  # This is where connection pools live
103
125
  #
104
- class ProviderConnectionHandler < BaseConnectionHandler
105
- @allows_transaction = true
106
- @allows_pipelined = true
126
+ class ProviderConnectionHandler
127
+ include Handler
128
+
129
+ supports transaction: true, pipelined: true
130
+
131
+ def initialize(familia_module = nil)
132
+ @familia_module = familia_module || Familia
133
+ end
107
134
 
108
135
  def handle(uri)
109
136
  return nil unless @familia_module.connection_provider
@@ -143,9 +170,14 @@ module Familia
143
170
  # raise "Unknown operation: #{request.operation}"
144
171
  # end
145
172
  #
146
- class FiberConnectionHandler < BaseConnectionHandler
147
- @allows_transaction = false
148
- @allows_pipelined = false
173
+ class FiberConnectionHandler
174
+ include Handler
175
+
176
+ supports transaction: false, pipelined: false
177
+
178
+ def initialize(familia_module = nil)
179
+ @familia_module = familia_module || Familia
180
+ end
149
181
 
150
182
  def handle(uri)
151
183
  return nil unless Fiber[:familia_connection]
@@ -163,31 +195,80 @@ module Familia
163
195
  end
164
196
  end
165
197
 
166
- # Checks for fiber-local transaction connections (highest priority for Horreum)
198
+ # Checks for fiber-local pipeline connections
199
+ #
200
+ # Returns the fiber-local pipeline connection when inside a pipelined block.
201
+ # Raises ConflictingContextError if both pipeline and transaction contexts
202
+ # are active — these are mutually exclusive operations.
203
+ #
204
+ # Reentrant pipeline - just yield the existing connection
205
+ # No new pipeline block, just participate in existing pipeline
206
+ #
207
+ class FiberPipelineHandler
208
+ include Handler
209
+
210
+ supports transaction: false, pipelined: :reentrant
211
+
212
+ # Singleton pattern for stateless handler
213
+ @instance = new.freeze
214
+
215
+ class << self
216
+ attr_reader :instance
217
+ end
218
+
219
+ def handle(_uri)
220
+ return nil unless Fiber[:familia_pipeline]
221
+
222
+ if Fiber[:familia_transaction]
223
+ Familia.trace :CONFLICTING_CONTEXT, _uri,
224
+ 'Pipeline handler detected active transaction context'
225
+ raise Familia::ConflictingContextError,
226
+ 'Cannot mix pipeline and transaction contexts. ' \
227
+ 'Restructure to use one or the other.'
228
+ end
229
+
230
+ Familia.trace :DBCLIENT_FIBER_PIPELINE, nil, 'Using fiber-local pipeline connection'
231
+ Fiber[:familia_pipeline]
232
+ end
233
+ end
234
+
235
+ # Checks for fiber-local transaction connections
167
236
  #
168
237
  # Key insight: Mark that we're in reentrant mode and also track of
169
238
  # depth. This allows nested transaction calls to be safely reentrant
170
239
  # without breaking Redis's single-level MULTI/EXEC.
171
240
  #
241
+ # Raises ConflictingContextError if both pipeline and transaction contexts
242
+ # are active — these are mutually exclusive operations.
243
+ #
172
244
  # Reentrant transaction - just yield the existing connection
173
245
  # No new MULTI/EXEC, just participate in existing transaction
174
246
  # Fiber[:familia_transaction_depth] ||= 0
175
247
  # Fiber[:familia_transaction_depth] += 1
176
248
  #
177
- class FiberTransactionHandler < BaseConnectionHandler
178
- @allows_transaction = :reentrant
179
- @allows_pipelined = false
249
+ class FiberTransactionHandler
250
+ include Handler
251
+
252
+ supports transaction: :reentrant, pipelined: false
180
253
 
181
254
  # Singleton pattern for stateless handler
182
255
  @instance = new.freeze
183
256
 
184
- def self.instance
185
- @instance
257
+ class << self
258
+ attr_reader :instance
186
259
  end
187
260
 
188
261
  def handle(_uri)
189
262
  return nil unless Fiber[:familia_transaction]
190
263
 
264
+ if Fiber[:familia_pipeline]
265
+ Familia.trace :CONFLICTING_CONTEXT, _uri,
266
+ 'Transaction handler detected active pipeline context'
267
+ raise Familia::ConflictingContextError,
268
+ 'Cannot mix pipeline and transaction contexts. ' \
269
+ 'Restructure to use one or the other.'
270
+ end
271
+
191
272
  Familia.trace :DBCLIENT_FIBER_TRANSACTION, nil, 'Using fiber-local transaction connection'
192
273
  Fiber[:familia_transaction]
193
274
  end
@@ -205,9 +286,10 @@ module Familia
205
286
  #
206
287
  # CachedConnectionHandler - Single cached connection - block all multi-mode operations
207
288
  #
208
- class CachedConnectionHandler < BaseConnectionHandler
209
- @allows_transaction = false
210
- @allows_pipelined = false
289
+ class CachedConnectionHandler
290
+ include Handler
291
+
292
+ supports transaction: false, pipelined: false
211
293
 
212
294
  def initialize(familia_module)
213
295
  @familia_module = familia_module
@@ -240,9 +322,10 @@ module Familia
240
322
  # @example Class-level DataType with parent
241
323
  # User.global_users # DataType that delegates to User.dbclient
242
324
  #
243
- class ParentDelegationHandler < BaseConnectionHandler
244
- @allows_transaction = true
245
- @allows_pipelined = true
325
+ class ParentDelegationHandler
326
+ include Handler
327
+
328
+ supports transaction: true, pipelined: true
246
329
 
247
330
  def initialize(data_type)
248
331
  @data_type = data_type
@@ -281,9 +364,10 @@ module Familia
281
364
  # @example Standalone DataType with logical_database option
282
365
  # cache = Familia::HashKey.new('app:cache', logical_database: 2)
283
366
  #
284
- class StandaloneConnectionHandler < BaseConnectionHandler
285
- @allows_transaction = true
286
- @allows_pipelined = true
367
+ class StandaloneConnectionHandler
368
+ include Handler
369
+
370
+ supports transaction: true, pipelined: true
287
371
 
288
372
  def initialize(data_type)
289
373
  @data_type = data_type