fundedness-0.2.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. fundedness/__init__.py +71 -0
  2. fundedness/allocation/__init__.py +20 -0
  3. fundedness/allocation/base.py +32 -0
  4. fundedness/allocation/constant.py +25 -0
  5. fundedness/allocation/glidepath.py +111 -0
  6. fundedness/allocation/merton_optimal.py +220 -0
  7. fundedness/cefr.py +241 -0
  8. fundedness/liabilities.py +221 -0
  9. fundedness/liquidity.py +49 -0
  10. fundedness/merton.py +289 -0
  11. fundedness/models/__init__.py +35 -0
  12. fundedness/models/assets.py +148 -0
  13. fundedness/models/household.py +153 -0
  14. fundedness/models/liabilities.py +99 -0
  15. fundedness/models/market.py +199 -0
  16. fundedness/models/simulation.py +80 -0
  17. fundedness/models/tax.py +125 -0
  18. fundedness/models/utility.py +154 -0
  19. fundedness/optimize.py +473 -0
  20. fundedness/policies.py +204 -0
  21. fundedness/risk.py +72 -0
  22. fundedness/simulate.py +595 -0
  23. fundedness/viz/__init__.py +33 -0
  24. fundedness/viz/colors.py +110 -0
  25. fundedness/viz/comparison.py +294 -0
  26. fundedness/viz/fan_chart.py +193 -0
  27. fundedness/viz/histogram.py +225 -0
  28. fundedness/viz/optimal.py +542 -0
  29. fundedness/viz/survival.py +230 -0
  30. fundedness/viz/tornado.py +236 -0
  31. fundedness/viz/waterfall.py +203 -0
  32. fundedness/withdrawals/__init__.py +27 -0
  33. fundedness/withdrawals/base.py +116 -0
  34. fundedness/withdrawals/comparison.py +230 -0
  35. fundedness/withdrawals/fixed_swr.py +174 -0
  36. fundedness/withdrawals/guardrails.py +136 -0
  37. fundedness/withdrawals/merton_optimal.py +286 -0
  38. fundedness/withdrawals/rmd_style.py +203 -0
  39. fundedness/withdrawals/vpw.py +136 -0
  40. fundedness-0.2.4.dist-info/METADATA +300 -0
  41. fundedness-0.2.4.dist-info/RECORD +43 -0
  42. fundedness-0.2.4.dist-info/WHEEL +4 -0
  43. fundedness-0.2.4.dist-info/entry_points.txt +2 -0
fundedness/optimize.py ADDED
@@ -0,0 +1,473 @@
+ """Parametric policy optimization via Monte Carlo simulation.
+
+ This module provides tools for searching over policy parameters to find
+ configurations that maximize expected lifetime utility.
+ """
+
+ from dataclasses import dataclass, field
+ from typing import Any, Callable
+
+ import numpy as np
+ from scipy import optimize
+
+ from fundedness.models.market import MarketModel
+ from fundedness.models.simulation import SimulationConfig
+ from fundedness.models.utility import UtilityModel
+ from fundedness.simulate import run_simulation_with_utility
+
+
+ @dataclass
+ class PolicyParameterSpec:
+     """Specification for an optimizable policy parameter.
+
+     Attributes:
+         name: Parameter name (must match policy attribute)
+         min_value: Minimum allowed value
+         max_value: Maximum allowed value
+         initial_value: Starting value for optimization
+         is_integer: Whether parameter should be rounded to integer
+     """
+
+     name: str
+     min_value: float
+     max_value: float
+     initial_value: float | None = None
+     is_integer: bool = False
+
+     def get_initial(self) -> float:
+         """Get initial value (midpoint if not specified)."""
+         if self.initial_value is not None:
+             return self.initial_value
+         return (self.min_value + self.max_value) / 2
+
+     def clip(self, value: float) -> float:
+         """Clip value to bounds and optionally round."""
+         clipped = max(self.min_value, min(self.max_value, value))
+         if self.is_integer:
+             return round(clipped)
+         return clipped
+
+
+ @dataclass
+ class OptimizationResult:
+     """Results from policy optimization.
+
+     Attributes:
+         optimal_params: Dictionary of optimal parameter values
+         optimal_utility: Expected lifetime utility at optimum
+         certainty_equivalent: Certainty equivalent consumption at optimum
+         success_rate: Success rate at optimal parameters
+         iterations: Number of optimization iterations
+         convergence_history: Utility values during optimization
+         final_simulation: Full simulation result at optimum
+     """
+
+     optimal_params: dict[str, float]
+     optimal_utility: float
+     certainty_equivalent: float
+     success_rate: float
+     iterations: int
+     convergence_history: list[float] = field(default_factory=list)
+     final_simulation: Any = None
+
+
+ def create_policy_with_params(
+     policy_class: type,
+     base_params: dict,
+     param_specs: list[PolicyParameterSpec],
+     param_values: np.ndarray,
+ ) -> Any:
+     """Create a policy instance with specified parameter values.
+
+     Args:
+         policy_class: Policy class to instantiate
+         base_params: Fixed parameters for the policy
+         param_specs: Specifications for optimizable parameters
+         param_values: Current values for optimizable parameters
+
+     Returns:
+         Policy instance with combined parameters
+     """
+     params = dict(base_params)
+     for spec, value in zip(param_specs, param_values):
+         params[spec.name] = spec.clip(value)
+     return policy_class(**params)
+
+
+ def optimize_spending_policy(
+     policy_class: type,
+     param_specs: list[PolicyParameterSpec],
+     initial_wealth: float,
+     allocation_policy: Any,
+     config: SimulationConfig,
+     utility_model: UtilityModel,
+     base_params: dict | None = None,
+     spending_floor: float | None = None,
+     method: str = "nelder-mead",
+     max_iterations: int = 50,
+ ) -> OptimizationResult:
+     """Optimize spending policy parameters to maximize utility.
+
+     Uses scipy.optimize to search over policy parameters, evaluating
+     each candidate via Monte Carlo simulation.
+
+     Args:
+         policy_class: Spending policy class to optimize
+         param_specs: Parameters to optimize
+         initial_wealth: Starting portfolio value
+         allocation_policy: Fixed allocation policy to use
+         config: Simulation configuration
+         utility_model: Utility model for evaluation
+         base_params: Fixed parameters for the policy
+         spending_floor: Minimum spending floor
+         method: Optimization method (nelder-mead, powell, etc.)
+         max_iterations: Maximum optimization iterations
+
+     Returns:
+         OptimizationResult with optimal parameters and metrics
+     """
+     if base_params is None:
+         base_params = {}
+
+     convergence_history = []
+     best_utility = -np.inf
+     best_params = None
+     best_result = None
+
+     def objective(param_values: np.ndarray) -> float:
+         """Negative utility (for minimization)."""
+         nonlocal best_utility, best_params, best_result
+
+         # Create policy with current parameters
+         policy = create_policy_with_params(
+             policy_class, base_params, param_specs, param_values
+         )
+
+         # Run simulation
+         result = run_simulation_with_utility(
+             initial_wealth=initial_wealth,
+             spending_policy=policy,
+             allocation_policy=allocation_policy,
+             config=config,
+             utility_model=utility_model,
+             spending_floor=spending_floor,
+         )
+
+         utility = result.expected_lifetime_utility
+         convergence_history.append(utility)
+
+         # Track best
+         if utility > best_utility:
+             best_utility = utility
+             best_params = {spec.name: spec.clip(v) for spec, v in zip(param_specs, param_values)}
+             best_result = result
+
+         return -utility  # Minimize negative utility
+
+     # Initial values
+     x0 = np.array([spec.get_initial() for spec in param_specs])
+
+     # Bounds
+     bounds = [(spec.min_value, spec.max_value) for spec in param_specs]
+
+     # Run optimization
+     if method.lower() in ("nelder-mead", "powell"):
+         result = optimize.minimize(
+             objective,
+             x0,
+             method=method,
+             options={"maxiter": max_iterations, "disp": False},
+         )
+     else:
+         result = optimize.minimize(
+             objective,
+             x0,
+             method=method,
+             bounds=bounds,
+             options={"maxiter": max_iterations, "disp": False},
+         )
+
+     return OptimizationResult(
+         optimal_params=best_params or {},
+         optimal_utility=best_utility,
+         certainty_equivalent=best_result.certainty_equivalent_consumption if best_result else 0.0,
+         success_rate=best_result.success_rate if best_result else 0.0,
+         iterations=result.nit if hasattr(result, "nit") else len(convergence_history),
+         convergence_history=convergence_history,
+         final_simulation=best_result,
+     )
+
+
+ def optimize_allocation_policy(
+     policy_class: type,
+     param_specs: list[PolicyParameterSpec],
+     initial_wealth: float,
+     spending_policy: Any,
+     config: SimulationConfig,
+     utility_model: UtilityModel,
+     base_params: dict | None = None,
+     spending_floor: float | None = None,
+     method: str = "nelder-mead",
+     max_iterations: int = 50,
+ ) -> OptimizationResult:
+     """Optimize allocation policy parameters to maximize utility.
+
+     Args:
+         policy_class: Allocation policy class to optimize
+         param_specs: Parameters to optimize
+         initial_wealth: Starting portfolio value
+         spending_policy: Fixed spending policy to use
+         config: Simulation configuration
+         utility_model: Utility model for evaluation
+         base_params: Fixed parameters for the policy
+         spending_floor: Minimum spending floor
+         method: Optimization method
+         max_iterations: Maximum iterations
+
+     Returns:
+         OptimizationResult with optimal parameters and metrics
+     """
+     if base_params is None:
+         base_params = {}
+
+     convergence_history = []
+     best_utility = -np.inf
+     best_params = None
+     best_result = None
+
+     def objective(param_values: np.ndarray) -> float:
+         nonlocal best_utility, best_params, best_result
+
+         policy = create_policy_with_params(
+             policy_class, base_params, param_specs, param_values
+         )
+
+         result = run_simulation_with_utility(
+             initial_wealth=initial_wealth,
+             spending_policy=spending_policy,
+             allocation_policy=policy,
+             config=config,
+             utility_model=utility_model,
+             spending_floor=spending_floor,
+         )
+
+         utility = result.expected_lifetime_utility
+         convergence_history.append(utility)
+
+         if utility > best_utility:
+             best_utility = utility
+             best_params = {spec.name: spec.clip(v) for spec, v in zip(param_specs, param_values)}
+             best_result = result
+
+         return -utility
+
+     x0 = np.array([spec.get_initial() for spec in param_specs])
+     bounds = [(spec.min_value, spec.max_value) for spec in param_specs]
+
+     if method.lower() in ("nelder-mead", "powell"):
+         result = optimize.minimize(
+             objective,
+             x0,
+             method=method,
+             options={"maxiter": max_iterations, "disp": False},
+         )
+     else:
+         result = optimize.minimize(
+             objective,
+             x0,
+             method=method,
+             bounds=bounds,
+             options={"maxiter": max_iterations, "disp": False},
+         )
+
+     return OptimizationResult(
+         optimal_params=best_params or {},
+         optimal_utility=best_utility,
+         certainty_equivalent=best_result.certainty_equivalent_consumption if best_result else 0.0,
+         success_rate=best_result.success_rate if best_result else 0.0,
+         iterations=result.nit if hasattr(result, "nit") else len(convergence_history),
+         convergence_history=convergence_history,
+         final_simulation=best_result,
+     )
+
+
+ def optimize_combined_policy(
+     spending_policy_class: type,
+     allocation_policy_class: type,
+     spending_param_specs: list[PolicyParameterSpec],
+     allocation_param_specs: list[PolicyParameterSpec],
+     initial_wealth: float,
+     config: SimulationConfig,
+     utility_model: UtilityModel,
+     spending_base_params: dict | None = None,
+     allocation_base_params: dict | None = None,
+     spending_floor: float | None = None,
+     method: str = "nelder-mead",
+     max_iterations: int = 100,
+ ) -> OptimizationResult:
+     """Jointly optimize spending and allocation policy parameters.
+
+     Args:
+         spending_policy_class: Spending policy class
+         allocation_policy_class: Allocation policy class
+         spending_param_specs: Spending parameters to optimize
+         allocation_param_specs: Allocation parameters to optimize
+         initial_wealth: Starting portfolio value
+         config: Simulation configuration
+         utility_model: Utility model for evaluation
+         spending_base_params: Fixed spending policy parameters
+         allocation_base_params: Fixed allocation policy parameters
+         spending_floor: Minimum spending floor
+         method: Optimization method
+         max_iterations: Maximum iterations
+
+     Returns:
+         OptimizationResult with optimal parameters for both policies
+     """
+     if spending_base_params is None:
+         spending_base_params = {}
+     if allocation_base_params is None:
+         allocation_base_params = {}
+
+     all_specs = spending_param_specs + allocation_param_specs
+     n_spending = len(spending_param_specs)
+
+     convergence_history = []
+     best_utility = -np.inf
+     best_params = None
+     best_result = None
+
+     def objective(param_values: np.ndarray) -> float:
+         nonlocal best_utility, best_params, best_result
+
+         spending_values = param_values[:n_spending]
+         allocation_values = param_values[n_spending:]
+
+         spending_policy = create_policy_with_params(
+             spending_policy_class,
+             spending_base_params,
+             spending_param_specs,
+             spending_values,
+         )
+         allocation_policy = create_policy_with_params(
+             allocation_policy_class,
+             allocation_base_params,
+             allocation_param_specs,
+             allocation_values,
+         )
+
+         result = run_simulation_with_utility(
+             initial_wealth=initial_wealth,
+             spending_policy=spending_policy,
+             allocation_policy=allocation_policy,
+             config=config,
+             utility_model=utility_model,
+             spending_floor=spending_floor,
+         )
+
+         utility = result.expected_lifetime_utility
+         convergence_history.append(utility)
+
+         if utility > best_utility:
+             best_utility = utility
+             best_params = {}
+             for spec, v in zip(spending_param_specs, spending_values):
+                 best_params[f"spending_{spec.name}"] = spec.clip(v)
+             for spec, v in zip(allocation_param_specs, allocation_values):
+                 best_params[f"allocation_{spec.name}"] = spec.clip(v)
+             best_result = result
+
+         return -utility
+
+     x0 = np.array([spec.get_initial() for spec in all_specs])
+     bounds = [(spec.min_value, spec.max_value) for spec in all_specs]
+
+     if method.lower() in ("nelder-mead", "powell"):
+         result = optimize.minimize(
+             objective,
+             x0,
+             method=method,
+             options={"maxiter": max_iterations, "disp": False},
+         )
+     else:
+         result = optimize.minimize(
+             objective,
+             x0,
+             method=method,
+             bounds=bounds,
+             options={"maxiter": max_iterations, "disp": False},
+         )
+
+     return OptimizationResult(
+         optimal_params=best_params or {},
+         optimal_utility=best_utility,
+         certainty_equivalent=best_result.certainty_equivalent_consumption if best_result else 0.0,
+         success_rate=best_result.success_rate if best_result else 0.0,
+         iterations=result.nit if hasattr(result, "nit") else len(convergence_history),
+         convergence_history=convergence_history,
+         final_simulation=best_result,
+     )
+
+
+ def grid_search_policy(
+     policy_class: type,
+     param_specs: list[PolicyParameterSpec],
+     grid_points: int,
+     evaluate_fn: Callable[[Any], float],
+     base_params: dict | None = None,
+ ) -> tuple[dict[str, float], float, np.ndarray]:
+     """Exhaustive grid search over policy parameters.
+
+     Useful for visualizing the utility surface or when the parameter
+     space is small enough for exhaustive search.
+
+     Args:
+         policy_class: Policy class to optimize
+         param_specs: Parameters to search over
+         grid_points: Number of points per dimension
+         evaluate_fn: Function that takes a policy and returns utility
+         base_params: Fixed parameters for the policy
+
+     Returns:
+         Tuple of (best_params, best_utility, utility_grid)
+     """
+     if base_params is None:
+         base_params = {}
+
+     # Create grid
+     grids = [
+         np.linspace(spec.min_value, spec.max_value, grid_points)
+         for spec in param_specs
+     ]
+
+     # Create meshgrid for all combinations
+     mesh = np.meshgrid(*grids, indexing="ij")
+     shape = mesh[0].shape
+
+     utilities = np.zeros(shape)
+     best_utility = -np.inf
+     best_params = {}
+
+     # Iterate over all grid points
+     it = np.nditer(mesh[0], flags=["multi_index"])
+     while not it.finished:
+         idx = it.multi_index
+         param_values = np.array([m[idx] for m in mesh])
+
+         policy = create_policy_with_params(
+             policy_class, base_params, param_specs, param_values
+         )
+
+         utility = evaluate_fn(policy)
+         utilities[idx] = utility
+
+         if utility > best_utility:
+             best_utility = utility
+             best_params = {
+                 spec.name: spec.clip(v)
+                 for spec, v in zip(param_specs, param_values)
+             }
+
+         it.iternext()
+
+     return best_params, best_utility, utilities
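A minimal usage sketch of the optimizer API above. The PolicyParameterSpec fields, the optimize_spending_policy signature, and the OptimizationResult attributes are taken from this file; default-constructing SimulationConfig and UtilityModel is an assumption, since their definitions live in other modules of this wheel. PercentOfPortfolio and ConstantAllocation are defined in fundedness/policies.py below.

# Hypothetical usage sketch -- only the fundedness.optimize API shown above
# is taken from the package; SimulationConfig() and UtilityModel() being
# default-constructible is an assumption.
from fundedness.models.simulation import SimulationConfig
from fundedness.models.utility import UtilityModel
from fundedness.optimize import PolicyParameterSpec, optimize_spending_policy
from fundedness.policies import ConstantAllocation, PercentOfPortfolio

# Search the withdrawal rate of a percent-of-portfolio rule over [2%, 8%].
specs = [PolicyParameterSpec(name="percentage", min_value=0.02, max_value=0.08)]
result = optimize_spending_policy(
    policy_class=PercentOfPortfolio,
    param_specs=specs,
    initial_wealth=1_000_000,
    allocation_policy=ConstantAllocation(stock_weight=0.6),
    config=SimulationConfig(),      # assumed default-constructible
    utility_model=UtilityModel(),   # assumed default-constructible
    max_iterations=25,
)
print(result.optimal_params, result.certainty_equivalent, result.success_rate)

Note the design choice: with the default nelder-mead method, scipy is run without bounds, so candidate values can wander outside [min_value, max_value]; create_policy_with_params clips them back through PolicyParameterSpec.clip before each simulation, and the reported optimal_params are likewise the clipped values.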
fundedness/policies.py ADDED
@@ -0,0 +1,204 @@
+ """Spending and allocation policy implementations."""
+
+ from dataclasses import dataclass
+
+ import numpy as np
+
+
+ @dataclass
+ class FixedRealSpending:
+     """Fixed real (inflation-adjusted) spending policy."""
+
+     annual_spending: float
+     inflation_rate: float = 0.025
+
+     def get_spending(
+         self,
+         wealth: np.ndarray,
+         year: int,
+         initial_wealth: float,
+     ) -> np.ndarray:
+         """Get spending, capped at available wealth."""
+         nominal_spending = self.annual_spending * (1 + self.inflation_rate) ** year
+         return np.minimum(nominal_spending, np.maximum(wealth, 0))
+
+
+ @dataclass
+ class PercentOfPortfolio:
+     """Spend a fixed percentage of current portfolio value."""
+
+     percentage: float = 0.04  # 4% rule
+     floor: float | None = None
+     ceiling: float | None = None
+
+     def get_spending(
+         self,
+         wealth: np.ndarray,
+         year: int,
+         initial_wealth: float,
+     ) -> np.ndarray:
+         """Get spending as percentage of current wealth."""
+         spending = wealth * self.percentage
+
+         if self.floor is not None:
+             spending = np.maximum(spending, self.floor)
+
+         if self.ceiling is not None:
+             spending = np.minimum(spending, self.ceiling)
+
+         return np.minimum(spending, np.maximum(wealth, 0))
+
+
+ @dataclass
+ class ConstantAllocation:
+     """Constant stock/bond allocation."""
+
+     stock_weight: float = 0.6
+
+     def get_allocation(
+         self,
+         wealth: np.ndarray,
+         year: int,
+         initial_wealth: float,
+     ) -> float:
+         """Return constant stock allocation."""
+         return self.stock_weight
+
+
+ @dataclass
+ class AgeBasedGlidepath:
+     """Age-based declining equity glidepath.
+
+     Classic rule: stock_weight = 100 - age (or similar)
+     """
+
+     initial_stock_weight: float = 0.8
+     final_stock_weight: float = 0.3
+     years_to_final: int = 30
+     starting_age: int = 65  # informational; not used by the linear schedule below
+
+     def get_allocation(
+         self,
+         wealth: np.ndarray,
+         year: int,
+         initial_wealth: float,
+     ) -> float:
+         """Calculate stock allocation based on years into retirement."""
+         progress = min(year / self.years_to_final, 1.0)
+         stock_weight = self.initial_stock_weight - progress * (
+             self.initial_stock_weight - self.final_stock_weight
+         )
+         return stock_weight
+
+
+ @dataclass
+ class RisingEquityGlidepath:
+     """Rising equity glidepath (bonds-first spending).
+
+     Start conservative, increase equity over time as sequence risk decreases.
+     """
+
+     initial_stock_weight: float = 0.3
+     final_stock_weight: float = 0.7
+     years_to_final: int = 20
+
+     def get_allocation(
+         self,
+         wealth: np.ndarray,
+         year: int,
+         initial_wealth: float,
+     ) -> float:
+         """Calculate stock allocation - increasing over time."""
+         progress = min(year / self.years_to_final, 1.0)
+         stock_weight = self.initial_stock_weight + progress * (
+             self.final_stock_weight - self.initial_stock_weight
+         )
+         return stock_weight
+
+
+ @dataclass
+ class FundednessBasedAllocation:
+     """Adjust allocation based on current fundedness level.
+
+     Higher fundedness = can take more risk
+     Lower fundedness = reduce risk to protect floor
+     """
+
+     target_fundedness: float = 1.2  # Target CEFR
+     max_stock_weight: float = 0.8
+     min_stock_weight: float = 0.2
+     liability_pv: float = 1_000_000  # PV of future spending
+
+     def get_allocation(
+         self,
+         wealth: np.ndarray,
+         year: int,
+         initial_wealth: float,
+     ) -> np.ndarray:
+         """Calculate allocation based on current fundedness."""
+         # Simple fundedness estimate (wealth / liability PV)
+         # In practice, would recalculate full CEFR
+         fundedness = wealth / self.liability_pv
+
+         # Linear interpolation based on fundedness:
+         # at target fundedness, use a moderate allocation;
+         # above target, increase stocks; below target, reduce stocks.
+         relative_fundedness = fundedness / self.target_fundedness
+
+         # Map to allocation range
+         stock_weight = self.min_stock_weight + (self.max_stock_weight - self.min_stock_weight) * (
+             np.clip(relative_fundedness, 0.5, 1.5) - 0.5
+         )
+
+         return np.clip(stock_weight, self.min_stock_weight, self.max_stock_weight)
+
+
+ @dataclass
+ class FloorCeilingSpending:
+     """Spending policy with floor and ceiling guardrails.
+
+     Attempts to maintain target spending but:
+     - Never spends below floor (essential spending)
+     - Never spends above ceiling (luxury cap)
+     - Adjusts based on portfolio performance
+     """
+
+     target_spending: float
+     floor_spending: float
+     ceiling_spending: float
+     adjustment_rate: float = 0.05  # How fast spending adjusts toward the sustainable level
+
+     def __post_init__(self):
+         self._previous_spending = None
+
+     def get_spending(
+         self,
+         wealth: np.ndarray,
+         year: int,
+         initial_wealth: float,
+     ) -> np.ndarray:
+         """Calculate spending with guardrails."""
+         if self._previous_spending is None:
+             self._previous_spending = np.full_like(wealth, self.target_spending)
+
+         # Calculate sustainable spending estimate (simplified)
+         sustainable_rate = 0.04  # Simple 4% estimate
+         sustainable_spending = wealth * sustainable_rate
+
+         # Target is previous spending (smoothing)
+         target = self._previous_spending
+
+         # Adjust target toward sustainable level
+         target = target + self.adjustment_rate * (sustainable_spending - target)
+
+         # Apply floor and ceiling
+         spending = np.clip(target, self.floor_spending, self.ceiling_spending)
+
+         # Can't spend more than wealth
+         spending = np.minimum(spending, np.maximum(wealth, 0))
+
+         self._previous_spending = spending
+
+         return spending
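These policy classes plug directly into grid_search_policy from fundedness/optimize.py above. A toy sketch of such a sweep follows; the scoring function here is a stand-in placeholder, whereas the package itself would evaluate candidates with run_simulation_with_utility, as optimize_spending_policy does.

# Toy grid sweep over PercentOfPortfolio's withdrawal rate.
# evaluate_fn below is a placeholder objective, not the package's
# Monte Carlo evaluation.
from fundedness.optimize import PolicyParameterSpec, grid_search_policy
from fundedness.policies import PercentOfPortfolio

def toy_score(policy: PercentOfPortfolio) -> float:
    # Placeholder utility: peaks at a 4% withdrawal rate.
    return -abs(policy.percentage - 0.04)

best_params, best_utility, grid = grid_search_policy(
    policy_class=PercentOfPortfolio,
    param_specs=[PolicyParameterSpec("percentage", 0.02, 0.08)],
    grid_points=13,
    evaluate_fn=toy_score,
)
print(best_params)  # {'percentage': 0.04} on this toy objective

One caveat visible in the code above: FloorCeilingSpending carries _previous_spending between calls, so a fresh instance is needed for each independent simulation run; the stateless policies (PercentOfPortfolio, the glidepaths) can be reused freely.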