fundedness 0.1.0-py3-none-any.whl → 0.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fundedness might be problematic.

fundedness/optimize.py ADDED
@@ -0,0 +1,473 @@
+ """Parametric policy optimization via Monte Carlo simulation.
+
+ This module provides tools for searching over policy parameters to find
+ configurations that maximize expected lifetime utility.
+ """
+
+ from dataclasses import dataclass, field
+ from typing import Any, Callable
+
+ import numpy as np
+ from scipy import optimize
+
+ from fundedness.models.simulation import SimulationConfig
+ from fundedness.models.utility import UtilityModel
+ from fundedness.simulate import run_simulation_with_utility
+
+
+ @dataclass
+ class PolicyParameterSpec:
+     """Specification for an optimizable policy parameter.
+
+     Attributes:
+         name: Parameter name (must match policy attribute)
+         min_value: Minimum allowed value
+         max_value: Maximum allowed value
+         initial_value: Starting value for optimization
+         is_integer: Whether parameter should be rounded to integer
+     """
+
+     name: str
+     min_value: float
+     max_value: float
+     initial_value: float | None = None
+     is_integer: bool = False
+
+     def get_initial(self) -> float:
+         """Get initial value (midpoint if not specified)."""
+         if self.initial_value is not None:
+             return self.initial_value
+         return (self.min_value + self.max_value) / 2
+
+     def clip(self, value: float) -> float:
+         """Clip value to bounds and optionally round."""
+         clipped = max(self.min_value, min(self.max_value, value))
+         if self.is_integer:
+             return round(clipped)
+         return clipped
+
+
+ @dataclass
+ class OptimizationResult:
+     """Results from policy optimization.
+
+     Attributes:
+         optimal_params: Dictionary of optimal parameter values
+         optimal_utility: Expected lifetime utility at optimum
+         certainty_equivalent: Certainty equivalent consumption at optimum
+         success_rate: Success rate at optimal parameters
+         iterations: Number of optimization iterations
+         convergence_history: Utility values during optimization
+         final_simulation: Full simulation result at optimum
+     """
+
+     optimal_params: dict[str, float]
+     optimal_utility: float
+     certainty_equivalent: float
+     success_rate: float
+     iterations: int
+     convergence_history: list[float] = field(default_factory=list)
+     final_simulation: Any = None
+
+
+ def create_policy_with_params(
+     policy_class: type,
+     base_params: dict,
+     param_specs: list[PolicyParameterSpec],
+     param_values: np.ndarray,
+ ) -> Any:
+     """Create a policy instance with specified parameter values.
+
+     Args:
+         policy_class: Policy class to instantiate
+         base_params: Fixed parameters for the policy
+         param_specs: Specifications for optimizable parameters
+         param_values: Current values for optimizable parameters
+
+     Returns:
+         Policy instance with combined parameters
+     """
+     params = dict(base_params)
+     for spec, value in zip(param_specs, param_values):
+         params[spec.name] = spec.clip(value)
+     return policy_class(**params)
+
+
+ def optimize_spending_policy(
+     policy_class: type,
+     param_specs: list[PolicyParameterSpec],
+     initial_wealth: float,
+     allocation_policy: Any,
+     config: SimulationConfig,
+     utility_model: UtilityModel,
+     base_params: dict | None = None,
+     spending_floor: float | None = None,
+     method: str = "nelder-mead",
+     max_iterations: int = 50,
+ ) -> OptimizationResult:
+     """Optimize spending policy parameters to maximize utility.
+
+     Uses scipy.optimize to search over policy parameters, evaluating
+     each candidate via Monte Carlo simulation.
+
+     Args:
+         policy_class: Spending policy class to optimize
+         param_specs: Parameters to optimize
+         initial_wealth: Starting portfolio value
+         allocation_policy: Fixed allocation policy to use
+         config: Simulation configuration
+         utility_model: Utility model for evaluation
+         base_params: Fixed parameters for the policy
+         spending_floor: Minimum spending floor
+         method: Optimization method (nelder-mead, powell, etc.)
+         max_iterations: Maximum optimization iterations
+
+     Returns:
+         OptimizationResult with optimal parameters and metrics
+     """
+     if base_params is None:
+         base_params = {}
+
+     convergence_history = []
+     best_utility = -np.inf
+     best_params = None
+     best_result = None
+
+     def objective(param_values: np.ndarray) -> float:
+         """Negative utility (for minimization)."""
+         nonlocal best_utility, best_params, best_result
+
+         # Create policy with current parameters
+         policy = create_policy_with_params(
+             policy_class, base_params, param_specs, param_values
+         )
+
+         # Run simulation
+         result = run_simulation_with_utility(
+             initial_wealth=initial_wealth,
+             spending_policy=policy,
+             allocation_policy=allocation_policy,
+             config=config,
+             utility_model=utility_model,
+             spending_floor=spending_floor,
+         )
+
+         utility = result.expected_lifetime_utility
+         convergence_history.append(utility)
+
+         # Track the best candidate seen so far
+         if utility > best_utility:
+             best_utility = utility
+             best_params = {spec.name: spec.clip(v) for spec, v in zip(param_specs, param_values)}
+             best_result = result
+
+         return -utility  # Minimize negative utility
+
+     # Initial values
+     x0 = np.array([spec.get_initial() for spec in param_specs])
+
+     # Bounds
+     bounds = [(spec.min_value, spec.max_value) for spec in param_specs]
+
+     # Run optimization. Bounds are not passed to Nelder-Mead/Powell here;
+     # out-of-range candidates are clipped inside the objective instead.
+     if method.lower() in ("nelder-mead", "powell"):
+         result = optimize.minimize(
+             objective,
+             x0,
+             method=method,
+             options={"maxiter": max_iterations, "disp": False},
+         )
+     else:
+         result = optimize.minimize(
+             objective,
+             x0,
+             method=method,
+             bounds=bounds,
+             options={"maxiter": max_iterations, "disp": False},
+         )
+
+     return OptimizationResult(
+         optimal_params=best_params or {},
+         optimal_utility=best_utility,
+         certainty_equivalent=best_result.certainty_equivalent_consumption if best_result else 0.0,
+         success_rate=best_result.success_rate if best_result else 0.0,
+         iterations=result.nit if hasattr(result, "nit") else len(convergence_history),
+         convergence_history=convergence_history,
+         final_simulation=best_result,
+     )
+
+
+ def optimize_allocation_policy(
+     policy_class: type,
+     param_specs: list[PolicyParameterSpec],
+     initial_wealth: float,
+     spending_policy: Any,
+     config: SimulationConfig,
+     utility_model: UtilityModel,
+     base_params: dict | None = None,
+     spending_floor: float | None = None,
+     method: str = "nelder-mead",
+     max_iterations: int = 50,
+ ) -> OptimizationResult:
+     """Optimize allocation policy parameters to maximize utility.
+
+     Args:
+         policy_class: Allocation policy class to optimize
+         param_specs: Parameters to optimize
+         initial_wealth: Starting portfolio value
+         spending_policy: Fixed spending policy to use
+         config: Simulation configuration
+         utility_model: Utility model for evaluation
+         base_params: Fixed parameters for the policy
+         spending_floor: Minimum spending floor
+         method: Optimization method
+         max_iterations: Maximum iterations
+
+     Returns:
+         OptimizationResult with optimal parameters and metrics
+     """
+     if base_params is None:
+         base_params = {}
+
+     convergence_history = []
+     best_utility = -np.inf
+     best_params = None
+     best_result = None
+
+     def objective(param_values: np.ndarray) -> float:
+         nonlocal best_utility, best_params, best_result
+
+         policy = create_policy_with_params(
+             policy_class, base_params, param_specs, param_values
+         )
+
+         result = run_simulation_with_utility(
+             initial_wealth=initial_wealth,
+             spending_policy=spending_policy,
+             allocation_policy=policy,
+             config=config,
+             utility_model=utility_model,
+             spending_floor=spending_floor,
+         )
+
+         utility = result.expected_lifetime_utility
+         convergence_history.append(utility)
+
+         if utility > best_utility:
+             best_utility = utility
+             best_params = {spec.name: spec.clip(v) for spec, v in zip(param_specs, param_values)}
+             best_result = result
+
+         return -utility
+
+     x0 = np.array([spec.get_initial() for spec in param_specs])
+     bounds = [(spec.min_value, spec.max_value) for spec in param_specs]
+
+     if method.lower() in ("nelder-mead", "powell"):
+         result = optimize.minimize(
+             objective,
+             x0,
+             method=method,
+             options={"maxiter": max_iterations, "disp": False},
+         )
+     else:
+         result = optimize.minimize(
+             objective,
+             x0,
+             method=method,
+             bounds=bounds,
+             options={"maxiter": max_iterations, "disp": False},
+         )
+
+     return OptimizationResult(
+         optimal_params=best_params or {},
+         optimal_utility=best_utility,
+         certainty_equivalent=best_result.certainty_equivalent_consumption if best_result else 0.0,
+         success_rate=best_result.success_rate if best_result else 0.0,
+         iterations=result.nit if hasattr(result, "nit") else len(convergence_history),
+         convergence_history=convergence_history,
+         final_simulation=best_result,
+     )
+
+
+ def optimize_combined_policy(
+     spending_policy_class: type,
+     allocation_policy_class: type,
+     spending_param_specs: list[PolicyParameterSpec],
+     allocation_param_specs: list[PolicyParameterSpec],
+     initial_wealth: float,
+     config: SimulationConfig,
+     utility_model: UtilityModel,
+     spending_base_params: dict | None = None,
+     allocation_base_params: dict | None = None,
+     spending_floor: float | None = None,
+     method: str = "nelder-mead",
+     max_iterations: int = 100,
+ ) -> OptimizationResult:
+     """Jointly optimize spending and allocation policy parameters.
+
+     Args:
+         spending_policy_class: Spending policy class
+         allocation_policy_class: Allocation policy class
+         spending_param_specs: Spending parameters to optimize
+         allocation_param_specs: Allocation parameters to optimize
+         initial_wealth: Starting portfolio value
+         config: Simulation configuration
+         utility_model: Utility model for evaluation
+         spending_base_params: Fixed spending policy parameters
+         allocation_base_params: Fixed allocation policy parameters
+         spending_floor: Minimum spending floor
+         method: Optimization method
+         max_iterations: Maximum iterations
+
+     Returns:
+         OptimizationResult with optimal parameters for both policies
+     """
+     if spending_base_params is None:
+         spending_base_params = {}
+     if allocation_base_params is None:
+         allocation_base_params = {}
+
+     all_specs = spending_param_specs + allocation_param_specs
+     n_spending = len(spending_param_specs)
+
+     convergence_history = []
+     best_utility = -np.inf
+     best_params = None
+     best_result = None
+
+     def objective(param_values: np.ndarray) -> float:
+         nonlocal best_utility, best_params, best_result
+
+         spending_values = param_values[:n_spending]
+         allocation_values = param_values[n_spending:]
+
+         spending_policy = create_policy_with_params(
+             spending_policy_class,
+             spending_base_params,
+             spending_param_specs,
+             spending_values,
+         )
+         allocation_policy = create_policy_with_params(
+             allocation_policy_class,
+             allocation_base_params,
+             allocation_param_specs,
+             allocation_values,
+         )
+
+         result = run_simulation_with_utility(
+             initial_wealth=initial_wealth,
+             spending_policy=spending_policy,
+             allocation_policy=allocation_policy,
+             config=config,
+             utility_model=utility_model,
+             spending_floor=spending_floor,
+         )
+
+         utility = result.expected_lifetime_utility
+         convergence_history.append(utility)
+
+         if utility > best_utility:
+             best_utility = utility
+             best_params = {}
+             for spec, v in zip(spending_param_specs, spending_values):
+                 best_params[f"spending_{spec.name}"] = spec.clip(v)
+             for spec, v in zip(allocation_param_specs, allocation_values):
+                 best_params[f"allocation_{spec.name}"] = spec.clip(v)
+             best_result = result
+
+         return -utility
+
+     x0 = np.array([spec.get_initial() for spec in all_specs])
+     bounds = [(spec.min_value, spec.max_value) for spec in all_specs]
+
+     if method.lower() in ("nelder-mead", "powell"):
+         result = optimize.minimize(
+             objective,
+             x0,
+             method=method,
+             options={"maxiter": max_iterations, "disp": False},
+         )
+     else:
+         result = optimize.minimize(
+             objective,
+             x0,
+             method=method,
+             bounds=bounds,
+             options={"maxiter": max_iterations, "disp": False},
+         )
+
+     return OptimizationResult(
+         optimal_params=best_params or {},
+         optimal_utility=best_utility,
+         certainty_equivalent=best_result.certainty_equivalent_consumption if best_result else 0.0,
+         success_rate=best_result.success_rate if best_result else 0.0,
+         iterations=result.nit if hasattr(result, "nit") else len(convergence_history),
+         convergence_history=convergence_history,
+         final_simulation=best_result,
+     )
+
+
+ def grid_search_policy(
+     policy_class: type,
+     param_specs: list[PolicyParameterSpec],
+     grid_points: int,
+     evaluate_fn: Callable[[Any], float],
+     base_params: dict | None = None,
+ ) -> tuple[dict[str, float], float, np.ndarray]:
+     """Exhaustive grid search over policy parameters.
+
+     Useful for visualizing the utility surface or when the parameter
+     space is small enough for exhaustive search.
+
+     Args:
+         policy_class: Policy class to optimize
+         param_specs: Parameters to search over
+         grid_points: Number of points per dimension
+         evaluate_fn: Function that takes a policy and returns utility
+         base_params: Fixed parameters for the policy
+
+     Returns:
+         Tuple of (best_params, best_utility, utility_grid)
+     """
+     if base_params is None:
+         base_params = {}
+
+     # Create grid
+     grids = [
+         np.linspace(spec.min_value, spec.max_value, grid_points)
+         for spec in param_specs
+     ]
+
+     # Create meshgrid for all combinations
+     mesh = np.meshgrid(*grids, indexing="ij")
+     shape = mesh[0].shape
+
+     utilities = np.zeros(shape)
+     best_utility = -np.inf
+     best_params = {}
+
+     # Iterate over all grid points
+     it = np.nditer(mesh[0], flags=["multi_index"])
+     while not it.finished:
+         idx = it.multi_index
+         param_values = np.array([m[idx] for m in mesh])
+
+         policy = create_policy_with_params(
+             policy_class, base_params, param_specs, param_values
+         )
+
+         utility = evaluate_fn(policy)
+         utilities[idx] = utility
+
+         if utility > best_utility:
+             best_utility = utility
+             best_params = {
+                 spec.name: spec.clip(v)
+                 for spec, v in zip(param_specs, param_values)
+             }
+
+         it.iternext()
+
+     return best_params, best_utility, utilities
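
For orientation, a minimal sketch of how PolicyParameterSpec behaves; the parameter names (withdrawal_rate, horizon_years) are illustrative, not taken from fundedness itself.

# Hypothetical specs: only the PolicyParameterSpec dataclass above is real.
from fundedness.optimize import PolicyParameterSpec

withdrawal_rate = PolicyParameterSpec(name="withdrawal_rate", min_value=0.02, max_value=0.06)
horizon_years = PolicyParameterSpec(name="horizon_years", min_value=10, max_value=40, is_integer=True)

print(withdrawal_rate.get_initial())  # 0.04 -- midpoint, since no initial_value was given
print(withdrawal_rate.clip(0.10))     # 0.06 -- clamped to max_value
print(horizon_years.clip(22.6))       # 23   -- clamped to bounds, then rounded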
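
The three optimizers above all follow the same scipy pattern: minimize the negative of expected utility, and clip candidate parameters inside the objective because the Nelder-Mead/Powell branch runs without explicit bounds. A self-contained toy version of that pattern (the quadratic stand-in utility is an assumption, not part of the package):

import numpy as np
from scipy import optimize

lo, hi = 0.02, 0.06  # bounds, as a PolicyParameterSpec would hold them

def neg_utility(x: np.ndarray) -> float:
    rate = min(hi, max(lo, x[0]))  # clip inside the objective, like spec.clip
    return (rate - 0.04) ** 2      # negative of a toy utility peaked at 0.04

res = optimize.minimize(
    neg_utility, x0=np.array([0.03]), method="nelder-mead",
    options={"maxiter": 50, "disp": False},
)
print(res.x)  # converges near [0.04]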
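
Because grid_search_policy only needs a policy class and an evaluation callback, it can be exercised end to end with stand-ins. ToyPolicy and toy_utility below are hypothetical; only grid_search_policy and PolicyParameterSpec come from the module above.

from dataclasses import dataclass

from fundedness.optimize import PolicyParameterSpec, grid_search_policy

@dataclass
class ToyPolicy:
    rate: float
    floor: float = 0.0  # held fixed via base_params

def toy_utility(policy: ToyPolicy) -> float:
    return -(policy.rate - 0.04) ** 2  # concave surface, maximum at rate = 0.04

specs = [PolicyParameterSpec(name="rate", min_value=0.02, max_value=0.06)]
best_params, best_utility, grid = grid_search_policy(
    ToyPolicy, specs, grid_points=21,
    evaluate_fn=toy_utility, base_params={"floor": 10_000.0},
)
print(best_params)  # {'rate': 0.04} -- the 21-point grid contains the optimum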
fundedness/simulate.py CHANGED
@@ -31,6 +31,12 @@ class SimulationResult:
      median_terminal_wealth: float = 0.0
      mean_terminal_wealth: float = 0.0
 
+     # Utility metrics (for utility-integrated simulation)
+     utility_paths: np.ndarray | None = None  # shape: n_simulations x n_years
+     expected_lifetime_utility: float | None = None
+     certainty_equivalent_consumption: float | None = None
+     utility_percentiles: dict[str, np.ndarray] = field(default_factory=dict)
+
      # Configuration
      n_simulations: int = 0
      n_years: int = 0
@@ -399,3 +405,155 @@ class AllocationPolicy:
      ) -> float | np.ndarray:
          """Get stock allocation (can be constant or per-path)."""
          raise NotImplementedError
+
+
+ def run_simulation_with_utility(
+     initial_wealth: float,
+     spending_policy: "SpendingPolicy",
+     allocation_policy: "AllocationPolicy",
+     config: SimulationConfig,
+     utility_model: "UtilityModel",
+     spending_floor: float | None = None,
+     survival_probabilities: np.ndarray | None = None,
+ ) -> SimulationResult:
+     """Run simulation tracking lifetime utility.
+
+     This function extends run_simulation_with_policy to also track utility
+     at each time step, calculate expected lifetime utility, and compute
+     the certainty equivalent consumption.
+
+     Args:
+         initial_wealth: Starting portfolio value
+         spending_policy: Policy determining annual spending
+         allocation_policy: Policy determining asset allocation
+         config: Simulation configuration
+         utility_model: Utility model for calculating period utility
+         spending_floor: Minimum acceptable spending
+         survival_probabilities: P(alive) at each year (optional)
+
+     Returns:
+         SimulationResult with utility metrics populated
+     """
+     n_sim = config.n_simulations
+     n_years = config.n_years
+     seed = config.random_seed
+
+     rng = np.random.default_rng(seed)
+
+     # Initialize paths
+     wealth_paths = np.zeros((n_sim, n_years + 1))
+     wealth_paths[:, 0] = initial_wealth
+     spending_paths = np.zeros((n_sim, n_years))
+     utility_paths = np.zeros((n_sim, n_years))
+
+     time_to_ruin = np.full(n_sim, np.inf)
+     # Check `is not None` so a floor of 0.0 is still tracked
+     time_to_floor_breach = np.full(n_sim, np.inf) if spending_floor is not None else None
+
+     # Default survival probabilities (all survive)
+     if survival_probabilities is None:
+         survival_probabilities = np.ones(n_years)
+
+     # Generate all random draws upfront
+     z = rng.standard_normal((n_sim, n_years))
+
+     # Simulate year by year
+     for year in range(n_years):
+         current_wealth = wealth_paths[:, year]
+
+         # Get spending from policy
+         spending = spending_policy.get_spending(
+             wealth=current_wealth,
+             year=year,
+             initial_wealth=initial_wealth,
+         )
+         spending_paths[:, year] = spending
+
+         # Calculate utility for this period's consumption (one scalar call
+         # per path; read back from spending_paths so a policy returning a
+         # scalar is handled too)
+         for i in range(n_sim):
+             utility_paths[i, year] = utility_model.utility(spending_paths[i, year])
+
+         # Track floor breach
+         if time_to_floor_breach is not None:
+             floor_breach_mask = (spending_paths[:, year] < spending_floor) & np.isinf(time_to_floor_breach)
+             time_to_floor_breach[floor_breach_mask] = year
+
+         # Get allocation from policy
+         stock_weight = allocation_policy.get_allocation(
+             wealth=current_wealth,
+             year=year,
+             initial_wealth=initial_wealth,
+         )
+
+         # Calculate returns for this allocation
+         portfolio_return = config.market_model.expected_portfolio_return(stock_weight)
+         portfolio_vol = config.market_model.portfolio_volatility(stock_weight)
+
+         returns = portfolio_return - portfolio_vol**2 / 2 + portfolio_vol * z[:, year]
+
+         # Update wealth
+         wealth_after_spending = np.maximum(current_wealth - spending, 0)
+         wealth_paths[:, year + 1] = wealth_after_spending * (1 + returns)
+
+         # Track ruin
+         ruin_mask = (wealth_paths[:, year + 1] <= 0) & np.isinf(time_to_ruin)
+         time_to_ruin[ruin_mask] = year + 1
+
+     # Calculate discounted lifetime utility for each path
+     discount_factors = np.array([
+         (1 + utility_model.time_preference) ** (-t) * survival_probabilities[t]
+         for t in range(n_years)
+     ])
+
+     # Lifetime utility per path
+     discounted_utilities = utility_paths * discount_factors
+     lifetime_utilities = np.sum(discounted_utilities, axis=1)
+
+     # Expected lifetime utility (mean across paths)
+     expected_lifetime_utility = np.mean(lifetime_utilities)
+
+     # Certainty equivalent consumption: the constant consumption that
+     # gives the same expected utility as the average spending per path
+     ce_consumption = utility_model.certainty_equivalent(
+         np.mean(spending_paths, axis=1)  # Average spending per path
+     )
+
+     # Calculate percentiles
+     wealth_percentiles = {}
+     spending_percentiles = {}
+     utility_percentiles = {}
+
+     for p in config.percentiles:
+         key = f"P{p}"
+         wealth_percentiles[key] = np.percentile(wealth_paths[:, 1:], p, axis=0)
+         spending_percentiles[key] = np.percentile(spending_paths, p, axis=0)
+         utility_percentiles[key] = np.percentile(utility_paths, p, axis=0)
+
+     terminal_wealth = wealth_paths[:, -1]
+
+     return SimulationResult(
+         wealth_paths=wealth_paths[:, 1:],
+         spending_paths=spending_paths,
+         utility_paths=utility_paths,
+         time_to_ruin=time_to_ruin,
+         time_to_floor_breach=time_to_floor_breach,
+         wealth_percentiles=wealth_percentiles,
+         spending_percentiles=spending_percentiles,
+         utility_percentiles=utility_percentiles,
+         success_rate=np.mean(np.isinf(time_to_ruin)),
+         floor_breach_rate=np.mean(~np.isinf(time_to_floor_breach)) if time_to_floor_breach is not None else 0.0,
+         median_terminal_wealth=np.median(terminal_wealth),
+         mean_terminal_wealth=np.mean(terminal_wealth),
+         expected_lifetime_utility=expected_lifetime_utility,
+         certainty_equivalent_consumption=ce_consumption,
+         n_simulations=n_sim,
+         n_years=n_years,
+         random_seed=seed,
+     )
+
+
+ # Placeholder so the string annotation "UtilityModel" above resolves at
+ # runtime; the concrete class lives in fundedness.models.utility.
+ UtilityModel = "UtilityModel"
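
The discounting step above reduces to weighting an (n_sim, n_years) array of period utilities by (1 + time_preference)^-t and the survival probabilities, then summing per path. A numeric sketch of just that step, assuming CRRA utility (the code above only guarantees that the utility model exposes utility(), time_preference, and certainty_equivalent()):

import numpy as np

gamma, time_preference = 3.0, 0.02  # assumed example values

def crra(c):
    # CRRA period utility; an assumption, not necessarily fundedness's model
    return (c ** (1.0 - gamma) - 1.0) / (1.0 - gamma)

n_sim, n_years = 4, 30
rng = np.random.default_rng(0)
spending_paths = rng.uniform(40_000, 60_000, size=(n_sim, n_years))
survival = np.ones(n_years)  # matches the default when none is supplied

utility_paths = crra(spending_paths)                            # (n_sim, n_years)
discount = (1 + time_preference) ** -np.arange(n_years) * survival
lifetime_utilities = (utility_paths * discount).sum(axis=1)     # one value per path
print(lifetime_utilities.mean())  # expected lifetime utility across paths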
fundedness/viz/__init__.py CHANGED
@@ -4,6 +4,14 @@ from fundedness.viz.colors import COLORS
  from fundedness.viz.comparison import create_strategy_comparison_chart
  from fundedness.viz.fan_chart import create_fan_chart
  from fundedness.viz.histogram import create_time_distribution_histogram
+ from fundedness.viz.optimal import (
+     create_optimal_allocation_curve,
+     create_optimal_policy_summary,
+     create_optimal_spending_curve,
+     create_sensitivity_heatmap,
+     create_spending_comparison_by_age,
+     create_utility_comparison_chart,
+ )
  from fundedness.viz.survival import create_survival_curve
  from fundedness.viz.tornado import create_tornado_chart
  from fundedness.viz.waterfall import create_cefr_waterfall
@@ -12,8 +20,14 @@ __all__ = [
      "COLORS",
      "create_cefr_waterfall",
      "create_fan_chart",
+     "create_optimal_allocation_curve",
+     "create_optimal_policy_summary",
+     "create_optimal_spending_curve",
+     "create_sensitivity_heatmap",
+     "create_spending_comparison_by_age",
      "create_strategy_comparison_chart",
      "create_survival_curve",
      "create_time_distribution_histogram",
      "create_tornado_chart",
+     "create_utility_comparison_chart",
  ]