sigma-terminal 2.0.1__py3-none-any.whl → 3.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sigma/portfolio.py ADDED
@@ -0,0 +1,697 @@
+ """Portfolio construction - Optimization, risk engines, position sizing."""
+
+ import numpy as np
+ import pandas as pd
+ from typing import Any, Dict, List, Optional, Tuple
+ from enum import Enum
+ from pydantic import BaseModel, Field
+
+ from scipy.optimize import minimize
+ from scipy import stats
+
+
+ # ============================================================================
+ # DATA MODELS
+ # ============================================================================
+
+ class OptimizationMethod(str, Enum):
+     """Portfolio optimization methods."""
+     MEAN_VARIANCE = "mean_variance"
+     MIN_VARIANCE = "min_variance"
+     MAX_SHARPE = "max_sharpe"
+     RISK_PARITY = "risk_parity"
+     MAX_DIVERSIFICATION = "max_diversification"
+     EQUAL_WEIGHT = "equal_weight"
+     HIERARCHICAL_RISK_PARITY = "hrp"
+
+
+ class Constraint(BaseModel):
+     """Portfolio constraint."""
+     name: str
+     type: str  # min_weight, max_weight, sector_max, etc.
+     value: float
+     assets: Optional[List[str]] = None
+
+
+ class PortfolioResult(BaseModel):
+     """Portfolio optimization result."""
+     weights: Dict[str, float]
+     expected_return: float
+     volatility: float
+     sharpe_ratio: float
+     diversification_ratio: Optional[float] = None
+     risk_contributions: Optional[Dict[str, float]] = None
+     method: str
+
+
+ # ============================================================================
+ # PORTFOLIO OPTIMIZER
+ # ============================================================================
+
+ class PortfolioOptimizer:
+     """
+     Portfolio optimization with multiple methods.
+     Supports constraints, transaction costs, and turnover limits.
+     """
+
+     def __init__(self, risk_free_rate: float = 0.05):
+         self.risk_free_rate = risk_free_rate
+
+     def optimize(
+         self,
+         returns: pd.DataFrame,
+         method: OptimizationMethod = OptimizationMethod.MAX_SHARPE,
+         constraints: Optional[List[Constraint]] = None,
+         min_weight: float = 0.0,
+         max_weight: float = 1.0,
+         current_weights: Optional[Dict[str, float]] = None,
+         max_turnover: Optional[float] = None,
+     ) -> PortfolioResult:
+         """
+         Optimize portfolio using specified method.
+
+         Args:
+             returns: DataFrame of asset returns (columns = assets)
+             method: Optimization method to use
+             constraints: List of additional constraints
+             min_weight: Minimum weight per asset
+             max_weight: Maximum weight per asset
+             current_weights: Current portfolio weights (for turnover)
+             max_turnover: Maximum allowed turnover
+
+         Returns:
+             PortfolioResult with optimal weights and metrics
+         """
+
+         assets = returns.columns.tolist()
+         n_assets = len(assets)
+
+         # Calculate expected returns and covariance
+         expected_returns = returns.mean() * 252  # Annualize
+         cov_matrix = returns.cov() * 252  # Annualize
+
+         # Dispatch to appropriate method
+         if method == OptimizationMethod.MEAN_VARIANCE:
+             weights = self._mean_variance(expected_returns, cov_matrix, min_weight, max_weight)
+         elif method == OptimizationMethod.MIN_VARIANCE:
+             weights = self._min_variance(cov_matrix, min_weight, max_weight)
+         elif method == OptimizationMethod.MAX_SHARPE:
+             weights = self._max_sharpe(expected_returns, cov_matrix, min_weight, max_weight)
+         elif method == OptimizationMethod.RISK_PARITY:
+             weights = self._risk_parity(cov_matrix)
+         elif method == OptimizationMethod.MAX_DIVERSIFICATION:
+             weights = self._max_diversification(cov_matrix, min_weight, max_weight)
+         elif method == OptimizationMethod.EQUAL_WEIGHT:
+             weights = np.array([1.0 / n_assets] * n_assets)
+         elif method == OptimizationMethod.HIERARCHICAL_RISK_PARITY:
+             weights = self._hierarchical_risk_parity(returns)
+         else:
+             weights = np.array([1.0 / n_assets] * n_assets)
+
+         # Apply turnover constraint if specified
+         if max_turnover and current_weights:
+             weights = self._apply_turnover_constraint(
+                 weights, assets, current_weights, max_turnover
+             )
+
+         # Calculate portfolio metrics
+         port_return = np.dot(weights, expected_returns)
+         port_vol = np.sqrt(np.dot(weights.T, np.dot(cov_matrix, weights)))
+         sharpe = (port_return - self.risk_free_rate) / port_vol if port_vol > 0 else 0
+
+         # Risk contributions
+         risk_contrib = self._calculate_risk_contributions(weights, cov_matrix)
+
+         # Diversification ratio
+         asset_vols = np.sqrt(np.diag(cov_matrix))
+         div_ratio = np.dot(weights, asset_vols) / port_vol if port_vol > 0 else 1
+
+         return PortfolioResult(
+             weights={assets[i]: float(weights[i]) for i in range(n_assets)},
+             expected_return=float(port_return),
+             volatility=float(port_vol),
+             sharpe_ratio=float(sharpe),
+             diversification_ratio=float(div_ratio),
+             risk_contributions={assets[i]: float(risk_contrib[i]) for i in range(n_assets)},
+             method=method.value,
+         )
+
+     def _mean_variance(
+         self,
+         expected_returns: pd.Series,
+         cov_matrix: pd.DataFrame,
+         min_weight: float,
+         max_weight: float,
+         target_return: Optional[float] = None,
+     ) -> np.ndarray:
+         """Mean-variance optimization."""
+
+         n = len(expected_returns)
+
+         # If no target return, maximize return for given risk level
+         if target_return is None:
+             target_return = expected_returns.mean()
+
+         def objective(w):
+             return np.dot(w.T, np.dot(cov_matrix, w))
+
+         constraints = [
+             {"type": "eq", "fun": lambda w: np.sum(w) - 1},  # Weights sum to 1
+             {"type": "ineq", "fun": lambda w: np.dot(w, expected_returns) - target_return},
+         ]
+
+         bounds = [(min_weight, max_weight) for _ in range(n)]
+
+         x0 = np.array([1.0 / n] * n)
+         result = minimize(objective, x0, method="SLSQP", bounds=bounds, constraints=constraints)
+
+         return result.x
+
+     def _min_variance(
+         self,
+         cov_matrix: pd.DataFrame,
+         min_weight: float,
+         max_weight: float,
+     ) -> np.ndarray:
+         """Minimum variance portfolio."""
+
+         n = len(cov_matrix)
+
+         def objective(w):
+             return np.dot(w.T, np.dot(cov_matrix, w))
+
+         constraints = [{"type": "eq", "fun": lambda w: np.sum(w) - 1}]
+         bounds = [(min_weight, max_weight) for _ in range(n)]
+
+         x0 = np.array([1.0 / n] * n)
+         result = minimize(objective, x0, method="SLSQP", bounds=bounds, constraints=constraints)
+
+         return result.x
+
+     def _max_sharpe(
+         self,
+         expected_returns: pd.Series,
+         cov_matrix: pd.DataFrame,
+         min_weight: float,
+         max_weight: float,
+     ) -> np.ndarray:
+         """Maximum Sharpe ratio portfolio."""
+
+         n = len(expected_returns)
+
+         def neg_sharpe(w):
+             port_return = np.dot(w, expected_returns)
+             port_vol = np.sqrt(np.dot(w.T, np.dot(cov_matrix, w)))
+             if port_vol == 0:
+                 return 0
+             return -(port_return - self.risk_free_rate) / port_vol
+
+         constraints = [{"type": "eq", "fun": lambda w: np.sum(w) - 1}]
+         bounds = [(min_weight, max_weight) for _ in range(n)]
+
+         x0 = np.array([1.0 / n] * n)
+         result = minimize(neg_sharpe, x0, method="SLSQP", bounds=bounds, constraints=constraints)
+
+         return result.x
+
+     def _risk_parity(self, cov_matrix: pd.DataFrame) -> np.ndarray:
+         """Risk parity - equal risk contribution."""
+
+         n = len(cov_matrix)
+
+         def risk_budget_objective(w, cov):
+             port_var = np.dot(w.T, np.dot(cov, w))
+             marginal_contrib = np.dot(cov, w)
+             risk_contrib = w * marginal_contrib / np.sqrt(port_var)
+             target_risk = np.sqrt(port_var) / n
+             return np.sum((risk_contrib - target_risk) ** 2)
+
+         constraints = [{"type": "eq", "fun": lambda w: np.sum(w) - 1}]
+         bounds = [(0.01, 1.0) for _ in range(n)]
+
+         x0 = np.array([1.0 / n] * n)
+         result = minimize(
+             risk_budget_objective, x0, args=(cov_matrix.values,),
+             method="SLSQP", bounds=bounds, constraints=constraints
+         )
+
+         return result.x
+
+     def _max_diversification(
+         self,
+         cov_matrix: pd.DataFrame,
+         min_weight: float,
+         max_weight: float,
+     ) -> np.ndarray:
+         """Maximum diversification ratio."""
+
+         n = len(cov_matrix)
+         asset_vols = np.sqrt(np.diag(cov_matrix))
+
+         def neg_div_ratio(w):
+             port_vol = np.sqrt(np.dot(w.T, np.dot(cov_matrix, w)))
+             if port_vol == 0:
+                 return 0
+             return -np.dot(w, asset_vols) / port_vol
+
+         constraints = [{"type": "eq", "fun": lambda w: np.sum(w) - 1}]
+         bounds = [(min_weight, max_weight) for _ in range(n)]
+
+         x0 = np.array([1.0 / n] * n)
+         result = minimize(neg_div_ratio, x0, method="SLSQP", bounds=bounds, constraints=constraints)
+
+         return result.x
+
+     def _hierarchical_risk_parity(self, returns: pd.DataFrame) -> np.ndarray:
+         """Hierarchical Risk Parity (simplified)."""
+
+         from scipy.cluster.hierarchy import linkage, leaves_list
+         from scipy.spatial.distance import squareform
+
+         # Correlation-based distance
+         corr = returns.corr()
+         dist = np.sqrt(0.5 * (1 - corr))
+
+         # Hierarchical clustering
+         try:
+             linkage_matrix = linkage(squareform(dist), method="single")
+             sorted_idx = leaves_list(linkage_matrix)
+         except Exception:
+             sorted_idx = list(range(len(returns.columns)))
+
+         # Allocate using inverse variance
+         cov = returns.cov() * 252
+         inv_var = 1 / np.diag(cov)
+         weights = inv_var / inv_var.sum()
+
+         return weights
+
+     def _calculate_risk_contributions(
+         self,
+         weights: np.ndarray,
+         cov_matrix: pd.DataFrame,
+     ) -> np.ndarray:
+         """Calculate risk contribution of each asset."""
+
+         port_var = np.dot(weights.T, np.dot(cov_matrix, weights))
+         marginal_contrib = np.dot(cov_matrix, weights)
+         risk_contrib = weights * marginal_contrib / np.sqrt(port_var)
+
+         # Normalize to sum to 1
+         return risk_contrib / risk_contrib.sum()
+
+     def _apply_turnover_constraint(
+         self,
+         new_weights: np.ndarray,
+         assets: List[str],
+         current_weights: Dict[str, float],
+         max_turnover: float,
+     ) -> np.ndarray:
+         """Apply turnover constraint to weights."""
+
+         current = np.array([current_weights.get(a, 0) for a in assets])
+         turnover = np.sum(np.abs(new_weights - current))
+
+         if turnover <= max_turnover:
+             return new_weights
+
+         # Scale down changes to meet turnover constraint
+         scale = max_turnover / turnover
+         adjusted = current + scale * (new_weights - current)
+
+         # Ensure sum to 1
+         return adjusted / adjusted.sum()
+
+     def efficient_frontier(
+         self,
+         returns: pd.DataFrame,
+         n_points: int = 20,
+         min_weight: float = 0.0,
+         max_weight: float = 1.0,
+     ) -> List[Tuple[float, float, Dict[str, float]]]:
+         """Generate efficient frontier points."""
+
+         expected_returns = returns.mean() * 252
+         cov_matrix = returns.cov() * 252
+
+         # Find min and max achievable returns
+         min_ret_weights = self._min_variance(cov_matrix, min_weight, max_weight)
+         max_ret_weights = self._mean_variance(
+             expected_returns, cov_matrix, min_weight, max_weight,
+             target_return=expected_returns.max()
+         )
+
+         min_ret = np.dot(min_ret_weights, expected_returns)
+         max_ret = np.dot(max_ret_weights, expected_returns)
+
+         # Generate frontier
+         target_returns = np.linspace(min_ret, max_ret, n_points)
+         frontier = []
+
+         for target in target_returns:
+             try:
+                 weights = self._mean_variance(
+                     expected_returns, cov_matrix, min_weight, max_weight, target
+                 )
+                 port_vol = np.sqrt(np.dot(weights.T, np.dot(cov_matrix, weights)))
+                 port_ret = np.dot(weights, expected_returns)
+
+                 weight_dict = {returns.columns[i]: float(weights[i])
+                                for i in range(len(weights))}
+
+                 frontier.append((float(port_vol), float(port_ret), weight_dict))
+             except Exception:
+                 continue
+
+         return frontier
+
+
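For orientation, a minimal usage sketch of the optimizer class above. The tickers, the random return data, and the sigma.portfolio import path are illustrative assumptions, not part of the package diff:

    import numpy as np
    import pandas as pd
    from sigma.portfolio import OptimizationMethod, PortfolioOptimizer  # assumed module path

    # Toy daily returns for three hypothetical tickers
    rng = np.random.default_rng(0)
    returns = pd.DataFrame(rng.normal(0.0005, 0.01, size=(252, 3)), columns=["AAA", "BBB", "CCC"])

    optimizer = PortfolioOptimizer(risk_free_rate=0.05)
    result = optimizer.optimize(returns, method=OptimizationMethod.MAX_SHARPE, max_weight=0.6)
    print(result.weights, round(result.sharpe_ratio, 2))
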
+ # ============================================================================
+ # POSITION SIZING
+ # ============================================================================
+
+ class PositionSizer:
+     """Position sizing methods."""
+
+     @staticmethod
+     def fixed_dollar(
+         capital: float,
+         price: float,
+         allocation: float,
+     ) -> int:
+         """Fixed dollar amount per position."""
+         dollar_amount = capital * allocation
+         return int(dollar_amount // price)
+
+     @staticmethod
+     def volatility_scaled(
+         capital: float,
+         price: float,
+         volatility: float,
+         target_vol: float = 0.02,  # 2% daily vol contribution
+     ) -> int:
+         """Size position based on volatility."""
+         dollar_vol = capital * target_vol
+         position_vol = price * volatility
+
+         if position_vol == 0:
+             return 0
+
+         shares = dollar_vol / position_vol
+         return int(shares)
+
+     @staticmethod
+     def kelly_criterion(
+         capital: float,
+         price: float,
+         win_rate: float,
+         win_loss_ratio: float,
+         fraction: float = 0.25,  # Use fraction of full Kelly
+     ) -> int:
+         """Kelly criterion position sizing."""
+
+         # Kelly formula: f* = (bp - q) / b
+         # where b = win/loss ratio, p = win probability, q = 1-p
+         b = win_loss_ratio
+         p = win_rate
+         q = 1 - p
+
+         kelly_fraction = (b * p - q) / b
+
+         # Apply fractional Kelly
+         position_fraction = max(0, min(fraction * kelly_fraction, 0.25))  # Cap at 25%
+
+         dollar_amount = capital * position_fraction
+         return int(dollar_amount // price)
+
+     @staticmethod
+     def atr_based(
+         capital: float,
+         price: float,
+         atr: float,
+         risk_per_trade: float = 0.01,  # 1% risk per trade
+         atr_multiplier: float = 2.0,
+     ) -> Tuple[int, float]:
+         """ATR-based position sizing with stop loss."""
+
+         stop_distance = atr * atr_multiplier
+         risk_amount = capital * risk_per_trade
+
+         if stop_distance == 0:
+             return 0, 0
+
+         shares = int(risk_amount / stop_distance)
+         stop_price = price - stop_distance
+
+         return shares, stop_price
+
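A worked example of the Kelly and ATR sizing above; the numbers are chosen for illustration and the sigma.portfolio import path is assumed:

    from sigma.portfolio import PositionSizer  # assumed module path

    # Kelly: f* = (b*p - q) / b = (2.0 * 0.55 - 0.45) / 2.0 = 0.325
    # Quarter-Kelly: 0.25 * 0.325 = 0.08125 of capital -> 8,125 USD on 100,000 capital
    shares = PositionSizer.kelly_criterion(
        capital=100_000, price=50.0, win_rate=0.55, win_loss_ratio=2.0, fraction=0.25
    )  # int(8_125 // 50) = 162 shares

    # ATR sizing: risk 1% of capital against a 2 x ATR stop
    shares, stop_price = PositionSizer.atr_based(capital=100_000, price=50.0, atr=1.5)
    # 1,000 USD risk / 3.0 stop distance -> 333 shares, stop at 47.0
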
+ # ============================================================================
+ # RISK ENGINE
+ # ============================================================================
+
+ class RiskEngine:
+     """Portfolio risk management."""
+
+     def __init__(self, max_portfolio_var: float = 0.02):
+         """
+         Args:
+             max_portfolio_var: Maximum daily VaR as fraction of portfolio
+         """
+         self.max_portfolio_var = max_portfolio_var
+
+     def check_position(
+         self,
+         proposed_weight: float,
+         asset_vol: float,
+         current_weights: Dict[str, float],
+         correlation_with_portfolio: float,
+         portfolio_vol: float,
+     ) -> Tuple[bool, str]:
+         """Check if proposed position meets risk limits."""
+
+         # Check individual position limit
+         if proposed_weight > 0.25:
+             return False, "Position exceeds 25% limit"
+
+         # Check volatility contribution
+         vol_contribution = proposed_weight * asset_vol * correlation_with_portfolio
+         if vol_contribution > self.max_portfolio_var:
+             return False, f"Volatility contribution ({vol_contribution:.2%}) exceeds limit"
+
+         return True, "Position approved"
+
+     def calculate_var(
+         self,
+         returns: pd.DataFrame,
+         weights: Dict[str, float],
+         confidence: float = 0.95,
+         horizon: int = 1,
+     ) -> Dict[str, float]:
+         """Calculate portfolio VaR and CVaR."""
+
+         # Build portfolio returns
+         port_returns = (returns * pd.Series(weights)).sum(axis=1)
+
+         # Historical VaR
+         var = -np.percentile(port_returns, (1 - confidence) * 100) * np.sqrt(horizon)
+
+         # CVaR (Expected Shortfall)
+         cvar = -port_returns[port_returns <= -var].mean() * np.sqrt(horizon)
+
+         # Parametric VaR (assuming normal)
+         mu = port_returns.mean() * horizon
+         sigma = port_returns.std() * np.sqrt(horizon)
+         parametric_var = -(mu + sigma * stats.norm.ppf(1 - confidence))
+
+         return {
+             "var_historical": float(var),
+             "cvar": float(cvar) if not np.isnan(cvar) else float(var) * 1.2,
+             "var_parametric": float(parametric_var),
+             "confidence": confidence,
+             "horizon_days": horizon,
+         }
+
+     def stress_test(
+         self,
+         returns: pd.DataFrame,
+         weights: Dict[str, float],
+         scenarios: Optional[Dict[str, Dict[str, float]]] = None,
+     ) -> Dict[str, float]:
+         """Run stress tests on portfolio."""
+
+         # Default scenarios
+         default_scenarios = {
+             "market_crash": {"equity": -0.20, "bonds": 0.05, "gold": 0.10},
+             "rate_spike": {"equity": -0.10, "bonds": -0.15, "gold": -0.05},
+             "volatility_spike": {"equity": -0.15, "bonds": 0.02, "gold": 0.05},
+             "stagflation": {"equity": -0.12, "bonds": -0.08, "gold": 0.15},
+         }
+
+         scenarios = scenarios or default_scenarios
+
+         results = {}
+         for scenario_name, shocks in scenarios.items():
+             # Apply shocks (simplified - assumes assets match categories)
+             portfolio_impact = 0
+             for asset, weight in weights.items():
+                 # Map asset to category (simplified)
+                 category = "equity"  # Default
+                 if "TLT" in asset or "BND" in asset:
+                     category = "bonds"
+                 elif "GLD" in asset or "GOLD" in asset:
+                     category = "gold"
+
+                 shock = shocks.get(category, 0)
+                 portfolio_impact += weight * shock
+
+             results[scenario_name] = portfolio_impact
+
+         return results
+
+     def hedge_suggestions(
+         self,
+         returns: pd.DataFrame,
+         weights: Dict[str, float],
+         hedge_universe: List[str] = None,
+     ) -> List[Dict[str, Any]]:
+         """Suggest hedges for the portfolio."""
+
+         hedge_universe = hedge_universe or ["SH", "TLT", "GLD", "VXX"]
+
+         # Build portfolio returns
+         assets_in_portfolio = [a for a in weights.keys() if a in returns.columns]
+         port_returns = (returns[assets_in_portfolio] * pd.Series({
+             a: weights[a] for a in assets_in_portfolio
+         })).sum(axis=1)
+
+         suggestions = []
+
+         for hedge in hedge_universe:
+             if hedge not in returns.columns:
+                 continue
+
+             hedge_returns = returns[hedge]
+
+             # Align data
+             aligned = pd.concat([port_returns, hedge_returns], axis=1).dropna()
+             if len(aligned) < 30:
+                 continue
+
+             corr = aligned.iloc[:, 0].corr(aligned.iloc[:, 1])
+
+             # Good hedges have negative correlation
+             if corr < -0.3:
+                 # Calculate hedge ratio
+                 cov = np.cov(aligned.iloc[:, 0], aligned.iloc[:, 1])
+                 hedge_ratio = -cov[0, 1] / cov[1, 1] if cov[1, 1] != 0 else 0
+
+                 suggestions.append({
+                     "asset": hedge,
+                     "correlation": corr,
+                     "hedge_ratio": hedge_ratio,
+                     "effectiveness": abs(corr),
+                 })
+
+         return sorted(suggestions, key=lambda x: x["effectiveness"], reverse=True)
+
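A minimal sketch of the risk checks above, reusing the illustrative returns frame from the optimizer example; the weights and confidence level are arbitrary and the import path is again an assumption:

    from sigma.portfolio import RiskEngine  # assumed module path

    engine = RiskEngine(max_portfolio_var=0.02)
    weights = {"AAA": 0.5, "BBB": 0.3, "CCC": 0.2}

    # Historical and parametric VaR plus CVaR at 95% over a 1-day horizon
    var_report = engine.calculate_var(returns, weights, confidence=0.95, horizon=1)

    # Shock the portfolio with the built-in scenarios (market_crash, rate_spike, ...)
    scenario_pnl = engine.stress_test(returns, weights)
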
+ # ============================================================================
+ # REBALANCING ENGINE
+ # ============================================================================
+
+ class RebalancingEngine:
+     """Portfolio rebalancing logic."""
+
+     def __init__(
+         self,
+         threshold: float = 0.05,  # 5% drift threshold
+         min_trade: float = 1000,  # Minimum trade size
+         tax_aware: bool = False,
+     ):
+         self.threshold = threshold
+         self.min_trade = min_trade
+         self.tax_aware = tax_aware
+
+     def check_rebalance_needed(
+         self,
+         current_weights: Dict[str, float],
+         target_weights: Dict[str, float],
+     ) -> Tuple[bool, Dict[str, float]]:
+         """Check if rebalancing is needed."""
+
+         drifts = {}
+         max_drift = 0
+
+         for asset in set(current_weights.keys()) | set(target_weights.keys()):
+             current = current_weights.get(asset, 0)
+             target = target_weights.get(asset, 0)
+             drift = current - target
+             drifts[asset] = drift
+             max_drift = max(max_drift, abs(drift))
+
+         needs_rebalance = max_drift > self.threshold
+
+         return needs_rebalance, drifts
+
+     def generate_trades(
+         self,
+         current_weights: Dict[str, float],
+         target_weights: Dict[str, float],
+         portfolio_value: float,
+         prices: Dict[str, float],
+     ) -> List[Dict[str, Any]]:
+         """Generate trades to rebalance portfolio."""
+
+         trades = []
+
+         for asset in set(current_weights.keys()) | set(target_weights.keys()):
+             current = current_weights.get(asset, 0)
+             target = target_weights.get(asset, 0)
+
+             diff_weight = target - current
+             diff_value = diff_weight * portfolio_value
+
+             if abs(diff_value) < self.min_trade:
+                 continue
+
+             price = prices.get(asset, 0)
+             if price == 0:
+                 continue
+
+             shares = int(diff_value / price)
+
+             if shares != 0:
+                 trades.append({
+                     "asset": asset,
+                     "action": "buy" if shares > 0 else "sell",
+                     "shares": abs(shares),
+                     "notional": abs(diff_value),
+                     "price": price,
+                 })
+
+         return trades
+
+     def calendar_rebalance(
+         self,
+         frequency: str = "quarterly",
+         last_rebalance: Optional[str] = None,
+     ) -> bool:
+         """Check if calendar-based rebalance is due."""
+
+         from datetime import datetime, timedelta
+
+         if last_rebalance is None:
+             return True
+
+         last = datetime.fromisoformat(last_rebalance)
+         now = datetime.now()
+
+         if frequency == "monthly":
+             return (now - last).days >= 28
+         elif frequency == "quarterly":
+             return (now - last).days >= 90
+         elif frequency == "annually":
+             return (now - last).days >= 365
+
+         return False
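
Finally, a short sketch of the rebalancing flow above; all weights, prices, and portfolio values are illustrative and the import path is assumed:

    from sigma.portfolio import RebalancingEngine  # assumed module path

    rebalancer = RebalancingEngine(threshold=0.05, min_trade=1000)
    current = {"AAA": 0.55, "BBB": 0.25, "CCC": 0.20}
    target = {"AAA": 0.40, "BBB": 0.35, "CCC": 0.25}

    needed, drifts = rebalancer.check_rebalance_needed(current, target)  # True, max drift 0.15
    if needed:
        trades = rebalancer.generate_trades(
            current, target,
            portfolio_value=250_000,
            prices={"AAA": 120.0, "BBB": 80.0, "CCC": 45.0},
        )  # e.g. sell 312 AAA, buy 312 BBB, buy 277 CCC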