skfolio 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. skfolio/__init__.py +29 -0
  2. skfolio/cluster/__init__.py +8 -0
  3. skfolio/cluster/_hierarchical.py +387 -0
  4. skfolio/datasets/__init__.py +20 -0
  5. skfolio/datasets/_base.py +389 -0
  6. skfolio/datasets/data/__init__.py +0 -0
  7. skfolio/datasets/data/factors_dataset.csv.gz +0 -0
  8. skfolio/datasets/data/sp500_dataset.csv.gz +0 -0
  9. skfolio/datasets/data/sp500_index.csv.gz +0 -0
  10. skfolio/distance/__init__.py +26 -0
  11. skfolio/distance/_base.py +55 -0
  12. skfolio/distance/_distance.py +574 -0
  13. skfolio/exceptions.py +30 -0
  14. skfolio/measures/__init__.py +76 -0
  15. skfolio/measures/_enums.py +355 -0
  16. skfolio/measures/_measures.py +607 -0
  17. skfolio/metrics/__init__.py +3 -0
  18. skfolio/metrics/_scorer.py +121 -0
  19. skfolio/model_selection/__init__.py +18 -0
  20. skfolio/model_selection/_combinatorial.py +407 -0
  21. skfolio/model_selection/_validation.py +194 -0
  22. skfolio/model_selection/_walk_forward.py +221 -0
  23. skfolio/moments/__init__.py +41 -0
  24. skfolio/moments/covariance/__init__.py +29 -0
  25. skfolio/moments/covariance/_base.py +101 -0
  26. skfolio/moments/covariance/_covariance.py +1108 -0
  27. skfolio/moments/expected_returns/__init__.py +21 -0
  28. skfolio/moments/expected_returns/_base.py +31 -0
  29. skfolio/moments/expected_returns/_expected_returns.py +415 -0
  30. skfolio/optimization/__init__.py +36 -0
  31. skfolio/optimization/_base.py +147 -0
  32. skfolio/optimization/cluster/__init__.py +13 -0
  33. skfolio/optimization/cluster/_nco.py +348 -0
  34. skfolio/optimization/cluster/hierarchical/__init__.py +13 -0
  35. skfolio/optimization/cluster/hierarchical/_base.py +440 -0
  36. skfolio/optimization/cluster/hierarchical/_herc.py +406 -0
  37. skfolio/optimization/cluster/hierarchical/_hrp.py +368 -0
  38. skfolio/optimization/convex/__init__.py +16 -0
  39. skfolio/optimization/convex/_base.py +1944 -0
  40. skfolio/optimization/convex/_distributionally_robust.py +392 -0
  41. skfolio/optimization/convex/_maximum_diversification.py +417 -0
  42. skfolio/optimization/convex/_mean_risk.py +974 -0
  43. skfolio/optimization/convex/_risk_budgeting.py +560 -0
  44. skfolio/optimization/ensemble/__init__.py +6 -0
  45. skfolio/optimization/ensemble/_base.py +87 -0
  46. skfolio/optimization/ensemble/_stacking.py +326 -0
  47. skfolio/optimization/naive/__init__.py +3 -0
  48. skfolio/optimization/naive/_naive.py +173 -0
  49. skfolio/population/__init__.py +3 -0
  50. skfolio/population/_population.py +883 -0
  51. skfolio/portfolio/__init__.py +13 -0
  52. skfolio/portfolio/_base.py +1096 -0
  53. skfolio/portfolio/_multi_period_portfolio.py +610 -0
  54. skfolio/portfolio/_portfolio.py +842 -0
  55. skfolio/pre_selection/__init__.py +7 -0
  56. skfolio/pre_selection/_pre_selection.py +342 -0
  57. skfolio/preprocessing/__init__.py +3 -0
  58. skfolio/preprocessing/_returns.py +114 -0
  59. skfolio/prior/__init__.py +18 -0
  60. skfolio/prior/_base.py +63 -0
  61. skfolio/prior/_black_litterman.py +238 -0
  62. skfolio/prior/_empirical.py +163 -0
  63. skfolio/prior/_factor_model.py +268 -0
  64. skfolio/typing.py +50 -0
  65. skfolio/uncertainty_set/__init__.py +23 -0
  66. skfolio/uncertainty_set/_base.py +108 -0
  67. skfolio/uncertainty_set/_bootstrap.py +281 -0
  68. skfolio/uncertainty_set/_empirical.py +237 -0
  69. skfolio/utils/__init__.py +0 -0
  70. skfolio/utils/bootstrap.py +115 -0
  71. skfolio/utils/equations.py +350 -0
  72. skfolio/utils/sorting.py +117 -0
  73. skfolio/utils/stats.py +466 -0
  74. skfolio/utils/tools.py +567 -0
  75. skfolio-0.0.1.dist-info/LICENSE +29 -0
  76. skfolio-0.0.1.dist-info/METADATA +568 -0
  77. skfolio-0.0.1.dist-info/RECORD +79 -0
  78. skfolio-0.0.1.dist-info/WHEEL +5 -0
  79. skfolio-0.0.1.dist-info/top_level.txt +1 -0
skfolio/optimization/cluster/hierarchical/_herc.py
@@ -0,0 +1,406 @@
+ """Hierarchical Equal Risk Contribution estimator."""
+
+ # Author: Hugo Delatte <delatte.hugo@gmail.com>
+ # License: BSD 3 clause
+
+
+ import numpy as np
+ import numpy.typing as npt
+ import pandas as pd
+ import scipy.cluster.hierarchy as sch
+
+ import skfolio.typing as skt
+ from skfolio.cluster import HierarchicalClustering
+ from skfolio.distance import BaseDistance, PearsonDistance
+ from skfolio.measures import ExtraRiskMeasure, RiskMeasure
+ from skfolio.optimization.cluster.hierarchical._base import (
+     BaseHierarchicalOptimization,
+ )
+ from skfolio.prior import BasePrior, EmpiricalPrior
+ from skfolio.utils.tools import check_estimator
+
+
+ class HierarchicalEqualRiskContribution(BaseHierarchicalOptimization):
+     r"""Hierarchical Equal Risk Contribution estimator.
+
+     The Hierarchical Equal Risk Contribution is a portfolio optimization method
+     developed by Thomas Raffinot [2]_.
+
+     This algorithm uses a distance matrix to compute hierarchical clusters with the
+     Hierarchical Tree Clustering algorithm, then computes, for each cluster, the
+     total cluster risk of an inverse-risk allocation.
+     The final step is the top-down recursive division of the dendrogram, where the
+     asset weights are updated using naive risk parity within clusters.
+
+     It differs from the Hierarchical Risk Parity by exploiting the dendrogram shape
+     during the top-down recursive division instead of bisecting it.
+
+     .. note ::
+
+         The default linkage method is set to the Ward variance minimization
+         algorithm, which is more stable and has better properties than the
+         single-linkage method [4]_.
+
+     Parameters
+     ----------
+     risk_measure : RiskMeasure or ExtraRiskMeasure, default=RiskMeasure.VARIANCE
+         :class:`~skfolio.measures.RiskMeasure` or :class:`~skfolio.measures.ExtraRiskMeasure`
+         of the optimization.
+         Can be any of:
+
+             * MEAN_ABSOLUTE_DEVIATION
+             * FIRST_LOWER_PARTIAL_MOMENT
+             * VARIANCE
+             * SEMI_VARIANCE
+             * CVAR
+             * EVAR
+             * WORST_REALIZATION
+             * CDAR
+             * MAX_DRAWDOWN
+             * AVERAGE_DRAWDOWN
+             * EDAR
+             * ULCER_INDEX
+             * GINI_MEAN_DIFFERENCE
+             * VALUE_AT_RISK
+             * DRAWDOWN_AT_RISK
+             * ENTROPIC_RISK_MEASURE
+             * FOURTH_CENTRAL_MOMENT
+             * FOURTH_LOWER_PARTIAL_MOMENT
+             * SKEW
+             * KURTOSIS
+
+         The default is `RiskMeasure.VARIANCE`.
+
+     prior_estimator : BasePrior, optional
+         :ref:`Prior estimator <prior>`.
+         The prior estimator is used to estimate the :class:`~skfolio.prior.PriorModel`
+         containing the estimation of the assets' expected returns, covariance matrix
+         and returns. The moments and returns estimations are used for the risk
+         computation, and the returns estimation is used by the distance matrix estimator.
+         The default (`None`) is to use :class:`~skfolio.prior.EmpiricalPrior`.
+
+     distance_estimator : BaseDistance, optional
+         :ref:`Distance estimator <distance>`.
+         The distance estimator is used to estimate the codependence and the distance
+         matrix needed for the computation of the linkage matrix.
+         The default (`None`) is to use :class:`~skfolio.distance.PearsonDistance`.
+
+     hierarchical_clustering_estimator : HierarchicalClustering, optional
+         :ref:`Hierarchical Clustering estimator <hierarchical_clustering>`.
+         The hierarchical clustering estimator is used to compute the linkage matrix
+         and the hierarchical clustering of the assets based on the distance matrix.
+         The default (`None`) is to use
+         :class:`~skfolio.cluster.HierarchicalClustering`.
+
+     min_weights : float | dict[str, float] | array-like of shape (n_assets, ), default=0.0
+         Minimum assets weights (weights lower bounds). Negative weights are not allowed.
+         If a float is provided, it is applied to each asset. `None` is equivalent to
+         `-np.Inf` (no lower bound). If a dictionary is provided, its (key/value) pair
+         must be the (asset name/asset minimum weight) and the input `X` of the `fit`
+         method must be a DataFrame with the assets names in columns. When using a
+         dictionary, assets values that are not provided are assigned a minimum weight
+         of `0.0`. The default is 0.0 (no short selling).
+
+         Example:
+
+             * min_weights = 0 --> long only portfolio (no short selling).
+             * min_weights = None --> no lower bound (same as `-np.Inf`).
+             * min_weights = {"SX5E": 0, "SPX": 0.1}
+             * min_weights = [0, 0.1]
+
+     max_weights : float | dict[str, float] | array-like of shape (n_assets, ), default=1.0
+         Maximum assets weights (weights upper bounds). Weights above 1.0 are not
+         allowed. If a float is provided, it is applied to each asset. `None` is
+         equivalent to `+np.Inf` (no upper bound). If a dictionary is provided, its
+         (key/value) pair must be the (asset name/asset maximum weight) and the input `X`
+         of the `fit` method must be a DataFrame with the assets names in columns. When
+         using a dictionary, assets values that are not provided are assigned a maximum
+         weight of `1.0`. The default is 1.0 (each asset is below 100%).
+
+         Example:
+
+             * max_weights = 0 --> no long position (short only portfolio).
+             * max_weights = 0.5 --> each weight must be below 50%.
+             * max_weights = {"SX5E": 1, "SPX": 0.25}
+             * max_weights = [1, 0.25]
+
+     transaction_costs : float | dict[str, float] | array-like of shape (n_assets, ), default=0.0
+         Transaction costs of the assets. It is used to add linear transaction costs to
+         the optimization problem:
+
+         .. math:: total\_cost = \sum_{i=1}^{N} c_{i} \times |w_{i} - w\_prev_{i}|
+
+         with :math:`c_{i}` the transaction cost of asset i, :math:`w_{i}` its weight
+         and :math:`w\_prev_{i}` its previous weight (defined in `previous_weights`).
+         The float :math:`total\_cost` is used in the portfolio expected return:
+
+         .. math:: expected\_return = \mu^{T} \cdot w - total\_cost
+
+         with :math:`\mu` the vector of assets' expected returns and :math:`w` the
+         vector of assets weights.
+
+         If a float is provided, it is applied to each asset.
+         If a dictionary is provided, its (key/value) pair must be the
+         (asset name/asset cost) and the input `X` of the `fit` method must be a
+         DataFrame with the assets names in columns.
+         The default value is `0.0`.
+
+         .. warning::
+
+             Based on the above formula, the periodicity of the transaction costs
+             needs to be homogeneous with the periodicity of :math:`\mu`. For example,
+             if the input `X` is composed of **daily** returns, the `transaction_costs`
+             need to be expressed in **daily** costs.
+             (See :ref:`sphx_glr_auto_examples_1_mean_risk_plot_6_transaction_costs.py`)
+
+     management_fees : float | dict[str, float] | array-like of shape (n_assets, ), default=0.0
+         Management fees of the assets. It is used to add linear management fees to the
+         optimization problem:
+
+         .. math:: total\_fee = \sum_{i=1}^{N} f_{i} \times w_{i}
+
+         with :math:`f_{i}` the management fee of asset i and :math:`w_{i}` its weight.
+         The float :math:`total\_fee` is used in the portfolio expected return:
+
+         .. math:: expected\_return = \mu^{T} \cdot w - total\_fee
+
+         with :math:`\mu` the vector of assets' expected returns and :math:`w` the
+         vector of assets weights.
+
+         If a float is provided, it is applied to each asset.
+         If a dictionary is provided, its (key/value) pair must be the
+         (asset name/asset fee) and the input `X` of the `fit` method must be a
+         DataFrame with the assets names in columns.
+         The default value is `0.0`.
+
+         .. warning::
+
+             Based on the above formula, the periodicity of the management fees needs
+             to be homogeneous with the periodicity of :math:`\mu`. For example, if the
+             input `X` is composed of **daily** returns, the `management_fees` need to
+             be expressed in **daily** fees.
+
+         .. note::
+
+             Another approach is to apply the management fees directly to the input `X`
+             in order to express the returns net of fees. However, when estimating the
+             :math:`\mu` parameter using, for example, Shrinkage estimators, this
+             approach would mix a deterministic value with an uncertain one, leading to
+             unwanted bias in the management fees.
+
+     previous_weights : float | dict[str, float] | array-like of shape (n_assets, ), optional
+         Previous weights of the assets. Previous weights are used to compute the
+         portfolio total cost. If a float is provided, it is applied to each asset.
+         If a dictionary is provided, its (key/value) pair must be the
+         (asset name/asset previous weight) and the input `X` of the `fit` method must
+         be a DataFrame with the assets names in columns.
+         The default (`None`) means no previous weights.
+
+     portfolio_params : dict, optional
+         Portfolio parameters passed to the portfolio evaluated by the `predict` and
+         `score` methods. If not provided, the `name`, `transaction_costs`,
+         `management_fees` and `previous_weights` are copied from the optimization
+         model and systematically passed to the portfolio.
+
+     Attributes
+     ----------
+     weights_ : ndarray of shape (n_assets,)
+         Weights of the assets.
+
+     distance_estimator_ : BaseDistance
+         Fitted `distance_estimator`.
+
+     hierarchical_clustering_estimator_ : HierarchicalClustering
+         Fitted `hierarchical_clustering_estimator`.
+
+     n_features_in_ : int
+         Number of assets seen during `fit`.
+
+     feature_names_in_ : ndarray of shape (`n_features_in_`,)
+         Names of assets seen during `fit`. Defined only when `X`
+         has asset names that are all strings.
+
+     References
+     ----------
+     .. [1] "Hierarchical clustering-based asset allocation",
+         The Journal of Portfolio Management,
+         Thomas Raffinot (2017).
+
+     .. [2] "The hierarchical equal risk contribution portfolio",
+         Thomas Raffinot (2018).
+
+     .. [3] "Application of two-order difference to gap statistic",
+         Yue, Wang & Wei (2009).
+
+     .. [4] "A review of two decades of correlations, hierarchies, networks and
+         clustering in financial markets",
+         Gautier Marti, Frank Nielsen, Mikołaj Bińkowski, Philippe Donnat (2020).
+     """
+
+     def __init__(
+         self,
+         risk_measure: RiskMeasure | ExtraRiskMeasure = RiskMeasure.VARIANCE,
+         prior_estimator: BasePrior | None = None,
+         distance_estimator: BaseDistance | None = None,
+         hierarchical_clustering_estimator: HierarchicalClustering | None = None,
+         min_weights: skt.MultiInput | None = 0.0,
+         max_weights: skt.MultiInput | None = 1.0,
+         transaction_costs: skt.MultiInput = 0.0,
+         management_fees: skt.MultiInput = 0.0,
+         previous_weights: skt.MultiInput | None = None,
+         portfolio_params: dict | None = None,
+     ):
+         super().__init__(
+             risk_measure=risk_measure,
+             prior_estimator=prior_estimator,
+             distance_estimator=distance_estimator,
+             hierarchical_clustering_estimator=hierarchical_clustering_estimator,
+             min_weights=min_weights,
+             max_weights=max_weights,
+             transaction_costs=transaction_costs,
+             management_fees=management_fees,
+             previous_weights=previous_weights,
+             portfolio_params=portfolio_params,
+         )
+
+     def fit(
+         self, X: npt.ArrayLike, y: None = None
+     ) -> "HierarchicalEqualRiskContribution":
+         """Fit the Hierarchical Equal Risk Contribution estimator.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_observations, n_assets)
+             Price returns of the assets.
+
+         y : Ignored
+             Not used, present for API consistency by convention.
+
+         Returns
+         -------
+         self : HierarchicalEqualRiskContribution
+             Fitted estimator.
+         """
+         # Validate
+         if not isinstance(self.risk_measure, RiskMeasure | ExtraRiskMeasure):
+             raise TypeError(
+                 "`risk_measure` must be of type `RiskMeasure` or `ExtraRiskMeasure`"
+             )
+         self.prior_estimator_ = check_estimator(
+             self.prior_estimator,
+             default=EmpiricalPrior(),
+             check_type=BasePrior,
+         )
+         self.distance_estimator_ = check_estimator(
+             self.distance_estimator,
+             default=PearsonDistance(),
+             check_type=BaseDistance,
+         )
+         self.hierarchical_clustering_estimator_ = check_estimator(
+             self.hierarchical_clustering_estimator,
+             default=HierarchicalClustering(),
+             check_type=HierarchicalClustering,
+         )
+
+         # Fit the estimators
+         self.prior_estimator_.fit(X, y)
+         prior_model = self.prior_estimator_.prior_model_
+         returns = prior_model.returns
+
+         # To keep the asset_names
+         if isinstance(X, pd.DataFrame):
+             returns = pd.DataFrame(returns, columns=X.columns)
+
+         self.distance_estimator_.fit(returns)
+         distance = self.distance_estimator_.distance_
+
+         # To keep the asset_names
+         if isinstance(X, pd.DataFrame):
+             distance = pd.DataFrame(distance, columns=X.columns)
+
+         self.hierarchical_clustering_estimator_.fit(distance)
+
+         n_clusters = self.hierarchical_clustering_estimator_.n_clusters_
+         labels = self.hierarchical_clustering_estimator_.labels_
+         linkage_matrix = self.hierarchical_clustering_estimator_.linkage_matrix_
+
+         X = self._validate_data(X)
+         n_assets = X.shape[1]
+
+         min_weights, max_weights = self._convert_weights_bounds(n_assets=n_assets)
+
+         assets_risks = self._unitary_risks(prior_model=prior_model)
+         weights = np.ones(n_assets)
+         clusters_weights = np.ones(n_clusters)
+
+         clusters = [np.argwhere(labels == i).flatten() for i in range(n_clusters)]
+         clusters_sets = [set(cluster_ids) for cluster_ids in clusters]
+
+         # Compute cluster total risk based on inverse-risk allocation
+         cluster_risks = []
+         for cluster_ids in clusters:
+             inv_risk_w = np.zeros(n_assets)
+             inv_risk_w[cluster_ids] = 1 / assets_risks[cluster_ids]
+             inv_risk_w /= inv_risk_w.sum()
+             cluster_risks.append(
+                 self._risk(weights=inv_risk_w, prior_model=prior_model)
+             )
+             weights[cluster_ids] = inv_risk_w[cluster_ids]
+         cluster_risks = np.array(cluster_risks)
+
+         # Compute the cluster weights using the dendrogram structure.
+         # Recurse from the root until each of the defined clusters is reached and
+         # update the weights using naive risk parity.
+         def _recurse(node):
+             # Stop when the cluster is reached
+             if set(node.pre_order()) in clusters_sets:
+                 return
+
+             left_node = node.get_left()
+             right_node = node.get_right()
+             left_cluster_tree = set(left_node.pre_order())
+             right_cluster_tree = set(right_node.pre_order())
+
+             left_cluster = []
+             right_cluster = []
+             for i, cluster_ids in enumerate(clusters_sets):
+                 if cluster_ids.issubset(left_cluster_tree):
+                     left_cluster.append(i)
+                 elif cluster_ids.issubset(right_cluster_tree):
+                     right_cluster.append(i)
+
+             if not left_cluster or not right_cluster:
+                 raise ValueError("Corrupted")
+
+             left_cluster = np.array(left_cluster)
+             right_cluster = np.array(right_cluster)
+             left_risk = np.sum(cluster_risks[left_cluster])
+             right_risk = np.sum(cluster_risks[right_cluster])
+
+             alpha = 1 - left_risk / (left_risk + right_risk)
+
+             # Weights constraints
+             alpha = self._apply_weight_constraints_to_alpha(
+                 alpha=alpha,
+                 weights=weights,
+                 max_weights=max_weights,
+                 min_weights=min_weights,
+                 left_cluster=left_cluster,
+                 right_cluster=right_cluster,
+             )
+
+             clusters_weights[left_cluster] *= alpha
+             clusters_weights[right_cluster] *= 1 - alpha
+
+             _recurse(left_node)
+             _recurse(right_node)
+
+         root = sch.to_tree(linkage_matrix)
+         _recurse(root)
+
+         # Combine intra-cluster weights with inter-cluster weights
+         for i, cluster_ids in enumerate(clusters):
+             weights[cluster_ids] *= clusters_weights[i]
+
+         self.weights_ = weights
+         return self
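
The heart of the top-down division in `_recurse` is the split ratio `alpha = 1 - left_risk / (left_risk + right_risk)`. The short sketch below walks that arithmetic on made-up cluster risks (purely illustrative numbers, not values from the package) to show how successive splits produce the `clusters_weights` that later scale the intra-cluster inverse-risk weights.

import numpy as np

# Illustrative risks of three leaf clusters (hypothetical values).
cluster_risks = np.array([0.04, 0.02, 0.06])
clusters_weights = np.ones(3)

# First dendrogram split: clusters {0, 1} on the left, cluster {2} on the right.
left, right = np.array([0, 1]), np.array([2])
left_risk, right_risk = cluster_risks[left].sum(), cluster_risks[right].sum()
alpha = 1 - left_risk / (left_risk + right_risk)   # 0.5: both branches carry equal risk
clusters_weights[left] *= alpha
clusters_weights[right] *= 1 - alpha

# Second split, inside the left branch: cluster {0} versus cluster {1}.
left, right = np.array([0]), np.array([1])
left_risk, right_risk = cluster_risks[left].sum(), cluster_risks[right].sum()
alpha = 1 - left_risk / (left_risk + right_risk)   # 1/3: the riskier side gets the smaller share
clusters_weights[left] *= alpha
clusters_weights[right] *= 1 - alpha

print(clusters_weights)   # approx. [0.167, 0.333, 0.5], which then multiplies the intra-cluster weights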
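
To put the estimator in context, here is a minimal usage sketch. It assumes that the helpers suggested by the file list (`skfolio/datasets/_base.py`, `skfolio/preprocessing/_returns.py`) expose `load_sp500_dataset` and `prices_to_returns`, and that `HierarchicalEqualRiskContribution` is re-exported from `skfolio.optimization`; treat these import paths as assumptions about this release rather than verified API.

from skfolio.datasets import load_sp500_dataset        # assumed price loader (datasets/_base.py)
from skfolio.measures import RiskMeasure
from skfolio.optimization import HierarchicalEqualRiskContribution  # assumed re-export
from skfolio.preprocessing import prices_to_returns    # assumed prices -> returns helper

prices = load_sp500_dataset()      # DataFrame of prices, one asset per column
X = prices_to_returns(prices)      # price returns, the input expected by `fit`

model = HierarchicalEqualRiskContribution(risk_measure=RiskMeasure.CVAR)
model.fit(X)
print(model.weights_)              # ndarray of shape (n_assets,), one weight per asset

portfolio = model.predict(X)       # portfolio built from `weights_` and `portfolio_params`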