skfolio 0.9.1__tar.gz → 0.10.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {skfolio-0.9.1/src/skfolio.egg-info → skfolio-0.10.1}/PKG-INFO +105 -14
- {skfolio-0.9.1 → skfolio-0.10.1}/README.rst +104 -13
- {skfolio-0.9.1 → skfolio-0.10.1}/pyproject.toml +1 -1
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/multivariate/_vine_copula.py +35 -34
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/univariate/_base.py +20 -15
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/exceptions.py +5 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/measures/__init__.py +2 -0
- skfolio-0.10.1/src/skfolio/measures/_measures.py +870 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/_base.py +21 -4
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/cluster/hierarchical/_base.py +16 -13
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/cluster/hierarchical/_herc.py +6 -6
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/cluster/hierarchical/_hrp.py +8 -6
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/convex/_base.py +238 -144
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/convex/_distributionally_robust.py +32 -20
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/convex/_maximum_diversification.py +15 -15
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/convex/_mean_risk.py +26 -24
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/convex/_risk_budgeting.py +23 -21
- skfolio-0.10.1/src/skfolio/optimization/ensemble/__init__.py +6 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/ensemble/_stacking.py +1 -1
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/naive/_naive.py +2 -2
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/population/_population.py +30 -9
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/portfolio/_base.py +68 -26
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/portfolio/_multi_period_portfolio.py +5 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/portfolio/_portfolio.py +5 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/prior/__init__.py +6 -2
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/prior/_base.py +7 -3
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/prior/_black_litterman.py +14 -12
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/prior/_empirical.py +8 -7
- skfolio-0.10.1/src/skfolio/prior/_entropy_pooling.py +1493 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/prior/_factor_model.py +39 -22
- skfolio-0.10.1/src/skfolio/prior/_opinion_pooling.py +475 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/prior/_synthetic_data.py +10 -8
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/uncertainty_set/_bootstrap.py +4 -4
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/uncertainty_set/_empirical.py +6 -6
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/utils/equations.py +10 -4
- skfolio-0.10.1/src/skfolio/utils/figure.py +185 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/utils/tools.py +4 -2
- {skfolio-0.9.1 → skfolio-0.10.1/src/skfolio.egg-info}/PKG-INFO +105 -14
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio.egg-info/SOURCES.txt +4 -2
- skfolio-0.9.1/src/skfolio/measures/_measures.py +0 -633
- skfolio-0.9.1/src/skfolio/optimization/ensemble/__init__.py +0 -8
- skfolio-0.9.1/src/skfolio/synthetic_returns/__init__.py +0 -1
- {skfolio-0.9.1 → skfolio-0.10.1}/LICENSE +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/MANIFEST.in +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/setup.cfg +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/cluster/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/cluster/_hierarchical.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/datasets/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/datasets/_base.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/datasets/data/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/datasets/data/factors_dataset.csv.gz +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/datasets/data/sp500_dataset.csv.gz +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/datasets/data/sp500_index.csv.gz +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distance/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distance/_base.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distance/_distance.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/_base.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/copula/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/copula/_base.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/copula/_clayton.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/copula/_gaussian.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/copula/_gumbel.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/copula/_independent.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/copula/_joe.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/copula/_selection.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/copula/_student_t.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/copula/_utils.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/multivariate/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/multivariate/_base.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/multivariate/_utils.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/univariate/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/univariate/_gaussian.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/univariate/_johnson_su.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/univariate/_normal_inverse_gaussian.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/univariate/_selection.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/distribution/univariate/_student_t.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/measures/_enums.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/metrics/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/metrics/_scorer.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/model_selection/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/model_selection/_combinatorial.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/model_selection/_validation.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/model_selection/_walk_forward.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/covariance/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/covariance/_base.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/covariance/_denoise_covariance.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/covariance/_detone_covariance.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/covariance/_empirical_covariance.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/covariance/_ew_covariance.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/covariance/_gerber_covariance.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/covariance/_graphical_lasso_cv.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/covariance/_implied_covariance.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/covariance/_ledoit_wolf.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/covariance/_oas.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/covariance/_shrunk_covariance.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/expected_returns/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/expected_returns/_base.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/expected_returns/_empirical_mu.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/expected_returns/_equilibrium_mu.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/expected_returns/_ew_mu.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/moments/expected_returns/_shrunk_mu.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/cluster/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/cluster/_nco.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/cluster/hierarchical/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/convex/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/optimization/naive/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/population/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/portfolio/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/pre_selection/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/pre_selection/_drop_correlated.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/pre_selection/_drop_zero_variance.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/pre_selection/_select_complete.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/pre_selection/_select_k_extremes.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/pre_selection/_select_non_dominated.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/pre_selection/_select_non_expiring.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/preprocessing/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/preprocessing/_returns.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/typing.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/uncertainty_set/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/uncertainty_set/_base.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/utils/__init__.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/utils/bootstrap.py +0 -0
- /skfolio-0.9.1/src/skfolio/optimization/ensemble/_base.py → /skfolio-0.10.1/src/skfolio/utils/composition.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/utils/sorting.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio/utils/stats.py +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio.egg-info/dependency_links.txt +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio.egg-info/requires.txt +0 -0
- {skfolio-0.9.1 → skfolio-0.10.1}/src/skfolio.egg-info/top_level.txt +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: skfolio
-Version: 0.9.1
+Version: 0.10.1
 Summary: Portfolio optimization built on top of scikit-learn
 Author-email: Hugo Delatte <delatte.hugo@gmail.com>
 Maintainer-email: Hugo Delatte <delatte.hugo@gmail.com>, Matteo Manzi <matteomanzi09@gmail.com>
@@ -92,7 +92,7 @@ Dynamic: license-file
 
 .. -*- mode: rst -*-
 
-|Licence| |Codecov| |Black| |PythonVersion| |PyPi| |CI/CD| |Downloads| |Ruff| |Contribution| |Website| |JupyterLite|
+|Licence| |Codecov| |Black| |PythonVersion| |PyPi| |CI/CD| |Downloads| |Ruff| |Contribution| |Website| |JupyterLite| |Discord|
 
 .. |Licence| image:: https://img.shields.io/badge/License-BSD%203--Clause-blue.svg
    :target: https://github.com/skfolio/skfolio/blob/main/LICENSE
@@ -127,6 +127,9 @@ Dynamic: license-file
 .. |JupyterLite| image:: https://jupyterlite.rtfd.io/en/latest/_static/badge.svg
    :target: https://skfolio.org/lite
 
+.. |Discord| image:: https://img.shields.io/badge/Discord-Join%20Chat-5865F2?logo=discord&logoColor=white
+   :target: https://discord.gg/Bu7EtNYugS
+
 .. |PythonMinVersion| replace:: 3.10
 .. |NumpyMinVersion| replace:: 1.23.4
 .. |ScipyMinVersion| replace:: 1.8.0
@@ -281,6 +284,8 @@ Available models
     * Black & Litterman
     * Factor Model
     * Synthetic Data (Stress Test, Factor Stress Test)
+    * Entropy Pooling
+    * Opinion Pooling
 
 * Uncertainty Set Estimator:
     * On Expected Returns:
@@ -296,6 +301,7 @@ Available models
     * Drop Highly Correlated Assets
     * Select Non-Expiring Assets
     * Select Complete Assets (handle late inception, delisting, etc.)
+    * Drop Zero Variance
 
 * Cross-Validation and Model Selection:
     * Compatible with all `sklearn` methods (KFold, etc.)
@@ -379,13 +385,21 @@ Imports
     )
     from skfolio.optimization import (
         MeanRisk,
+        HierarchicalRiskParity,
         NestedClustersOptimization,
         ObjectiveFunction,
         RiskBudgeting,
     )
     from skfolio.pre_selection import SelectKExtremes
     from skfolio.preprocessing import prices_to_returns
-    from skfolio.prior import
+    from skfolio.prior import (
+        BlackLitterman,
+        EmpiricalPrior,
+        EntropyPooling,
+        FactorModel,
+        OpinionPooling,
+        SyntheticData,
+    )
     from skfolio.uncertainty_set import BootstrapMuUncertaintySet
 
 Load Dataset
@@ -571,11 +585,13 @@ Factor Model
 
     factor_prices = load_factors_dataset()
 
-    X,
-    X_train, X_test,
+    X, factors = prices_to_returns(prices, factor_prices)
+    X_train, X_test, factors_train, factors_test = train_test_split(
+        X, factors, test_size=0.33, shuffle=False
+    )
 
     model = MeanRisk(prior_estimator=FactorModel())
-    model.fit(X_train,
+    model.fit(X_train, factors_train)
 
     print(model.weights_)
 
@@ -584,7 +600,6 @@ Factor Model
     print(portfolio.calmar_ratio)
     print(portfolio.summary())
 
-
 Factor Model & Covariance Detoning
 ----------------------------------
 .. code-block:: python
@@ -645,7 +660,7 @@ Combinatorial Purged Cross-Validation
 
     cv = CombinatorialPurgedCV(n_folds=10, n_test_folds=2)
 
-    print(cv.
+    print(cv.summary(X_train))
 
     population = cross_val_predict(model, X_train, cv=cv)
 
@@ -661,7 +676,7 @@ Minimum CVaR Optimization on Synthetic Returns
 .. code-block:: python
 
     vine = VineCopula(log_transform=True, n_jobs=-1)
-    prior =
+    prior = SyntheticData(distribution_estimator=vine, n_samples=2000)
     model = MeanRisk(risk_measure=RiskMeasure.CVAR, prior_estimator=prior)
     model.fit(X)
     print(model.weights_)
@@ -671,7 +686,7 @@ Stress Test
 -----------
 .. code-block:: python
 
-    vine = VineCopula(log_transform=True, central_assets=["BAC"]
+    vine = VineCopula(log_transform=True, central_assets=["BAC"], n_jobs=-1)
     vine.fit(X)
     X_stressed = vine.sample(n_samples=10_000, conditioning = {"BAC": -0.2})
     ptf_stressed = model.predict(X_stressed)
@@ -689,7 +704,7 @@ Minimum CVaR Optimization on Synthetic Factors
     )
     factor_model = FactorModel(factor_prior_estimator=factor_prior)
     model = MeanRisk(risk_measure=RiskMeasure.CVAR, prior_estimator=factor_model)
-    model.fit(X,
+    model.fit(X, factors)
     print(model.weights_)
 
 
@@ -700,9 +715,85 @@ Factor Stress Test
     factor_model.set_params(factor_prior_estimator__sample_args=dict(
         conditioning={"QUAL": -0.5}
     ))
-    factor_model.fit(X,
-
-    stressed_ptf = model.predict(
+    factor_model.fit(X, factors)
+    stressed_dist = factor_model.return_distribution_
+    stressed_ptf = model.predict(stressed_dist)
+
+Entropy Pooling
+---------------
+.. code-block:: python
+
+    entropy_pooling = EntropyPooling(
+        mean_views=[
+            "JPM == -0.002",
+            "PG >= LLY",
+            "BAC >= prior(BAC) * 1.2",
+        ],
+        cvar_views=[
+            "GE == 0.08",
+        ],
+    )
+    entropy_pooling.fit(X)
+    print(entropy_pooling.relative_entropy_)
+    print(entropy_pooling.effective_number_of_scenarios_)
+    print(entropy_pooling.return_distribution_.sample_weight)
+
+CVaR Hierarchical Risk Parity optimization on Entropy Pooling
+-------------------------------------------------------------
+.. code-block:: python
+
+    entropy_pooling = EntropyPooling(cvar_views=["GE == 0.08"])
+    model = HierarchicalRiskParity(
+        risk_measure=RiskMeasure.CVAR,
+        prior_estimator=entropy_pooling
+    )
+    model.fit(X)
+    print(model.weights_)
+
+Stress Test with Entropy Pooling on Factor Synthetic Data
+---------------------------------------------------------
+.. code-block:: python
+
+    # Regular Vine Copula and sampling of 100,000 synthetic factor returns
+    factor_synth = SyntheticData(
+        n_samples=100_000,
+        distribution_estimator=VineCopula(log_transform=True, n_jobs=-1, random_state=0)
+    )
+
+    # Entropy Pooling by imposing a CVaR-95% of 10% on the Quality factor
+    factor_entropy_pooling = EntropyPooling(
+        prior_estimator=factor_synth,
+        cvar_views=["QUAL == 0.10"],
+    )
+
+    factor_entropy_pooling.fit(X, factors)
+
+    # We retrieve the stressed distribution:
+    stressed_dist = factor_model.return_distribution_
+
+    # We stress-test our portfolio:
+    stressed_ptf = model.predict(stressed_dist)
+
+Opinion Pooling
+---------------
+.. code-block:: python
+
+    # We consider two expert opinions, each generated via Entropy Pooling with
+    # user-defined views.
+    # We assign probabilities of 40% to Expert 1, 50% to Expert 2, and by default
+    # the remaining 10% is allocated to the prior distribution:
+    opinion_1 = EntropyPooling(cvar_views=["AMD == 0.10"])
+    opinion_2 = EntropyPooling(
+        mean_views=["AMD >= BAC", "JPM <= prior(JPM) * 0.8"],
+        cvar_views=["GE == 0.12"],
+    )
+
+    opinion_pooling = OpinionPooling(
+        estimators=[("opinion_1", opinion_1), ("opinion_2", opinion_2)],
+        opinion_probabilities=[0.4, 0.5],
+    )
+
+    opinion_pooling.fit(X)
 
 
 Recognition

README.rst
@@ -1,6 +1,6 @@
 .. -*- mode: rst -*-
 
-|Licence| |Codecov| |Black| |PythonVersion| |PyPi| |CI/CD| |Downloads| |Ruff| |Contribution| |Website| |JupyterLite|
+|Licence| |Codecov| |Black| |PythonVersion| |PyPi| |CI/CD| |Downloads| |Ruff| |Contribution| |Website| |JupyterLite| |Discord|
 
 .. |Licence| image:: https://img.shields.io/badge/License-BSD%203--Clause-blue.svg
    :target: https://github.com/skfolio/skfolio/blob/main/LICENSE

The remaining README.rst hunks are identical in content to the corresponding PKG-INFO hunks above (PKG-INFO embeds the README); only the line offsets differ:

@@ -35,6 +35,9 @@
@@ -189,6 +192,8 @@ Available models
@@ -204,6 +209,7 @@ Available models
@@ -287,13 +293,21 @@ Imports
@@ -479,11 +493,13 @@ Factor Model
@@ -492,7 +508,6 @@ Factor Model
@@ -553,7 +568,7 @@ Combinatorial Purged Cross-Validation
@@ -569,7 +584,7 @@ Minimum CVaR Optimization on Synthetic Returns
@@ -579,7 +594,7 @@ Stress Test
@@ -597,7 +612,7 @@ Minimum CVaR Optimization on Synthetic Factors
@@ -608,9 +623,85 @@ Factor Stress Test

src/skfolio/distribution/multivariate/_vine_copula.py
@@ -34,7 +34,6 @@ import numpy as np
 import numpy.typing as npt
 import plotly.express as px
 import plotly.graph_objects as go
-import scipy.stats as st
 import sklearn.utils as sku
 import sklearn.utils.parallel as skp
 import sklearn.utils.validation as skv
@@ -65,6 +64,7 @@ from skfolio.distribution.univariate import (
     StudentT,
     select_univariate_dist,
 )
+from skfolio.utils.figure import kde_trace
 from skfolio.utils.tools import input_to_array, validate_input_list
 
 _UNIFORM_SAMPLE_EPSILON = 1e-14
@@ -996,6 +996,7 @@ class VineCopula(BaseMultivariateDist):
         | None = None,
         subset: list[int | str] | None = None,
         n_samples: int = 500,
+        percentile_cutoff: float | None = None,
         title: str = "Vine Copula Marginal Distributions",
     ) -> go.Figure:
         """
@@ -1025,7 +1026,7 @@ class VineCopula(BaseMultivariateDist):
             If an array-like of length `n_samples` is provided, each sample is
             conditioned on the corresponding value in the array for that asset.
 
-
+            When using conditional sampling, it is recommended that the
             assets you condition on are set as central during the vine copula
             construction. This can be specified via the `central_assets` parameter in
             the vine copula instantiation.
@@ -1041,6 +1042,12 @@ class VineCopula(BaseMultivariateDist):
             rows than `n_samples`, the value is adjusted to match the number of rows in
             `X` to ensure balanced visualization.
 
+        percentile_cutoff : float, default=None
+            Percentile cutoff for tail truncation (percentile), in percent.
+            If a float p is provided, the distribution support is truncated at
+            the p-th and (100 - p)-th percentiles.
+            If None, no truncation is applied (uses full min/max of returns).
+
         title : str, default="Vine Copula Marginal Distributions"
             The title for the plot.
 
@@ -1051,7 +1058,6 @@ class VineCopula(BaseMultivariateDist):
         """
         n_assets = self.n_features_in_
         subset = subset or list(range(n_assets))
-        colors = px.colors.qualitative.Plotly
         if X is not None:
             X = np.asarray(X)
             if X.ndim != 2:
@@ -1070,30 +1076,43 @@ class VineCopula(BaseMultivariateDist):
             n_samples = X.shape[0]
 
         samples = self.sample(n_samples=n_samples, conditioning=conditioning)
+        colors = px.colors.qualitative.Plotly
 
-        traces = []
+        traces: list[go.Scatter] = []
         for i, s in enumerate(subset):
+            visible = True if i == 0 else "legendonly"
+            color = colors[i % len(colors)]
+            asset = self.feature_names_in_[s]
+
             traces.append(
-
+                kde_trace(
                     x=samples[:, s],
-
-
-                    name=f"{
-
+                    sample_weight=None,
+                    percentile_cutoff=percentile_cutoff,
+                    name=f"{asset} Generated",
+                    line_color=color,
+                    fill_opacity=0.17,
+                    line_dash="solid",
+                    line_width=1,
+                    visible=visible,
                 )
             )
 
-
-        for i, s in enumerate(subset):
+            if X is not None:
                 traces.append(
-
+                    kde_trace(
                         x=X[:, s],
-
-
-                        name=f"{
-
+                        sample_weight=None,
+                        percentile_cutoff=percentile_cutoff,
+                        name=f"{asset} Empirical",
+                        line_color=color,
+                        fill_opacity=0.17,
+                        line_dash="dash",
+                        line_width=1.5,
+                        visible=visible,
                     )
                 )
+
         fig = go.Figure(data=traces)
         fig.update_layout(
             title=title,
@@ -1234,21 +1253,3 @@ def _inverse_partial_derivative(
     if is_count:
         return np.array([np.nan])
     return edge.copula.inverse_partial_derivative(X)
-
-
-def _kde_trace(
-    x: np.ndarray, opacity: float, color: str, name: str, visible: bool
-) -> go.Scatter:
-    """Gaussian KDE line plot."""
-    kde = st.gaussian_kde(x)
-    x = np.linspace(min(x), max(x), 500)
-    return go.Scatter(
-        x=x,
-        y=kde(x),
-        mode="lines",
-        name=name,
-        line=dict(color=color),
-        fill="tozeroy",
-        opacity=opacity,
-        visible=visible,
-    )
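
The private `_kde_trace` helper removed above is superseded by a shared `kde_trace` utility imported from the new `src/skfolio/utils/figure.py` module (+185 lines, not included in this section of the diff). Below is a minimal sketch of what such a helper could look like, assuming the signature implied by the new call sites (`sample_weight`, `percentile_cutoff`, `name`, `line_color`, `fill_opacity`, `line_dash`, `line_width`, `visible`) and the percentile truncation described in the docstring above; `kde_trace_sketch` is a hypothetical name and the actual implementation may differ:

    # Hypothetical sketch only; the real kde_trace lives in skfolio/utils/figure.py
    # (not shown in this section) and may differ in signature and behaviour.
    import numpy as np
    import plotly.graph_objects as go
    import scipy.stats as st


    def kde_trace_sketch(
        x: np.ndarray,
        sample_weight: np.ndarray | None,
        percentile_cutoff: float | None,
        name: str,
        line_color: str,
        fill_opacity: float,
        line_dash: str,
        line_width: float,
        visible: bool | str,
    ) -> go.Scatter:
        """Gaussian KDE line trace with optional sample weights and tail truncation."""
        # Weighted KDE: gaussian_kde accepts observation weights (e.g. the
        # sample_weight produced by Entropy Pooling).
        kde = st.gaussian_kde(x, weights=sample_weight)
        # Truncate the support at the p-th and (100 - p)-th percentiles, matching
        # the percentile_cutoff semantics documented above; otherwise use min/max.
        if percentile_cutoff is None:
            lower, upper = float(np.min(x)), float(np.max(x))
        else:
            lower, upper = np.percentile(x, [percentile_cutoff, 100.0 - percentile_cutoff])
        grid = np.linspace(lower, upper, 500)
        return go.Scatter(
            x=grid,
            y=kde(grid),
            mode="lines",
            name=name,
            line=dict(color=line_color, dash=line_dash, width=line_width),
            fill="tozeroy",
            # The real helper may instead bake fill_opacity into an rgba fillcolor,
            # as done in the univariate plot change below; here it is applied to
            # the whole trace, as the removed _kde_trace did.
            opacity=fill_opacity,
            visible=visible,
        )

Keeping the styling parameters explicit lets the same helper draw both the "Generated" (solid) and "Empirical" (dashed) traces in the rewritten loop above.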

src/skfolio/distribution/univariate/_base.py
@@ -202,6 +202,23 @@ class BaseUnivariateDist(BaseDistribution, ABC):
         x = np.linspace(lower_bound, upper_bound, 1000)
 
         traces = []
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=UserWarning)
+            pdfs = np.exp(self.score_samples(x.reshape(-1, 1)))
+        traces.append(
+            go.Scatter(
+                x=x,
+                y=pdfs.flatten(),
+                mode="lines",
+                name=self.__class__.__name__,
+                line=dict(color="rgb(31, 119, 180)", dash="solid", width=1),
+                fill="tozeroy",
+                fillcolor="rgba(31, 119, 180, 0.17)",
+                opacity=1.0,
+            )
+        )
+
         if X is not None:
             with warnings.catch_warnings():
                 warnings.filterwarnings(
@@ -216,25 +233,13 @@ class BaseUnivariateDist(BaseDistribution, ABC):
                     y=y_kde,
                     mode="lines",
                     name="Empirical KDE",
-                    line=dict(color="rgb(85,168,104)"),
+                    line=dict(color="rgb(85, 168, 104)", dash="dash", width=2),
                     fill="tozeroy",
+                    fillcolor="rgba(85, 168, 104, 0.17)",
+                    opacity=1.0,
                 )
             )
 
-        with warnings.catch_warnings():
-            warnings.filterwarnings("ignore", category=UserWarning)
-            pdfs = np.exp(self.score_samples(x.reshape(-1, 1)))
-            traces.append(
-                go.Scatter(
-                    x=x,
-                    y=pdfs.flatten(),
-                    mode="lines",
-                    name=self.__class__.__name__,
-                    line=dict(color="rgb(31, 119, 180)"),
-                    fill="tozeroy",
-                )
-            )
-
         fig = go.Figure(data=traces)
         fig.update_layout(
             title=title,
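
For context, the fitted-PDF trace moved to the top of the plot relies on the scikit-learn convention that `score_samples` returns the log-density, so `np.exp` recovers the density that is then drawn with a `tozeroy` fill. A standalone illustration of that pattern (plain scikit-learn and plotly, not skfolio code):

    # Illustration of the exp(score_samples) -> PDF pattern used above.
    import numpy as np
    import plotly.graph_objects as go
    from sklearn.neighbors import KernelDensity

    rng = np.random.default_rng(0)
    returns = rng.normal(0.0005, 0.01, size=1_000).reshape(-1, 1)

    kde = KernelDensity(bandwidth=0.002).fit(returns)
    grid = np.linspace(returns.min(), returns.max(), 1000).reshape(-1, 1)
    pdf = np.exp(kde.score_samples(grid))  # score_samples is the log-density

    fig = go.Figure(
        go.Scatter(x=grid.ravel(), y=pdf, mode="lines", fill="tozeroy", name="KernelDensity")
    )
    fig.show()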

src/skfolio/exceptions.py
@@ -13,6 +13,7 @@ __all__ = [
     "GroupNotFoundError",
     "NonPositiveVarianceError",
     "OptimizationError",
+    "SolverError",
 ]
 
 
@@ -20,6 +21,10 @@ class OptimizationError(Exception):
     """Optimization Did not converge."""
 
 
+class SolverError(Exception):
+    """Solver error."""
+
+
 class EquationToMatrixError(Exception):
     """Error while processing equations."""
 
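
A hedged usage sketch for the new exception: it is assumed here that skfolio's optimizers raise `SolverError` when the underlying solver itself fails (the raising sites are in the optimization modules listed in the file list above but are not shown in this section), while `OptimizationError` keeps signalling non-convergence. Imports other than the two exceptions follow the README examples above:

    # Assumption: SolverError is raised on solver failure; not confirmed by this
    # section of the diff.
    from skfolio.datasets import load_sp500_dataset
    from skfolio.exceptions import OptimizationError, SolverError
    from skfolio.optimization import MeanRisk
    from skfolio.preprocessing import prices_to_returns

    prices = load_sp500_dataset()
    X = prices_to_returns(prices)

    model = MeanRisk()
    try:
        model.fit(X)
    except SolverError:
        # New in 0.10.x: assumed to signal a failure of the underlying solver.
        print("Solver failed; consider a different solver or solver settings.")
    except OptimizationError:
        # Existing exception: the optimization did not converge.
        print("Optimization did not converge.")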
|

src/skfolio/measures/__init__.py
@@ -13,6 +13,7 @@ from skfolio.measures._enums import (
 from skfolio.measures._measures import (
     average_drawdown,
     cdar,
+    correlation,
     cvar,
     drawdown_at_risk,
     edar,
@@ -49,6 +50,7 @@ __all__ = [
     "RiskMeasure",
     "average_drawdown",
     "cdar",
+    "correlation",
     "cvar",
     "drawdown_at_risk",
     "edar",