skfolio 0.9.1__py3-none-any.whl → 0.10.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. skfolio/distribution/multivariate/_vine_copula.py +35 -34
  2. skfolio/distribution/univariate/_base.py +20 -15
  3. skfolio/exceptions.py +5 -0
  4. skfolio/measures/__init__.py +2 -0
  5. skfolio/measures/_measures.py +392 -155
  6. skfolio/optimization/_base.py +21 -4
  7. skfolio/optimization/cluster/hierarchical/_base.py +16 -13
  8. skfolio/optimization/cluster/hierarchical/_herc.py +6 -6
  9. skfolio/optimization/cluster/hierarchical/_hrp.py +8 -6
  10. skfolio/optimization/convex/_base.py +238 -144
  11. skfolio/optimization/convex/_distributionally_robust.py +32 -20
  12. skfolio/optimization/convex/_maximum_diversification.py +15 -15
  13. skfolio/optimization/convex/_mean_risk.py +26 -24
  14. skfolio/optimization/convex/_risk_budgeting.py +23 -21
  15. skfolio/optimization/ensemble/__init__.py +2 -4
  16. skfolio/optimization/ensemble/_stacking.py +1 -1
  17. skfolio/optimization/naive/_naive.py +2 -2
  18. skfolio/population/_population.py +30 -9
  19. skfolio/portfolio/_base.py +68 -26
  20. skfolio/portfolio/_multi_period_portfolio.py +5 -0
  21. skfolio/portfolio/_portfolio.py +5 -0
  22. skfolio/prior/__init__.py +6 -2
  23. skfolio/prior/_base.py +7 -3
  24. skfolio/prior/_black_litterman.py +14 -12
  25. skfolio/prior/_empirical.py +8 -7
  26. skfolio/prior/_entropy_pooling.py +1493 -0
  27. skfolio/prior/_factor_model.py +39 -22
  28. skfolio/prior/_opinion_pooling.py +475 -0
  29. skfolio/prior/_synthetic_data.py +10 -8
  30. skfolio/uncertainty_set/_bootstrap.py +4 -4
  31. skfolio/uncertainty_set/_empirical.py +6 -6
  32. skfolio/utils/equations.py +10 -4
  33. skfolio/utils/figure.py +185 -0
  34. skfolio/utils/tools.py +4 -2
  35. {skfolio-0.9.1.dist-info → skfolio-0.10.1.dist-info}/METADATA +105 -14
  36. {skfolio-0.9.1.dist-info → skfolio-0.10.1.dist-info}/RECORD +40 -38
  37. {skfolio-0.9.1.dist-info → skfolio-0.10.1.dist-info}/WHEEL +1 -1
  38. skfolio/synthetic_returns/__init__.py +0 -1
  39. /skfolio/{optimization/ensemble/_base.py → utils/composition.py} +0 -0
  40. {skfolio-0.9.1.dist-info → skfolio-0.10.1.dist-info}/licenses/LICENSE +0 -0
  41. {skfolio-0.9.1.dist-info → skfolio-0.10.1.dist-info}/top_level.txt +0 -0
@@ -7,53 +7,74 @@
7
7
  # from Riskfolio-Lib, Copyright (c) 2020-2023, Dany Cajas, Licensed under BSD 3 clause.
8
8
 
9
9
  import numpy as np
10
+ import numpy.typing as npt
10
11
  import scipy.optimize as sco
11
12
 
12
13
 
13
- def mean(returns: np.ndarray) -> float:
14
+ def mean(
15
+ returns: npt.ArrayLike, sample_weight: np.ndarray | None = None
16
+ ) -> float | np.ndarray:
14
17
  """Compute the mean.
15
18
 
16
19
  Parameters
17
20
  ----------
18
- returns : ndarray of shape (n_observations,)
19
- Vector of returns.
21
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
22
+ Array of return values.
23
+
24
+ sample_weight : ndarray of shape (n_observations,), optional
25
+ Sample weights for each observation. If None, equal weights are assumed.
20
26
 
21
27
  Returns
22
28
  -------
23
- value : float
24
- Mean
29
+ value : float or ndarray of shape (n_assets,)
30
+ The computed mean.
31
+ If `returns` is a 1D-array, the result is a float.
32
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
25
33
  """
26
- return returns.mean()
34
+ if sample_weight is None:
35
+ return np.mean(returns, axis=0)
36
+ return sample_weight @ returns
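The weighted branch works because `sample_weight` is expected to be non-negative and to sum to one, so the dot product is exactly a weighted average. A minimal NumPy sketch (made-up data) showing that uniform weights recover the plain column-wise mean:

    import numpy as np

    rng = np.random.default_rng(0)
    returns = rng.normal(0.001, 0.02, size=(252, 3))  # (n_observations, n_assets)
    w = np.full(252, 1 / 252)                         # uniform weights, sum to 1

    assert np.allclose(w @ returns, np.mean(returns, axis=0))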
27
37
 
28
38
 
29
39
  def mean_absolute_deviation(
30
- returns: np.ndarray, min_acceptable_return: float | None = None
31
- ) -> float:
40
+ returns: npt.ArrayLike,
41
+ min_acceptable_return: float | np.ndarray | None = None,
42
+ sample_weight: np.ndarray | None = None,
43
+ ) -> float | np.ndarray:
32
44
  """Compute the mean absolute deviation (MAD).
33
45
 
34
46
  Parameters
35
47
  ----------
36
- returns : ndarray of shape (n_observations,)
37
- Vector of returns.
48
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
49
+ Array of return values.
38
50
 
39
- min_acceptable_return : float, optional
51
+ min_acceptable_return : float or ndarray of shape (n_assets,), optional
40
52
  Minimum acceptable return. It is the return target to distinguish "downside" and
41
- "upside" returns.
42
- The default (`None`) is to use the returns' mean.
53
+ "upside" returns. The default (`None`) is to use the returns' mean.
54
+
55
+ sample_weight : ndarray of shape (n_observations,), optional
56
+ Sample weights for each observation. If None, equal weights are assumed.
43
57
 
44
58
  Returns
45
59
  -------
46
- value : float
60
+ value : float or ndarray of shape (n_assets,)
47
61
  Mean absolute deviation.
62
+ If `returns` is a 1D-array, the result is a float.
63
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
48
64
  """
49
65
  if min_acceptable_return is None:
50
- min_acceptable_return = np.mean(returns, axis=0)
51
- return float(np.mean(np.abs(returns - min_acceptable_return)))
66
+ min_acceptable_return = mean(returns, sample_weight=sample_weight)
67
+
68
+ absolute_deviations = np.abs(returns - min_acceptable_return)
69
+
70
+ return mean(absolute_deviations, sample_weight=sample_weight)
52
71
 
53
72
 
54
73
  def first_lower_partial_moment(
55
- returns: np.ndarray, min_acceptable_return: float | None = None
56
- ) -> float:
74
+ returns: npt.ArrayLike,
75
+ min_acceptable_return: float | np.ndarray | None = None,
76
+ sample_weight: np.ndarray | None = None,
77
+ ) -> float | np.ndarray:
57
78
  """Compute the first lower partial moment.
58
79
 
59
80
  The first lower partial moment is the mean of the returns below a minimum
@@ -61,128 +82,218 @@ def first_lower_partial_moment(
61
82
 
62
83
  Parameters
63
84
  ----------
64
- returns : ndarray of shape (n_observations,)
65
- Vector of returns
85
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
86
+ Array of return values.
66
87
 
67
- min_acceptable_return : float, optional
88
+ min_acceptable_return : float or ndarray of shape (n_assets,), optional
68
89
  Minimum acceptable return. It is the return target to distinguish "downside" and
69
- "upside" returns.
70
- The default (`None`) is to use the mean.
90
+ "upside" returns. The default (`None`) is to use the returns' mean.
91
+
92
+ sample_weight : ndarray of shape (n_observations,), optional
93
+ Sample weights for each observation. If None, equal weights are assumed.
71
94
 
72
95
  Returns
73
96
  -------
74
- value : float
97
+ value : float or ndarray of shape (n_assets,)
75
98
  First lower partial moment.
99
+ If `returns` is a 1D-array, the result is a float.
100
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
76
101
  """
77
102
  if min_acceptable_return is None:
78
- min_acceptable_return = np.mean(returns, axis=0)
79
- return -np.sum(np.minimum(0, returns - min_acceptable_return)) / len(returns)
103
+ min_acceptable_return = mean(returns, sample_weight=sample_weight)
104
+
105
+ deviations = np.maximum(0, min_acceptable_return - returns)
106
+
107
+ return mean(deviations, sample_weight=sample_weight)
80
108
 
81
109
 
82
- def variance(returns: np.ndarray) -> float:
110
+ def variance(
111
+ returns: npt.ArrayLike,
112
+ biased: bool = False,
113
+ sample_weight: np.ndarray | None = None,
114
+ ) -> float | np.ndarray:
83
115
  """Compute the variance (second moment).
84
116
 
85
117
  Parameters
86
118
  ----------
87
- returns : ndarray of shape (n_observations,)
88
- Vector of returns.
119
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
120
+ Array of return values.
121
+
122
+ biased : bool, default=False
123
+ If False (default), computes the sample variance (unbiased); otherwise,
124
+ computes the population variance (biased).
125
+
126
+ sample_weight : ndarray of shape (n_observations,), optional
127
+ Sample weights for each observation. If None, equal weights are assumed.
89
128
 
90
129
  Returns
91
130
  -------
92
- value : float
93
- Variance.
131
+ value : float or ndarray of shape (n_assets,)
132
+ Variance.
133
+ If `returns` is a 1D-array, the result is a float.
134
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
94
135
  """
95
- return returns.var(ddof=1)
136
+ if sample_weight is None:
137
+ return np.var(returns, ddof=0 if biased else 1, axis=0)
138
+
139
+ biased_var = (
140
+ sample_weight @ (returns - mean(returns, sample_weight=sample_weight)) ** 2
141
+ )
142
+ if biased:
143
+ return biased_var
144
+ n_eff = 1 / np.sum(sample_weight**2)
145
+ return biased_var * n_eff / (n_eff - 1)
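The unbiased correction uses the Kish effective sample size, n_eff = 1 / sum(w**2), which collapses to the familiar n / (n - 1) factor when the weights are uniform. A small sketch (assuming weights normalised to sum to one) checking the weighted result against NumPy's `ddof=1` variance:

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.normal(size=500)
    w = np.full(500, 1 / 500)          # uniform weights, sum to 1

    mu = w @ x
    biased_var = w @ (x - mu) ** 2     # population (biased) variance
    n_eff = 1 / np.sum(w**2)           # Kish effective sample size (= 500 here)
    unbiased_var = biased_var * n_eff / (n_eff - 1)

    assert np.isclose(unbiased_var, np.var(x, ddof=1))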
96
146
 
97
147
 
98
148
  def semi_variance(
99
- returns: np.ndarray, min_acceptable_return: float | None = None
100
- ) -> float:
149
+ returns: npt.ArrayLike,
150
+ min_acceptable_return: float | np.ndarray | None = None,
151
+ sample_weight: np.ndarray | None = None,
152
+ biased: bool = False,
153
+ ) -> float | np.ndarray:
101
154
  """Compute the semi-variance (second lower partial moment).
102
155
 
103
156
  The semi-variance is the variance of the returns below a minimum acceptable return.
104
157
 
105
158
  Parameters
106
159
  ----------
107
- returns : ndarray of shape (n_observations,)
108
- Vector of returns
160
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
161
+ Array of return values.
109
162
 
110
- min_acceptable_return : float, optional
163
+ min_acceptable_return : float or ndarray of shape (n_assets,), optional
111
164
  Minimum acceptable return. It is the return target to distinguish "downside" and
112
- "upside" returns.
113
- The default (`None`) is to use the mean.
165
+ "upside" returns. The default (`None`) is to use the returns' mean.
166
+
167
+ biased : bool, default=False
168
+ If False (default), computes the sample semi-variance (unbiased); otherwise,
169
+ computes the population semi-variance (biased).
170
+
171
+ sample_weight : ndarray of shape (n_observations,), optional
172
+ Sample weights for each observation. If None, equal weights are assumed.
114
173
 
115
174
  Returns
116
175
  -------
117
- value : float
176
+ value : float or ndarray of shape (n_assets,)
118
177
  Semi-variance.
178
+ If `returns` is a 1D-array, the result is a float.
179
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
119
180
  """
120
181
  if min_acceptable_return is None:
121
- min_acceptable_return = np.mean(returns, axis=0)
122
- return np.sum(np.power(np.minimum(0, returns - min_acceptable_return), 2)) / (
123
- len(returns) - 1
182
+ min_acceptable_return = mean(returns, sample_weight=sample_weight)
183
+
184
+ biased_semi_var = mean(
185
+ np.maximum(0, min_acceptable_return - returns) ** 2, sample_weight=sample_weight
124
186
  )
187
+ if biased:
188
+ return biased_semi_var
189
+
190
+ n_observations = len(returns)
191
+ if sample_weight is None:
192
+ correction = n_observations / (n_observations - 1)
193
+ else:
194
+ correction = 1.0 / (1.0 - np.sum(sample_weight**2))
195
+ return biased_semi_var * correction
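The semi-variance uses the equivalent correction written as 1 / (1 - sum(w**2)); with uniform weights w_i = 1/n this is again n / (n - 1), as a quick check shows:

    import numpy as np

    n = 252
    w = np.full(n, 1 / n)
    assert np.isclose(1 / (1 - np.sum(w**2)), n / (n - 1))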
125
196
 
126
197
 
127
- def standard_deviation(returns: np.ndarray) -> float:
198
+ def standard_deviation(
199
+ returns: npt.ArrayLike,
200
+ sample_weight: np.ndarray | None = None,
201
+ biased: bool = False,
202
+ ) -> float | np.ndarray:
128
203
  """Compute the standard-deviation (square root of the second moment).
129
204
 
130
205
  Parameters
131
206
  ----------
132
- returns : ndarray of shape (n_observations,)
133
- Vector of returns.
207
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
208
+ Array of return values.
209
+
210
+ biased : bool, default=False
211
+ If False (default), computes the sample standard-deviation (unbiased);
212
+ otherwise, computes the population standard-deviation (biased).
213
+
214
+ sample_weight : ndarray of shape (n_observations,), optional
215
+ Sample weights for each observation. If None, equal weights are assumed.
134
216
 
135
217
  Returns
136
218
  -------
137
- value : float
219
+ value : float or ndarray of shape (n_assets,)
138
220
  Standard-deviation.
221
+ If `returns` is a 1D-array, the result is a float.
222
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
139
223
  """
140
- return np.sqrt(variance(returns=returns))
224
+ return np.sqrt(variance(returns, sample_weight=sample_weight, biased=biased))
141
225
 
142
226
 
143
227
  def semi_deviation(
144
- returns: np.ndarray, min_acceptable_return: float | None = None
145
- ) -> float:
146
- """Compute the semi standard-deviation (semi-deviation) (square root of the second lower
147
- partial moment).
228
+ returns: npt.ArrayLike,
229
+ min_acceptable_return: float | np.ndarray | None = None,
230
+ sample_weight: np.ndarray | None = None,
231
+ biased: bool = False,
232
+ ) -> float | np.ndarray:
233
+ """Compute the semi deviation (square root of the second lower partial moment).
148
234
 
149
235
  Parameters
150
236
  ----------
151
- returns : ndarray of shape (n_observations,)
152
- Vector of returns.
237
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
238
+ Array of return values.
153
239
 
154
- min_acceptable_return : float, optional
240
+ min_acceptable_return : float or ndarray of shape (n_assets,), optional
155
241
  Minimum acceptable return. It is the return target to distinguish "downside" and
156
- "upside" returns.
157
- The default (`None`) is to use the returns mean.
242
+ "upside" returns. The default (`None`) is to use the returns' mean.
243
+
244
+ biased : bool, default=False
245
+ If False (default), computes the sample semi-deviation (unbiased); otherwise,
246
+ computes the population semi-deviation (biased).
247
+
248
+ sample_weight : ndarray of shape (n_observations,), optional
249
+ Sample weights for each observation. If None, equal weights are assumed.
158
250
 
159
251
  Returns
160
252
  -------
161
- value : float
162
- Semi-standard-deviation.
253
+ value : float or ndarray of shape (n_assets,)
254
+ Semi-deviation.
255
+ If `returns` is a 1D-array, the result is a float.
256
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
163
257
  """
164
258
  return np.sqrt(
165
- semi_variance(returns=returns, min_acceptable_return=min_acceptable_return)
259
+ semi_variance(
260
+ returns,
261
+ min_acceptable_return=min_acceptable_return,
262
+ biased=biased,
263
+ sample_weight=sample_weight,
264
+ )
166
265
  )
167
266
 
168
267
 
169
- def third_central_moment(returns: np.ndarray) -> float:
268
+ def third_central_moment(
269
+ returns: npt.ArrayLike, sample_weight: np.ndarray | None = None
270
+ ) -> float | np.ndarray:
170
271
  """Compute the third central moment.
171
272
 
172
273
  Parameters
173
274
  ----------
174
- returns : ndarray of shape (n_observations,)
175
- Vector of returns.
275
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
276
+ Array of return values.
277
+
278
+ sample_weight : ndarray of shape (n_observations,), optional
279
+ Sample weights for each observation. If None, equal weights are assumed.
176
280
 
177
281
  Returns
178
282
  -------
179
- value : float
283
+ value : float or ndarray of shape (n_assets,)
180
284
  Third central moment.
285
+ If `returns` is a 1D-array, the result is a float.
286
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
181
287
  """
182
- return np.sum(np.power(returns - np.mean(returns, axis=0), 3)) / len(returns)
288
+ return mean(
289
+ (returns - mean(returns, sample_weight=sample_weight)) ** 3,
290
+ sample_weight=sample_weight,
291
+ )
183
292
 
184
293
 
185
- def skew(returns: np.ndarray) -> float:
294
+ def skew(
295
+ returns: npt.ArrayLike, sample_weight: np.ndarray | None = None
296
+ ) -> float | np.ndarray:
186
297
  """Compute the Skew.
187
298
 
188
299
  The Skew is a measure of the lopsidedness of the distribution.
@@ -191,34 +302,54 @@ def skew(returns: np.ndarray) -> float:
191
302
 
192
303
  Parameters
193
304
  ----------
194
- returns : ndarray of shape (n_observations,)
195
- Vector of returns.
305
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
306
+ Array of return values.
307
+
308
+ sample_weight : ndarray of shape (n_observations,), optional
309
+ Sample weights for each observation. If None, equal weights are assumed.
196
310
 
197
311
  Returns
198
312
  -------
199
- value : float
313
+ value : float or ndarray of shape (n_assets,)
200
314
  Skew.
315
+ If `returns` is a 1D-array, the result is a float.
316
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
201
317
  """
202
- return third_central_moment(returns) / standard_deviation(returns) ** 3
318
+ return (
319
+ third_central_moment(returns, sample_weight)
320
+ / variance(returns, sample_weight=sample_weight, biased=True) ** 1.5
321
+ )
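Note that the denominator is now the biased (population) variance raised to the power 3/2 rather than the cube of the unbiased standard deviation, which matches the textbook Fisher-Pearson coefficient. A sketch without sample weights, compared against `scipy.stats.skew` (biased by default):

    import numpy as np
    import scipy.stats as st

    rng = np.random.default_rng(0)
    x = rng.lognormal(size=1_000) - 1

    m3 = np.mean((x - x.mean()) ** 3)    # third central moment
    g1 = m3 / np.var(x, ddof=0) ** 1.5   # biased variance in the denominator

    assert np.isclose(g1, st.skew(x, bias=True))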
203
322
 
204
323
 
205
- def fourth_central_moment(returns: np.ndarray) -> float:
324
+ def fourth_central_moment(
325
+ returns: npt.ArrayLike, sample_weight: np.ndarray | None = None
326
+ ) -> float | np.ndarray:
206
327
  """Compute the Fourth central moment.
207
328
 
208
329
  Parameters
209
330
  ----------
210
- returns : ndarray of shape (n_observations,)
211
- Vector of returns.
331
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
332
+ Array of return values.
333
+
334
+ sample_weight : ndarray of shape (n_observations,), optional
335
+ Sample weights for each observation. If None, equal weights are assumed.
212
336
 
213
337
  Returns
214
338
  -------
215
- value : float
339
+ value : float or ndarray of shape (n_assets,)
216
340
  Fourth central moment.
341
+ If `returns` is a 1D-array, the result is a float.
342
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
217
343
  """
218
- return np.sum(np.power(returns - np.mean(returns, axis=0), 4)) / len(returns)
344
+ return mean(
345
+ (returns - mean(returns, sample_weight=sample_weight)) ** 4,
346
+ sample_weight=sample_weight,
347
+ )
219
348
 
220
349
 
221
- def kurtosis(returns: np.ndarray) -> float:
350
+ def kurtosis(
351
+ returns: npt.ArrayLike, sample_weight: np.ndarray | None = None
352
+ ) -> float | np.ndarray:
222
353
  """Compute the Kurtosis.
223
354
 
224
355
  The Kurtosis is a measure of the heaviness of the tail of the distribution.
@@ -226,20 +357,28 @@ def kurtosis(returns: np.ndarray) -> float:
226
357
 
227
358
  Parameters
228
359
  ----------
229
- returns : ndarray of shape (n_observations,)
230
- Vector of returns.
360
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
361
+ Array of return values.
362
+
363
+ sample_weight : ndarray of shape (n_observations,), optional
364
+ Sample weights for each observation. If None, equal weights are assumed.
231
365
 
232
366
  Returns
233
367
  -------
234
- value : float
368
+ value : float or ndarray of shape (n_assets,)
235
369
  Kurtosis.
370
+ If `returns` is a 1D-array, the result is a float.
371
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
236
372
  """
237
- return fourth_central_moment(returns) / standard_deviation(returns) ** 4
373
+ return (
374
+ fourth_central_moment(returns, sample_weight=sample_weight)
375
+ / variance(returns, sample_weight=sample_weight, biased=True) ** 2
376
+ )
238
377
 
239
378
 
240
379
  def fourth_lower_partial_moment(
241
- returns: np.ndarray, min_acceptable_return: float | None = None
242
- ) -> float:
380
+ returns: npt.ArrayLike, min_acceptable_return: float | None = None
381
+ ) -> float | np.ndarray:
243
382
  """Compute the fourth lower partial moment.
244
383
 
245
384
  The Fourth Lower Partial Moment is a measure of the heaviness of the downside tail
@@ -249,8 +388,8 @@ def fourth_lower_partial_moment(
249
388
 
250
389
  Parameters
251
390
  ----------
252
- returns : ndarray of shape (n_observations,)
253
- Vector of returns
391
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
392
+ Array of return values.
254
393
 
255
394
  min_acceptable_return : float, optional
256
395
  Minimum acceptable return. It is the return target to distinguish "downside" and
@@ -259,59 +398,79 @@ def fourth_lower_partial_moment(
259
398
 
260
399
  Returns
261
400
  -------
262
- value : float
401
+ value : float or ndarray of shape (n_assets,)
263
402
  Fourth lower partial moment.
403
+ If `returns` is a 1D-array, the result is a float.
404
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
264
405
  """
265
406
  if min_acceptable_return is None:
266
- min_acceptable_return = np.mean(returns, axis=0)
267
- return np.sum(np.power(np.minimum(0, returns - min_acceptable_return), 4)) / len(
268
- returns
269
- )
407
+ min_acceptable_return = mean(returns)
408
+ return mean(np.maximum(0, min_acceptable_return - returns) ** 4)
270
409
 
271
410
 
272
- def worst_realization(returns: np.ndarray) -> float:
411
+ def worst_realization(returns: npt.ArrayLike) -> float | np.ndarray:
273
412
  """Compute the worst realization (worst return).
274
413
 
275
414
  Parameters
276
415
  ----------
277
- returns : ndarray of shape (n_observations,)
278
- Vector of returns.
416
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
417
+ Array of return values.
279
418
 
280
419
  Returns
281
420
  -------
282
- value : float
421
+ value : float or ndarray of shape (n_assets,)
283
422
  Worst realization.
423
+ If `returns` is a 1D-array, the result is a float.
424
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
284
425
  """
285
- return -min(returns)
426
+ return -np.min(returns, axis=0)
286
427
 
287
428
 
288
- def value_at_risk(returns: np.ndarray, beta: float = 0.95) -> float:
429
+ def value_at_risk(
430
+ returns: npt.ArrayLike, beta: float = 0.95, sample_weight: np.ndarray | None = None
431
+ ) -> float | np.ndarray:
289
432
  """Compute the historical value at risk (VaR).
290
-
291
433
  The VaR is the maximum loss at a given confidence level (beta).
292
434
 
293
435
  Parameters
294
436
  ----------
295
- returns : ndarray of shape (n_observations,)
296
- Vector of returns.
437
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
438
+ Array of return values.
297
439
 
298
440
  beta : float, default=0.95
299
441
  The VaR confidence level (return on the worst (1-beta)% observation).
300
442
 
443
+ sample_weight : ndarray of shape (n_observations,), optional
444
+ Sample weights for each observation. If None, equal weights are assumed.
445
+
301
446
  Returns
302
447
  -------
303
- value : float
448
+ value : float or ndarray of shape (n_assets,)
304
449
  Value at Risk.
450
+ If `returns` is a 1D-array, the result is a float.
451
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
305
452
  """
306
- k = (1 - beta) * len(returns)
307
- ik = max(0, int(np.ceil(k) - 1))
308
- # We only need the first k elements so using `partition` O(n log(n)) is faster
309
- # than `sort` O(n).
310
- ret = np.partition(returns, ik)
311
- return -ret[ik]
453
+ returns = np.asarray(returns)
454
+ if sample_weight is None:
455
+ k = (1 - beta) * len(returns)
456
+ ik = max(0, int(np.ceil(k) - 1))
457
+ # We only need the first k elements, so using `partition` (O(n)) is faster
458
+ # than a full `sort` (O(n log n)).
459
+ return -np.partition(returns, ik, axis=0)[ik]
460
+
461
+ sorted_idx = np.argsort(returns, axis=0)
462
+ cum_weights = np.cumsum(sample_weight[sorted_idx], axis=0)
463
+ i = np.apply_along_axis(
464
+ np.searchsorted, axis=0, arr=cum_weights, v=1 - beta, side="left"
465
+ )
466
+ if returns.ndim == 1:
467
+ return -returns[sorted_idx][i]
468
+ return -np.diag(np.take_along_axis(returns, sorted_idx, axis=0)[i])
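With sample weights, the historical VaR becomes a weighted quantile: sort the returns, accumulate the sorted weights, and take the first observation whose cumulative weight reaches 1 - beta. A 1D sketch with uniform weights (made-up data) that should coincide with the unweighted partition-based path:

    import numpy as np

    rng = np.random.default_rng(0)
    returns = rng.normal(0.0, 0.01, size=997)
    beta = 0.95
    w = np.full(returns.size, 1 / returns.size)

    order = np.argsort(returns)
    cum_w = np.cumsum(w[order])
    i = np.searchsorted(cum_w, 1 - beta, side="left")
    var_weighted = -returns[order][i]

    ik = max(0, int(np.ceil((1 - beta) * len(returns)) - 1))  # unweighted reference
    assert np.isclose(var_weighted, -np.partition(returns, ik)[ik])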
312
469
 
313
470
 
314
- def cvar(returns: np.ndarray, beta: float = 0.95) -> float:
471
+ def cvar(
472
+ returns: npt.ArrayLike, beta: float = 0.95, sample_weight: np.ndarray | None = None
473
+ ) -> float | np.ndarray:
315
474
  """Compute the historical CVaR (conditional value at risk).
316
475
 
317
476
  The CVaR (or Tail VaR) represents the mean shortfall at a specified confidence
@@ -319,28 +478,63 @@ def cvar(returns: np.ndarray, beta: float = 0.95) -> float:
319
478
 
320
479
  Parameters
321
480
  ----------
322
- returns : ndarray of shape (n_observations,)
323
- Vector of returns.
481
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
482
+ Array of return values.
324
483
 
325
484
  beta : float, default=0.95
326
485
  The CVaR confidence level (expected VaR on the worst (1-beta)% observations).
327
486
 
487
+ sample_weight : ndarray of shape (n_observations,), optional
488
+ Sample weights for each observation. If None, equal weights are assumed.
489
+
328
490
  Returns
329
491
  -------
330
- value : float
492
+ value : float or ndarray of shape (n_assets,)
331
493
  CVaR.
494
+ If `returns` is a 1D-array, the result is a float.
495
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
332
496
  """
333
- k = (1 - beta) * len(returns)
334
- ik = max(0, int(np.ceil(k) - 1))
335
- # We only need the first k elements so using `partition` O(n log(n)) is faster
336
- # than `sort` O(n).
337
- ret = np.partition(returns, ik)
338
- return -np.sum(ret[:ik]) / k + ret[ik] * (ik / k - 1)
497
+ returns = np.asarray(returns)
498
+ if sample_weight is None:
499
+ k = (1 - beta) * len(returns)
500
+ ik = max(0, int(np.ceil(k) - 1))
501
+ # We only need the first k elements, so using `partition` (O(n)) is faster
502
+ # than a full `sort` (O(n log n)).
503
+ ret = np.partition(returns, ik, axis=0)
504
+ return -np.sum(ret[:ik], axis=0) / k + ret[ik] * (ik / k - 1)
505
+
506
+ order = np.argsort(returns, axis=0)
507
+ sorted_returns = np.take_along_axis(returns, order, axis=0)
508
+ sorted_w = sample_weight[order]
509
+ cum_w = np.cumsum(sorted_w, axis=0)
510
+ idx = np.apply_along_axis(
511
+ np.searchsorted, axis=0, arr=cum_w, v=1 - beta, side="left"
512
+ )
513
+
514
+ def _func(_idx, _sorted_returns, _sorted_w, _cum_w) -> float:
515
+ if _idx == 0:
516
+ return _sorted_returns[0]
517
+ return (
518
+ _sorted_returns[:_idx] @ _sorted_w[:_idx]
519
+ + _sorted_returns[_idx] * (1 - beta - _cum_w[_idx - 1])
520
+ ) / (1 - beta)
521
+
522
+ if returns.ndim == 1:
523
+ return -_func(idx, sorted_returns, sorted_w, cum_w)
524
+ return -np.array(
525
+ [
526
+ _func(idx[i], sorted_returns[:, i], sorted_w[:, i], cum_w[:, i])
527
+ for i in range(returns.shape[1])
528
+ ]
529
+ )
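The weighted CVaR averages the losses beyond the weighted VaR level, counting the boundary observation only for the residual probability mass 1 - beta - cum_w[idx - 1]. A 1D sketch with uniform weights (made-up data), which should agree with the unweighted partition-based formula:

    import numpy as np

    rng = np.random.default_rng(1)
    returns = rng.normal(0.0, 0.01, size=997)
    beta = 0.95
    w = np.full(returns.size, 1 / returns.size)

    order = np.argsort(returns)
    sr, sw = returns[order], w[order]
    cum_w = np.cumsum(sw)
    idx = np.searchsorted(cum_w, 1 - beta, side="left")
    cvar_weighted = -(sr[:idx] @ sw[:idx] + sr[idx] * (1 - beta - cum_w[idx - 1])) / (1 - beta)

    k = (1 - beta) * len(returns)        # unweighted reference
    ik = int(np.ceil(k) - 1)
    ret = np.partition(returns, ik)
    assert np.isclose(cvar_weighted, -np.sum(ret[:ik]) / k + ret[ik] * (ik / k - 1))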
339
530
 
340
531
 
341
532
  def entropic_risk_measure(
342
- returns: np.ndarray, theta: float = 1, beta: float = 0.95
343
- ) -> float:
533
+ returns: npt.ArrayLike,
534
+ theta: float = 1,
535
+ beta: float = 0.95,
536
+ sample_weight: np.ndarray | None = None,
537
+ ) -> float | np.ndarray:
344
538
  """Compute the entropic risk measure.
345
539
 
346
540
  The entropic risk measure is a risk measure which depends on the risk aversion
@@ -349,8 +543,8 @@ def entropic_risk_measure(
349
543
 
350
544
  Parameters
351
545
  ----------
352
- returns : ndarray of shape (n_observations,)
353
- Vector of returns.
546
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
547
+ Array of return values.
354
548
 
355
549
  theta : float, default=1.0
356
550
  Risk aversion.
@@ -358,15 +552,22 @@ def entropic_risk_measure(
358
552
  beta : float, default=0.95
359
553
  Confidence level.
360
554
 
555
+ sample_weight : ndarray of shape (n_observations,), optional
556
+ Sample weights for each observation. If None, equal weights are assumed.
557
+
361
558
  Returns
362
559
  -------
363
- value : float
560
+ value : float or ndarray of shape (n_assets,)
364
561
  Entropic risk measure.
562
+ If `returns` is a 1D-array, the result is a float.
563
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
365
564
  """
366
- return theta * np.log(np.mean(np.exp(-returns / theta)) / (1 - beta))
565
+ return theta * np.log(
566
+ mean(np.exp(-returns / theta), sample_weight=sample_weight) / (1 - beta)
567
+ )
367
568
 
368
569
 
369
- def evar(returns: np.ndarray, beta: float = 0.95) -> float:
570
+ def evar(returns: npt.ArrayLike, beta: float = 0.95) -> float:
370
571
  """Compute the EVaR (entropic value at risk) and its associated risk aversion.
371
572
 
372
573
  The EVaR is a coherent risk measure which is an upper bound for the VaR and the
@@ -402,15 +603,17 @@ def evar(returns: np.ndarray, beta: float = 0.95) -> float:
402
603
  return result.fun
403
604
 
404
605
 
405
- def get_cumulative_returns(returns: np.ndarray, compounded: bool = False) -> np.ndarray:
606
+ def get_cumulative_returns(
607
+ returns: npt.ArrayLike, compounded: bool = False
608
+ ) -> np.ndarray:
406
609
  """Compute the cumulative returns from the returns.
407
610
  Non-compounded cumulative returns start at 0.
408
611
  Compounded cumulative returns are rescaled to start at 1000.
409
612
 
410
613
  Parameters
411
614
  ----------
412
- returns : ndarray of shape (n_observations,)
413
- Vector of returns.
615
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
616
+ Array of return values.
414
617
 
415
618
  compounded : bool, default=False
416
619
  If this is set to True, the cumulative returns are compounded otherwise they
@@ -418,23 +621,24 @@ def get_cumulative_returns(returns: np.ndarray, compounded: bool = False) -> np.
418
621
 
419
622
  Returns
420
623
  -------
421
- values: ndarray of shape (n_observations,)
624
+ values: ndarray of shape (n_observations,) or (n_observations, n_assets)
422
625
  Cumulative returns.
423
626
  """
424
627
  if compounded:
425
- cumulative_returns = 1000 * np.cumprod(1 + returns) # Rescaled to start at 1000
628
+ # Rescaled to start at 1000
629
+ cumulative_returns = 1000 * np.cumprod(1 + returns, axis=0)
426
630
  else:
427
- cumulative_returns = np.cumsum(returns)
631
+ cumulative_returns = np.cumsum(returns, axis=0)
428
632
  return cumulative_returns
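A quick illustration of the two conventions on a toy series: non-compounded cumulative returns are a running sum, while compounded returns track a wealth index rescaled by 1000:

    import numpy as np

    r = np.array([0.01, -0.02, 0.03])
    print(np.cumsum(r))              # [ 0.01 -0.01  0.02]
    print(1000 * np.cumprod(1 + r))  # [1010.     989.8   1019.494]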
429
633
 
430
634
 
431
- def get_drawdowns(returns: np.ndarray, compounded: bool = False) -> np.ndarray:
635
+ def get_drawdowns(returns: npt.ArrayLike, compounded: bool = False) -> np.ndarray:
432
636
  """Compute the drawdowns' series from the returns.
433
637
 
434
638
  Parameters
435
639
  ----------
436
- returns : ndarray of shape (n_observations,)
437
- Vector of returns.
640
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
641
+ Array of return values.
438
642
 
439
643
  compounded : bool, default=False
440
644
  If this is set to True, the cumulative returns are compounded otherwise they
@@ -442,7 +646,7 @@ def get_drawdowns(returns: np.ndarray, compounded: bool = False) -> np.ndarray:
442
646
 
443
647
  Returns
444
648
  -------
445
- values: ndarray of shape (n_observations,)
649
+ values: ndarray of shape (n_observations,) or (n_observations, n_assets)
446
650
  Drawdowns.
447
651
  """
448
652
  cumulative_returns = get_cumulative_returns(returns=returns, compounded=compounded)
@@ -453,14 +657,14 @@ def get_drawdowns(returns: np.ndarray, compounded: bool = False) -> np.ndarray:
453
657
  return drawdowns
454
658
 
455
659
 
456
- def drawdown_at_risk(drawdowns: np.ndarray, beta: float = 0.95) -> float:
660
+ def drawdown_at_risk(drawdowns: np.ndarray, beta: float = 0.95) -> float | np.ndarray:
457
661
  """Compute the Drawdown at risk.
458
662
 
459
663
  The Drawdown at risk is the maximum drawdown at a given confidence level (beta).
460
664
 
461
665
  Parameters
462
666
  ----------
463
- drawdowns : ndarray of shape (n_observations,)
667
+ drawdowns : ndarray of shape (n_observations,) or (n_observations, n_assets)
464
668
  Vector of drawdowns.
465
669
 
466
670
  beta : float, default = 0.95
@@ -468,50 +672,56 @@ def drawdown_at_risk(drawdowns: np.ndarray, beta: float = 0.95) -> float:
468
672
 
469
673
  Returns
470
674
  -------
471
- value : float
472
- Drawdown at risk.
675
+ value : float or ndarray of shape (n_assets,)
676
+ Drawdown at risk.
677
+ If `drawdowns` is a 1D-array, the result is a float.
678
+ If `drawdowns` is a 2D-array, the result is a ndarray of shape (n_assets,).
473
679
  """
474
680
  return value_at_risk(returns=drawdowns, beta=beta)
475
681
 
476
682
 
477
- def max_drawdown(drawdowns: np.ndarray) -> float:
683
+ def max_drawdown(drawdowns: np.ndarray) -> float | np.ndarray:
478
684
  """Compute the maximum drawdown.
479
685
 
480
686
  Parameters
481
687
  ----------
482
- drawdowns : ndarray of shape (n_observations,)
688
+ drawdowns : ndarray of shape (n_observations,) or (n_observations, n_assets)
483
689
  Vector of drawdowns.
484
690
 
485
691
  Returns
486
692
  -------
487
- value : float
693
+ value : float or ndarray of shape (n_assets,)
488
694
  Maximum drawdown.
695
+ If `drawdowns` is a 1D-array, the result is a float.
696
+ If `drawdowns` is a 2D-array, the result is a ndarray of shape (n_assets,).
489
697
  """
490
698
  return drawdown_at_risk(drawdowns=drawdowns, beta=1)
491
699
 
492
700
 
493
- def average_drawdown(drawdowns: np.ndarray) -> float:
701
+ def average_drawdown(drawdowns: np.ndarray) -> float | np.ndarray:
494
702
  """Compute the average drawdown.
495
703
 
496
704
  Parameters
497
705
  ----------
498
- drawdowns : ndarray of shape (n_observations,)
706
+ drawdowns : ndarray of shape (n_observations,) or (n_observations, n_assets)
499
707
  Vector of drawdowns.
500
708
 
501
709
  Returns
502
710
  -------
503
- value : float
711
+ value : float or ndarray of shape (n_assets,)
504
712
  Average drawdown.
713
+ If `drawdowns` is a 1D-array, the result is a float.
714
+ If `drawdowns` is a 2D-array, the result is a ndarray of shape (n_assets,).
505
715
  """
506
716
  return cdar(drawdowns=drawdowns, beta=0)
507
717
 
508
718
 
509
- def cdar(drawdowns: np.ndarray, beta: float = 0.95) -> float:
719
+ def cdar(drawdowns: np.ndarray, beta: float = 0.95) -> float | np.ndarray:
510
720
  """Compute the historical CDaR (conditional drawdown at risk).
511
721
 
512
722
  Parameters
513
723
  ----------
514
- drawdowns : ndarray of shape (n_observations,)
724
+ drawdowns : ndarray of shape (n_observations,) or (n_observations, n_assets)
515
725
  Vector of drawdowns.
516
726
 
517
727
  beta : float, default = 0.95
@@ -520,8 +730,10 @@ def cdar(drawdowns: np.ndarray, beta: float = 0.95) -> float:
520
730
 
521
731
  Returns
522
732
  -------
523
- value : float
733
+ value : float or ndarray of shape (n_assets,)
524
734
  CDaR.
735
+ If `drawdowns` is a 1D-array, the result is a float.
736
+ If `drawdowns` is a 2D-array, the result is a ndarray of shape (n_assets,).
525
737
  """
526
738
  return cvar(returns=drawdowns, beta=beta)
527
739
 
@@ -549,20 +761,22 @@ def edar(drawdowns: np.ndarray, beta: float = 0.95) -> float:
549
761
  return evar(returns=drawdowns, beta=beta)
550
762
 
551
763
 
552
- def ulcer_index(drawdowns: np.ndarray) -> float:
764
+ def ulcer_index(drawdowns: np.ndarray) -> float | np.ndarray:
553
765
  """Compute the Ulcer index.
554
766
 
555
767
  Parameters
556
768
  ----------
557
- drawdowns : ndarray of shape (n_observations,)
769
+ drawdowns : ndarray of shape (n_observations,) or (n_observations, n_assets)
558
770
  Vector of drawdowns.
559
771
 
560
772
  Returns
561
773
  -------
562
- value : float
563
- Ulcer index.
774
+ value : float or ndarray of shape (n_assets,)
775
+ Ulcer Index.
776
+ If `drawdowns` is a 1D-array, the result is a float.
777
+ If `drawdowns` is a 2D-array, the result is a ndarray of shape (n_assets,).
564
778
  """
565
- return np.sqrt(np.sum(np.power(drawdowns, 2)) / len(drawdowns))
779
+ return np.sqrt(mean(np.power(drawdowns, 2)))
566
780
 
567
781
 
568
782
  def owa_gmd_weights(n_observations: int) -> np.ndarray:
@@ -583,7 +797,7 @@ def owa_gmd_weights(n_observations: int) -> np.ndarray:
583
797
  )
584
798
 
585
799
 
586
- def gini_mean_difference(returns: np.ndarray) -> float:
800
+ def gini_mean_difference(returns: npt.ArrayLike) -> float | np.ndarray:
587
801
  """Compute the Gini mean difference (GMD).
588
802
 
589
803
  The GMD is the expected absolute difference between two realisations.
@@ -594,16 +808,18 @@ def gini_mean_difference(returns: np.ndarray) -> float:
594
808
 
595
809
  Parameters
596
810
  ----------
597
- returns : ndarray of shape (n_observations,)
598
- Vector of returns.
811
+ returns : ndarray of shape (n_observations,) or (n_observations, n_assets)
812
+ Array of return values.
599
813
 
600
814
  Returns
601
815
  -------
602
- value : float
816
+ value : float or ndarray of shape (n_assets,)
603
817
  Gini mean difference.
818
+ If `returns` is a 1D-array, the result is a float.
819
+ If `returns` is a 2D-array, the result is a ndarray of shape (n_assets,).
604
820
  """
605
821
  w = owa_gmd_weights(len(returns))
606
- return float(w @ np.sort(returns, axis=0))
822
+ return w @ np.sort(returns, axis=0)
607
823
 
608
824
 
609
825
  def effective_number_assets(weights: np.ndarray) -> float:
@@ -631,3 +847,24 @@ def effective_number_assets(weights: np.ndarray) -> float:
631
847
  Lovett, William Anthony (1988)
632
848
  """
633
849
  return 1.0 / (np.power(weights, 2).sum())
850
+
851
+
852
+ def correlation(X: np.ndarray, sample_weight: np.ndarray | None = None) -> np.ndarray:
853
+ """Compute the correlation matrix.
854
+
855
+ Parameters
856
+ ----------
857
+ X : ndarray of shape (n_observations, n_assets)
858
+ Array of values.
859
+
860
+ sample_weight : ndarray of shape (n_observations,), optional
861
+ Sample weights for each observation. If None, equal weights are assumed.
862
+
863
+ Returns
864
+ -------
865
+ corr : ndarray of shape (n_assets, n_assets)
866
+ The correlation matrix.
867
+ """
868
+ cov = np.cov(X, rowvar=False, aweights=sample_weight)
869
+ std = np.sqrt(np.diag(cov))
870
+ return cov / np.outer(std, std)
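For reference, `np.cov` takes the observation weights directly through `aweights`, so the weighted correlation is the weighted covariance normalised by the outer product of its standard deviations; with uniform weights it reduces to `np.corrcoef` (sketch with made-up data):

    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.normal(size=(500, 4))
    w = np.full(500, 1 / 500)

    cov = np.cov(X, rowvar=False, aweights=w)
    std = np.sqrt(np.diag(cov))
    corr = cov / np.outer(std, std)

    assert np.allclose(corr, np.corrcoef(X, rowvar=False))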