openseries-1.5.7-py3-none-any.whl → openseries-1.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
openseries/frame.py CHANGED
@@ -1,39 +1,29 @@
  """Defining the OpenFrame class."""

- # mypy: disable-error-code="index,assignment"
+ # mypy: disable-error-code="index,assignment,arg-type"
  from __future__ import annotations

- import datetime as dt
  from copy import deepcopy
  from functools import reduce
- from inspect import stack
  from logging import warning
- from pathlib import Path
- from typing import Callable, Optional, Union, cast
+ from typing import TYPE_CHECKING, cast
+
+ if TYPE_CHECKING:
+     import datetime as dt  # pragma: no cover

  import statsmodels.api as sm  # type: ignore[import-untyped,unused-ignore]
  from numpy import (
-     append,
      array,
      cov,
      cumprod,
      divide,
-     dot,
-     float64,
-     inf,
      isinf,
-     linspace,
      log,
      nan,
      sqrt,
      square,
      std,
-     zeros,
- )
- from numpy import (
-     sum as npsum,
  )
- from numpy.typing import NDArray
  from pandas import (
      DataFrame,
      DatetimeIndex,
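Note on the import changes in the hunk above: `datetime` is now imported only under `if TYPE_CHECKING:`, which works because the module already uses `from __future__ import annotations` (PEP 563), so annotations such as `dt.date` are stored as strings and never evaluated at runtime. A minimal, self-contained sketch of the pattern (the function name is illustrative, not part of openseries):

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        import datetime as dt  # needed by type checkers only


    def span_in_days(start: dt.date, end: dt.date) -> int:
        # The dt.date annotations are plain strings at runtime, so the
        # guarded import above is never needed when this executes.
        return (end - start).days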
@@ -44,11 +34,7 @@ from pandas import (
      concat,
      merge,
  )
- from plotly.graph_objs import Figure  # type: ignore[import-untyped,unused-ignore]
- from plotly.io import to_html  # type: ignore[import-untyped,unused-ignore]
- from plotly.offline import plot  # type: ignore[import-untyped,unused-ignore]
- from pydantic import DirectoryPath, field_validator
- from scipy.optimize import minimize  # type: ignore[import-untyped,unused-ignore]
+ from pydantic import field_validator

  # noinspection PyProtectedMember
  from statsmodels.regression.linear_model import (  # type: ignore[import-untyped,unused-ignore]
@@ -56,36 +42,31 @@ from statsmodels.regression.linear_model import ( # type: ignore[import-untyped
  )
  from typing_extensions import Self

- from openseries._common_model import _CommonModel
- from openseries.datefixer import do_resample_to_business_period_ends
- from openseries.load_plotly import load_plotly_dict
- from openseries.series import OpenTimeSeries
- from openseries.simulation import random_generator
- from openseries.types import (
+ from ._common_model import _CommonModel
+ from .datefixer import do_resample_to_business_period_ends
+ from .series import OpenTimeSeries
+ from .types import (
      CountriesType,
      DaysInYearType,
      LiteralBizDayFreq,
      LiteralCaptureRatio,
      LiteralFrameProps,
      LiteralHowMerge,
-     LiteralLinePlotMode,
      LiteralOlsFitCovType,
      LiteralOlsFitMethod,
      LiteralPandasReindexMethod,
-     LiteralPlotlyJSlib,
-     LiteralPlotlyOutput,
      LiteralPortfolioWeightings,
      LiteralTrunc,
      OpenFramePropertiesList,
      ValueType,
  )

+ __all__ = ["OpenFrame"]

- # noinspection PyUnresolvedReferences
- class OpenFrame(_CommonModel):

-     """
-     OpenFrame objects hold OpenTimeSeries in the list constituents.
+ # noinspection PyUnresolvedReferences,PyTypeChecker
+ class OpenFrame(_CommonModel):
+     """OpenFrame objects hold OpenTimeSeries in the list constituents.

      The intended use is to allow comparisons across these timeseries.

@@ -105,7 +86,7 @@ class OpenFrame(_CommonModel):

      constituents: list[OpenTimeSeries]
      tsdf: DataFrame = DataFrame(dtype="float64")
-     weights: Optional[list[float]] = None
+     weights: list[float] | None = None

      # noinspection PyMethodParameters
      @field_validator("constituents")  # type: ignore[misc]
@@ -123,10 +104,9 @@ class OpenFrame(_CommonModel):
      def __init__(
          self: Self,
          constituents: list[OpenTimeSeries],
-         weights: Optional[list[float]] = None,
+         weights: list[float] | None = None,
      ) -> None:
-         """
-         OpenFrame objects hold OpenTimeSeries in the list constituents.
+         """OpenFrame objects hold OpenTimeSeries in the list constituents.

          The intended use is to allow comparisons across these timeseries.

@@ -163,8 +143,7 @@ class OpenFrame(_CommonModel):
              warning("OpenFrame() was passed an empty list.")

      def from_deepcopy(self: Self) -> Self:
-         """
-         Create copy of the OpenFrame object.
+         """Create copy of the OpenFrame object.

          Returns
          -------
@@ -178,8 +157,7 @@ class OpenFrame(_CommonModel):
          self: Self,
          how: LiteralHowMerge = "outer",
      ) -> Self:
-         """
-         Merge index of Pandas Dataframes of the constituent OpenTimeSeries.
+         """Merge index of Pandas Dataframes of the constituent OpenTimeSeries.

          Parameters
          ----------
@@ -221,10 +199,9 @@ class OpenFrame(_CommonModel):

      def all_properties(
          self: Self,
-         properties: Optional[list[LiteralFrameProps]] = None,
+         properties: list[LiteralFrameProps] | None = None,
      ) -> DataFrame:
-         """
-         Calculate chosen timeseries properties.
+         """Calculate chosen timeseries properties.

          Parameters
          ----------
@@ -248,8 +225,7 @@ class OpenFrame(_CommonModel):

      @property
      def lengths_of_items(self: Self) -> Series[int]:
-         """
-         Number of observations of all constituents.
+         """Number of observations of all constituents.

          Returns
          -------
@@ -266,8 +242,7 @@ class OpenFrame(_CommonModel):

      @property
      def item_count(self: Self) -> int:
-         """
-         Number of constituents.
+         """Number of constituents.

          Returns
          -------
@@ -279,8 +254,7 @@ class OpenFrame(_CommonModel):

      @property
      def columns_lvl_zero(self: Self) -> list[str]:
-         """
-         Level 0 values of the MultiIndex columns in the .tsdf DataFrame.
+         """Level 0 values of the MultiIndex columns in the .tsdf DataFrame.

          Returns
          -------
@@ -292,8 +266,7 @@ class OpenFrame(_CommonModel):

      @property
      def columns_lvl_one(self: Self) -> list[ValueType]:
-         """
-         Level 1 values of the MultiIndex columns in the .tsdf DataFrame.
+         """Level 1 values of the MultiIndex columns in the .tsdf DataFrame.

          Returns
          -------
@@ -305,8 +278,7 @@ class OpenFrame(_CommonModel):

      @property
      def first_indices(self: Self) -> Series[dt.date]:
-         """
-         The first dates in the timeseries of all constituents.
+         """The first dates in the timeseries of all constituents.

          Returns
          -------
@@ -323,8 +295,7 @@ class OpenFrame(_CommonModel):

      @property
      def last_indices(self: Self) -> Series[dt.date]:
-         """
-         The last dates in the timeseries of all constituents.
+         """The last dates in the timeseries of all constituents.

          Returns
          -------
@@ -341,8 +312,7 @@ class OpenFrame(_CommonModel):

      @property
      def span_of_days_all(self: Self) -> Series[int]:
-         """
-         Number of days from the first date to the last for all items in the frame.
+         """Number of days from the first date to the last for all items in the frame.

          Returns
          -------
@@ -359,8 +329,7 @@ class OpenFrame(_CommonModel):
          )

      def value_to_ret(self: Self) -> Self:
-         """
-         Convert series of values into series of returns.
+         """Convert series of values into series of returns.

          Returns
          -------
@@ -368,7 +337,7 @@ class OpenFrame(_CommonModel):
              The returns of the values in the series

          """
-         self.tsdf = self.tsdf.pct_change(fill_method=cast(str, None))
+         self.tsdf = self.tsdf.pct_change(fill_method=None)
          self.tsdf.iloc[0] = 0
          new_labels = [ValueType.RTRN] * self.item_count
          arrays = [self.tsdf.columns.get_level_values(0), new_labels]
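The `pct_change(fill_method=cast(str, None))` calls replaced throughout this file become plain `pct_change(fill_method=None)`; the `cast` appears to have existed only to satisfy older pandas type stubs, while the behaviour (missing values are not forward-filled before returns are computed) is unchanged. A small hedged check of that behaviour, assuming a recent pandas release:

    import pandas as pd

    prices = pd.Series([100.0, 101.0, None, 103.0])
    # With fill_method=None the gap is not padded, so the returns around the
    # missing observation come out as NaN instead of being computed from a
    # forward-filled price.
    returns = prices.pct_change(fill_method=None)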
@@ -376,8 +345,7 @@ class OpenFrame(_CommonModel):
          return self

      def value_to_diff(self: Self, periods: int = 1) -> Self:
-         """
-         Convert series of values to series of their period differences.
+         """Convert series of values to series of their period differences.

          Parameters
          ----------
@@ -399,8 +367,7 @@ class OpenFrame(_CommonModel):
          return self

      def to_cumret(self: Self) -> Self:
-         """
-         Convert series of returns into cumulative series of values.
+         """Convert series of returns into cumulative series of values.

          Returns
          -------
@@ -423,14 +390,13 @@ class OpenFrame(_CommonModel):

      def resample(
          self: Self,
-         freq: Union[LiteralBizDayFreq, str] = "BME",
+         freq: LiteralBizDayFreq | str = "BME",
      ) -> Self:
-         """
-         Resample the timeseries frequency.
+         """Resample the timeseries frequency.

          Parameters
          ----------
-         freq: Union[LiteralBizDayFreq, str], default "BME"
+         freq: LiteralBizDayFreq | str, default "BME"
              The date offset string that sets the resampled frequency

          Returns
@@ -457,8 +423,7 @@ class OpenFrame(_CommonModel):
          countries: CountriesType = "SE",
          method: LiteralPandasReindexMethod = "nearest",
      ) -> Self:
-         """
-         Resamples timeseries frequency to the business calendar month end dates.
+         """Resamples timeseries frequency to the business calendar month end dates.

          Stubs left in place. Stubs will be aligned to the shortest stub.

@@ -478,12 +443,12 @@ class OpenFrame(_CommonModel):
              An OpenFrame object

          """
-         head = self.tsdf.loc[self.first_indices.max()].copy()
-         tail = self.tsdf.loc[self.last_indices.min()].copy()
+         head: Series[float] = self.tsdf.loc[self.first_indices.max()].copy()
+         tail: Series[float] = self.tsdf.loc[self.last_indices.min()].copy()
          dates = do_resample_to_business_period_ends(
              data=self.tsdf,
-             head=head,  # type: ignore[arg-type,unused-ignore]
-             tail=tail,  # type: ignore[arg-type,unused-ignore]
+             head=head,
+             tail=tail,
              freq=freq,
              countries=countries,
          )
@@ -502,13 +467,12 @@ class OpenFrame(_CommonModel):
          dlta_degr_freedms: int = 0,
          first_column: int = 0,
          second_column: int = 1,
-         months_from_last: Optional[int] = None,
-         from_date: Optional[dt.date] = None,
-         to_date: Optional[dt.date] = None,
-         periods_in_a_year_fixed: Optional[DaysInYearType] = None,
+         months_from_last: int | None = None,
+         from_date: dt.date | None = None,
+         to_date: dt.date | None = None,
+         periods_in_a_year_fixed: DaysInYearType | None = None,
      ) -> DataFrame:
-         """
-         Exponentially Weighted Moving Average Volatilities and Correlation.
+         """Exponentially Weighted Moving Average Volatilities and Correlation.

          Exponentially Weighted Moving Average (EWMA) for Volatilities and
          Correlation. https://www.investopedia.com/articles/07/ewma.asp.
@@ -620,8 +584,7 @@ class OpenFrame(_CommonModel):

      @property
      def correl_matrix(self: Self) -> DataFrame:
-         """
-         Correlation matrix.
+         """Correlation matrix.

          Returns
          -------
@@ -629,7 +592,7 @@ class OpenFrame(_CommonModel):
              Correlation matrix

          """
-         corr_matrix = self.tsdf.pct_change(fill_method=cast(str, None)).corr(
+         corr_matrix = self.tsdf.pct_change(fill_method=None).corr(
              method="pearson",
              min_periods=1,
          )
@@ -642,8 +605,7 @@ class OpenFrame(_CommonModel):
          self: Self,
          new_series: OpenTimeSeries,
      ) -> Self:
-         """
-         To add an OpenTimeSeries object.
+         """To add an OpenTimeSeries object.

          Parameters
          ----------
@@ -662,8 +624,7 @@ class OpenFrame(_CommonModel):
          return self

      def delete_timeseries(self: Self, lvl_zero_item: str) -> Self:
-         """
-         To delete an OpenTimeSeries object.
+         """To delete an OpenTimeSeries object.

          Parameters
          ----------
@@ -693,12 +654,11 @@ class OpenFrame(_CommonModel):

      def trunc_frame(
          self: Self,
-         start_cut: Optional[dt.date] = None,
-         end_cut: Optional[dt.date] = None,
+         start_cut: dt.date | None = None,
+         end_cut: dt.date | None = None,
          where: LiteralTrunc = "both",
      ) -> Self:
-         """
-         Truncate DataFrame such that all timeseries have the same time span.
+         """Truncate DataFrame such that all timeseries have the same time span.

          Parameters
          ----------
@@ -752,8 +712,7 @@ class OpenFrame(_CommonModel):
          *,
          base_zero: bool = True,
      ) -> None:
-         """
-         Calculate cumulative relative return between two series.
+         """Calculate cumulative relative return between two series.

          Parameters
          ----------
@@ -785,14 +744,13 @@ class OpenFrame(_CommonModel):

      def tracking_error_func(
          self: Self,
-         base_column: Union[tuple[str, ValueType], int] = -1,
-         months_from_last: Optional[int] = None,
-         from_date: Optional[dt.date] = None,
-         to_date: Optional[dt.date] = None,
-         periods_in_a_year_fixed: Optional[DaysInYearType] = None,
+         base_column: tuple[str, ValueType] | int = -1,
+         months_from_last: int | None = None,
+         from_date: dt.date | None = None,
+         to_date: dt.date | None = None,
+         periods_in_a_year_fixed: DaysInYearType | None = None,
      ) -> Series[float]:
-         """
-         Tracking Error.
+         """Tracking Error.

          Calculates Tracking Error which is the standard deviation of the
          difference between the fund and its index returns.
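Signature changes like the ones just above all follow one pattern: `Optional[X]` becomes `X | None` and `Union[A, B]` becomes `A | B` (PEP 604). Because the module uses `from __future__ import annotations`, the new spelling is safe in annotations regardless of the running interpreter version; an illustrative sketch (the function name is not from openseries):

    from __future__ import annotations

    import datetime as dt


    def window(
        months_from_last: int | None = None,
        from_date: dt.date | None = None,
    ) -> None:
        # Same meaning as Optional[int] / Optional[dt.date] in the 1.5.7 signatures.
        ...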
@@ -800,7 +758,7 @@ class OpenFrame(_CommonModel):

          Parameters
          ----------
-         base_column: Union[tuple[str, ValueType], int], default: -1
+         base_column: tuple[str, ValueType] | int, default: -1
              Column of timeseries that is the denominator in the ratio.
          months_from_last : int, optional
              number of months offset as positive integer. Overrides use of from_date
@@ -865,8 +823,7 @@ class OpenFrame(_CommonModel):
              # noinspection PyTypeChecker
              relative = 1.0 + longdf - shortdf
              vol = float(
-                 relative.pct_change(fill_method=cast(str, None)).std()
-                 * sqrt(time_factor),
+                 relative.pct_change(fill_method=None).std() * sqrt(time_factor),
              )
              terrors.append(vol)

@@ -879,14 +836,13 @@ class OpenFrame(_CommonModel):

      def info_ratio_func(
          self: Self,
-         base_column: Union[tuple[str, ValueType], int] = -1,
-         months_from_last: Optional[int] = None,
-         from_date: Optional[dt.date] = None,
-         to_date: Optional[dt.date] = None,
-         periods_in_a_year_fixed: Optional[DaysInYearType] = None,
+         base_column: tuple[str, ValueType] | int = -1,
+         months_from_last: int | None = None,
+         from_date: dt.date | None = None,
+         to_date: dt.date | None = None,
+         periods_in_a_year_fixed: DaysInYearType | None = None,
      ) -> Series[float]:
-         """
-         Information Ratio.
+         """Information Ratio.

          The Information Ratio equals ( fund return less index return ) divided
          by the Tracking Error. And the Tracking Error is the standard deviation of
@@ -895,7 +851,7 @@ class OpenFrame(_CommonModel):

          Parameters
          ----------
-         base_column: Union[tuple[str, ValueType], int], default: -1
+         base_column: tuple[str, ValueType] | int, default: -1
              Column of timeseries that is the denominator in the ratio.
          months_from_last : int, optional
              number of months offset as positive integer. Overrides use of from_date
@@ -960,12 +916,10 @@ class OpenFrame(_CommonModel):
              # noinspection PyTypeChecker
              relative = 1.0 + longdf - shortdf
              ret = float(
-                 relative.pct_change(fill_method=cast(str, None)).mean()
-                 * time_factor,
+                 relative.pct_change(fill_method=None).mean() * time_factor,
              )
              vol = float(
-                 relative.pct_change(fill_method=cast(str, None)).std()
-                 * sqrt(time_factor),
+                 relative.pct_change(fill_method=None).std() * sqrt(time_factor),
              )
              ratios.append(ret / vol)

@@ -979,14 +933,13 @@ class OpenFrame(_CommonModel):
      def capture_ratio_func(  # noqa: C901
          self: Self,
          ratio: LiteralCaptureRatio,
-         base_column: Union[tuple[str, ValueType], int] = -1,
-         months_from_last: Optional[int] = None,
-         from_date: Optional[dt.date] = None,
-         to_date: Optional[dt.date] = None,
-         periods_in_a_year_fixed: Optional[DaysInYearType] = None,
+         base_column: tuple[str, ValueType] | int = -1,
+         months_from_last: int | None = None,
+         from_date: dt.date | None = None,
+         to_date: dt.date | None = None,
+         periods_in_a_year_fixed: DaysInYearType | None = None,
      ) -> Series[float]:
-         """
-         Capture Ratio.
+         """Capture Ratio.

          The Up (Down) Capture Ratio is calculated by dividing the CAGR
          of the asset during periods that the benchmark returns are positive (negative)
@@ -1001,7 +954,7 @@ class OpenFrame(_CommonModel):
          ----------
          ratio: LiteralCaptureRatio
              The ratio to calculate
-         base_column: Union[tuple[str, ValueType], int], default: -1
+         base_column: tuple[str, ValueType] | int, default: -1
              Column of timeseries that is the denominator in the ratio.
          months_from_last : int, optional
              number of months offset as positive integer. Overrides use of from_date
@@ -1066,8 +1019,8 @@ class OpenFrame(_CommonModel):
              ]
              if ratio == "up":
                  uparray = (
-                     longdf.pct_change(fill_method=cast(str, None))[
-                         shortdf.pct_change(fill_method=cast(str, None)).to_numpy()
+                     longdf.pct_change(fill_method=None)[
+                         shortdf.pct_change(fill_method=None).to_numpy()
                          > loss_limit
                      ]
                      .add(1)
@@ -1075,8 +1028,8 @@ class OpenFrame(_CommonModel):
                  )
                  up_rtrn = uparray.prod() ** (1 / (len(uparray) / time_factor)) - 1
                  upidxarray = (
-                     shortdf.pct_change(fill_method=cast(str, None))[
-                         shortdf.pct_change(fill_method=cast(str, None)).to_numpy()
+                     shortdf.pct_change(fill_method=None)[
+                         shortdf.pct_change(fill_method=None).to_numpy()
                          > loss_limit
                      ]
                      .add(1)
@@ -1088,8 +1041,8 @@ class OpenFrame(_CommonModel):
                  ratios.append(up_rtrn / up_idx_return)
              elif ratio == "down":
                  downarray = (
-                     longdf.pct_change(fill_method=cast(str, None))[
-                         shortdf.pct_change(fill_method=cast(str, None)).to_numpy()
+                     longdf.pct_change(fill_method=None)[
+                         shortdf.pct_change(fill_method=None).to_numpy()
                          < loss_limit
                      ]
                      .add(1)
@@ -1099,8 +1052,8 @@ class OpenFrame(_CommonModel):
                      downarray.prod() ** (1 / (len(downarray) / time_factor)) - 1
                  )
                  downidxarray = (
-                     shortdf.pct_change(fill_method=cast(str, None))[
-                         shortdf.pct_change(fill_method=cast(str, None)).to_numpy()
+                     shortdf.pct_change(fill_method=None)[
+                         shortdf.pct_change(fill_method=None).to_numpy()
                          < loss_limit
                      ]
                      .add(1)
@@ -1113,8 +1066,8 @@ class OpenFrame(_CommonModel):
                  ratios.append(down_return / down_idx_return)
              elif ratio == "both":
                  uparray = (
-                     longdf.pct_change(fill_method=cast(str, None))[
-                         shortdf.pct_change(fill_method=cast(str, None)).to_numpy()
+                     longdf.pct_change(fill_method=None)[
+                         shortdf.pct_change(fill_method=None).to_numpy()
                          > loss_limit
                      ]
                      .add(1)
@@ -1122,8 +1075,8 @@ class OpenFrame(_CommonModel):
                  )
                  up_rtrn = uparray.prod() ** (1 / (len(uparray) / time_factor)) - 1
                  upidxarray = (
-                     shortdf.pct_change(fill_method=cast(str, None))[
-                         shortdf.pct_change(fill_method=cast(str, None)).to_numpy()
+                     shortdf.pct_change(fill_method=None)[
+                         shortdf.pct_change(fill_method=None).to_numpy()
                          > loss_limit
                      ]
                      .add(1)
@@ -1133,8 +1086,8 @@ class OpenFrame(_CommonModel):
                      upidxarray.prod() ** (1 / (len(upidxarray) / time_factor)) - 1
                  )
                  downarray = (
-                     longdf.pct_change(fill_method=cast(str, None))[
-                         shortdf.pct_change(fill_method=cast(str, None)).to_numpy()
+                     longdf.pct_change(fill_method=None)[
+                         shortdf.pct_change(fill_method=None).to_numpy()
                          < loss_limit
                      ]
                      .add(1)
@@ -1144,8 +1097,8 @@ class OpenFrame(_CommonModel):
                      downarray.prod() ** (1 / (len(downarray) / time_factor)) - 1
                  )
                  downidxarray = (
-                     shortdf.pct_change(fill_method=cast(str, None))[
-                         shortdf.pct_change(fill_method=cast(str, None)).to_numpy()
+                     shortdf.pct_change(fill_method=None)[
+                         shortdf.pct_change(fill_method=None).to_numpy()
                          < loss_limit
                      ]
                      .add(1)
@@ -1175,21 +1128,20 @@ class OpenFrame(_CommonModel):

      def beta(
          self: Self,
-         asset: Union[tuple[str, ValueType], int],
-         market: Union[tuple[str, ValueType], int],
+         asset: tuple[str, ValueType] | int,
+         market: tuple[str, ValueType] | int,
          dlta_degr_freedms: int = 1,
      ) -> float:
-         """
-         Market Beta.
+         """Market Beta.

          Calculates Beta as Co-variance of asset & market divided by Variance
          of the market. https://www.investopedia.com/terms/b/beta.asp.

          Parameters
          ----------
-         asset: Union[tuple[str, ValueType], int]
+         asset: tuple[str, ValueType] | int
              The column of the asset
-         market: Union[tuple[str, ValueType], int]
+         market: tuple[str, ValueType] | int
              The column of the market against which Beta is measured
          dlta_degr_freedms: int, default: 1
              Variance bias factor taking the value 0 or 1.
@@ -1257,15 +1209,14 @@ class OpenFrame(_CommonModel):

      def ord_least_squares_fit(
          self: Self,
-         y_column: Union[tuple[str, ValueType], int],
-         x_column: Union[tuple[str, ValueType], int],
+         y_column: tuple[str, ValueType] | int,
+         x_column: tuple[str, ValueType] | int,
          method: LiteralOlsFitMethod = "pinv",
          cov_type: LiteralOlsFitCovType = "nonrobust",
          *,
          fitted_series: bool = True,
      ) -> OLSResults:
-         """
-         Ordinary Least Squares fit.
+         """Ordinary Least Squares fit.

          Performs a linear regression and adds a new column with a fitted line
          using Ordinary Least Squares fit
@@ -1273,9 +1224,9 @@ class OpenFrame(_CommonModel):

          Parameters
          ----------
-         y_column: Union[tuple[str, ValueType], int]
+         y_column: tuple[str, ValueType] | int
              The column level values of the dependent variable y
-         x_column: Union[tuple[str, ValueType], int]
+         x_column: tuple[str, ValueType] | int
              The column level values of the exogenous variable x
          method: LiteralOlsFitMethod, default: pinv
              Method to solve least squares problem
@@ -1328,13 +1279,12 @@ class OpenFrame(_CommonModel):

      def jensen_alpha(  # noqa: C901
          self: Self,
-         asset: Union[tuple[str, ValueType], int],
-         market: Union[tuple[str, ValueType], int],
+         asset: tuple[str, ValueType] | int,
+         market: tuple[str, ValueType] | int,
          riskfree_rate: float = 0.0,
          dlta_degr_freedms: int = 1,
      ) -> float:
-         """
-         Jensen's alpha.
+         """Jensen's alpha.

          The Jensen's measure, or Jensen's alpha, is a risk-adjusted performance
          measure that represents the average return on a portfolio or investment,
@@ -1345,9 +1295,9 @@ class OpenFrame(_CommonModel):

          Parameters
          ----------
-         asset: Union[tuple[str, ValueType], int]
+         asset: tuple[str, ValueType] | int
              The column of the asset
-         market: Union[tuple[str, ValueType], int]
+         market: tuple[str, ValueType] | int
              The column of the market against which Jensen's alpha is measured
          riskfree_rate : float, default: 0.0
              The return of the zero volatility riskfree asset
@@ -1467,10 +1417,9 @@ class OpenFrame(_CommonModel):
      def make_portfolio(
          self: Self,
          name: str,
-         weight_strat: Optional[LiteralPortfolioWeightings] = None,
+         weight_strat: LiteralPortfolioWeightings | None = None,
      ) -> DataFrame:
-         """
-         Calculate a basket timeseries based on the supplied weights.
+         """Calculate a basket timeseries based on the supplied weights.

          Parameters
          ----------
@@ -1498,7 +1447,7 @@ class OpenFrame(_CommonModel):
              x == ValueType.RTRN
              for x in self.tsdf.columns.get_level_values(1).to_numpy()
          ):
-             dframe = dframe.pct_change(fill_method=cast(str, None))
+             dframe = dframe.pct_change(fill_method=None)
              dframe.iloc[0] = 0
          if weight_strat:
              if weight_strat == "eq_weights":
@@ -1522,10 +1471,9 @@ class OpenFrame(_CommonModel):
          long_column: int = 0,
          short_column: int = 1,
          observations: int = 21,
-         periods_in_a_year_fixed: Optional[DaysInYearType] = None,
+         periods_in_a_year_fixed: DaysInYearType | None = None,
      ) -> DataFrame:
-         """
-         Calculate rolling Information Ratio.
+         """Calculate rolling Information Ratio.

          The Information Ratio equals ( fund return less index return ) divided by
          the Tracking Error. And the Tracking Error is the standard deviation of the
@@ -1567,13 +1515,13 @@ class OpenFrame(_CommonModel):
          )

          retseries = (
-             relative.pct_change(fill_method=cast(str, None))
+             relative.pct_change(fill_method=None)
              .rolling(observations, min_periods=observations)
              .sum()
          )
          retdf = retseries.dropna().to_frame()

-         voldf = relative.pct_change(fill_method=cast(str, None)).rolling(
+         voldf = relative.pct_change(fill_method=None).rolling(
              observations,
              min_periods=observations,
          ).std() * sqrt(time_factor)
1591
1539
  observations: int = 21,
1592
1540
  dlta_degr_freedms: int = 1,
1593
1541
  ) -> DataFrame:
1594
- """
1595
- Calculate rolling Market Beta.
1542
+ """Calculate rolling Market Beta.
1596
1543
 
1597
1544
  Calculates Beta as Co-variance of asset & market divided by Variance
1598
1545
  of the market. https://www.investopedia.com/terms/b/beta.asp.
@@ -1618,8 +1565,7 @@ class OpenFrame(_CommonModel):
          asset_label = cast(tuple[str, str], self.tsdf.iloc[:, asset_column].name)[0]
          beta_label = f"{asset_label} / {market_label}"

-         rolling: DataFrame = self.tsdf.copy()
-         rolling = rolling.pct_change(fill_method=cast(str, None)).rolling(
+         rolling = self.tsdf.pct_change(fill_method=None).rolling(
              observations,
              min_periods=observations,
          )
@@ -1649,8 +1595,7 @@ class OpenFrame(_CommonModel):
          second_column: int = 1,
          observations: int = 21,
      ) -> DataFrame:
-         """
-         Calculate rolling Correlation.
+         """Calculate rolling Correlation.

          Calculates correlation between two series. The period with
          at least the given number of observations is the first period calculated.
@@ -1677,11 +1622,11 @@ class OpenFrame(_CommonModel):
          )
          first_series = (
              self.tsdf.iloc[:, first_column]
-             .pct_change(fill_method=cast(str, None))[1:]
+             .pct_change(fill_method=None)[1:]
              .rolling(observations, min_periods=observations)
          )
          second_series = self.tsdf.iloc[:, second_column].pct_change(
-             fill_method=cast(str, None),
+             fill_method=None,
          )[1:]
          corrdf = first_series.corr(other=second_series).dropna().to_frame()
          corrdf.columns = MultiIndex.from_arrays(
@@ -1692,549 +1637,3 @@ class OpenFrame(_CommonModel):
          )

          return DataFrame(corrdf)
-
-
- def simulate_portfolios(
-     simframe: OpenFrame,
-     num_ports: int,
-     seed: int,
- ) -> DataFrame:
-     """
-     Generate random weights for simulated portfolios.
-
-     Parameters
-     ----------
-     simframe: OpenFrame
-         Return data for portfolio constituents
-     num_ports: int
-         Number of possible portfolios to simulate
-     seed: int
-         The seed for the random process
-
-     Returns
-     -------
-     pandas.DataFrame
-         The resulting data
-
-     """
-     copi = simframe.from_deepcopy()
-
-     if any(
-         x == ValueType.PRICE for x in copi.tsdf.columns.get_level_values(1).to_numpy()
-     ):
-         copi.value_to_ret()
-         log_ret = copi.tsdf.copy()[1:]
-     else:
-         log_ret = copi.tsdf.copy()
-
-     log_ret.columns = log_ret.columns.droplevel(level=1)
-
-     randomizer = random_generator(seed=seed)
-
-     all_weights = zeros((num_ports, simframe.item_count))
-     ret_arr = zeros(num_ports)
-     vol_arr = zeros(num_ports)
-     sharpe_arr = zeros(num_ports)
-
-     for x in range(num_ports):
-         weights = array(randomizer.random(simframe.item_count))
-         weights = weights / npsum(weights)
-         all_weights[x, :] = weights
-
-         vol_arr[x] = sqrt(
-             dot(
-                 weights.T,
-                 dot(log_ret.cov() * simframe.periods_in_a_year, weights),
-             ),
-         )
-
-         ret_arr[x] = npsum(log_ret.mean() * weights * simframe.periods_in_a_year)
-
-         sharpe_arr[x] = ret_arr[x] / vol_arr[x]
-
-     # noinspection PyUnreachableCode
-     simdf = concat(
-         [
-             DataFrame({"stdev": vol_arr, "ret": ret_arr, "sharpe": sharpe_arr}),
-             DataFrame(all_weights, columns=simframe.columns_lvl_zero),
-         ],
-         axis="columns",
-     )
-     simdf = simdf.replace([inf, -inf], nan)
-     return simdf.dropna()
-
-
- def efficient_frontier(  # noqa: C901
-     eframe: OpenFrame,
-     num_ports: int = 5000,
-     seed: int = 71,
-     upperbounds: float = 1.0,
-     frontier_points: int = 200,
-     *,
-     tweak: bool = True,
- ) -> tuple[DataFrame, DataFrame, NDArray[float64]]:
-     """
-     Identify an efficient frontier.
-
-     Parameters
-     ----------
-     eframe: OpenFrame
-         Portfolio data
-     num_ports: int, default: 5000
-         Number of possible portfolios to simulate
-     seed: int, default: 71
-         The seed for the random process
-     upperbounds: float, default: 1.0
-         The largest allowed allocation to a single asset
-     frontier_points: int, default: 200
-         number of points along frontier to optimize
-     tweak: bool, default: True
-         cutting the frontier to exclude multiple points with almost the same risk
-
-     Returns
-     -------
-     tuple[DataFrame, DataFrame, NDArray[float]]
-         The efficient frontier data, simulation data and optimal portfolio
-
-     """
-     if eframe.weights is None:
-         eframe.weights = [1.0 / eframe.item_count] * eframe.item_count
-
-     copi = eframe.from_deepcopy()
-
-     if any(
-         x == ValueType.PRICE for x in copi.tsdf.columns.get_level_values(1).to_numpy()
-     ):
-         copi.value_to_ret()
-         log_ret = copi.tsdf.copy()[1:]
-     else:
-         log_ret = copi.tsdf.copy()
-
-     log_ret.columns = log_ret.columns.droplevel(level=1)
-
-     simulated = simulate_portfolios(simframe=copi, num_ports=num_ports, seed=seed)
-
-     frontier_min = simulated.loc[simulated["stdev"].idxmin()]["ret"]
-     arithmetic_mean = log_ret.mean() * copi.periods_in_a_year
-     frontier_max = 0.0
-     if isinstance(arithmetic_mean, Series):
-         frontier_max = arithmetic_mean.max()
-
-     def _check_sum(weights: NDArray[float64]) -> float64:
-         return cast(float64, npsum(weights) - 1)
-
-     def _get_ret_vol_sr(
-         lg_ret: DataFrame,
-         weights: NDArray[float64],
-         per_in_yr: float,
-     ) -> NDArray[float64]:
-         ret = npsum(lg_ret.mean() * weights) * per_in_yr
-         volatility = sqrt(dot(weights.T, dot(lg_ret.cov() * per_in_yr, weights)))
-         sr = ret / volatility
-         return cast(NDArray[float64], array([ret, volatility, sr]))
-
-     def _diff_return(
-         lg_ret: DataFrame,
-         weights: NDArray[float64],
-         per_in_yr: float,
-         poss_return: float,
-     ) -> float64:
-         return cast(
-             float64,
-             _get_ret_vol_sr(lg_ret=lg_ret, weights=weights, per_in_yr=per_in_yr)[0]
-             - poss_return,
-         )
-
-     def _neg_sharpe(weights: NDArray[float64]) -> float64:
-         return cast(
-             float64,
-             _get_ret_vol_sr(
-                 lg_ret=log_ret,
-                 weights=weights,
-                 per_in_yr=eframe.periods_in_a_year,
-             )[2]
-             * -1,
-         )
-
-     def _minimize_volatility(
-         weights: NDArray[float64],
-     ) -> float64:
-         return cast(
-             float64,
-             _get_ret_vol_sr(
-                 lg_ret=log_ret,
-                 weights=weights,
-                 per_in_yr=eframe.periods_in_a_year,
-             )[1],
-         )
-
-     constraints = {"type": "eq", "fun": _check_sum}
-     bounds = tuple((0, upperbounds) for _ in range(eframe.item_count))
-     init_guess = array(eframe.weights)
-
-     opt_results = minimize(
-         fun=_neg_sharpe,
-         x0=init_guess,
-         method="SLSQP",
-         bounds=bounds,
-         constraints=constraints,
-     )
-
-     optimal = _get_ret_vol_sr(
-         lg_ret=log_ret,
-         weights=opt_results.x,
-         per_in_yr=eframe.periods_in_a_year,
-     )
-
-     frontier_y = linspace(start=frontier_min, stop=frontier_max, num=frontier_points)
-     frontier_x = []
-     frontier_weights = []
-
-     for possible_return in frontier_y:
-         cons = cast(
-             dict[str, Union[str, Callable[[float, NDArray[float64]], float64]]],
-             (
-                 {"type": "eq", "fun": _check_sum},
-                 {
-                     "type": "eq",
-                     "fun": lambda w, poss_return=possible_return: _diff_return(
-                         lg_ret=log_ret,
-                         weights=w,
-                         per_in_yr=eframe.periods_in_a_year,
-                         poss_return=poss_return,
-                     ),
-                 },
-             ),
-         )
-
-         result = minimize(
-             fun=_minimize_volatility,
-             x0=init_guess,
-             method="SLSQP",
-             bounds=bounds,
-             constraints=cons,
-         )
-
-         frontier_x.append(result["fun"])
-         frontier_weights.append(result["x"])
-
-     # noinspection PyUnreachableCode
-     line_df = concat(
-         [
-             DataFrame(data=frontier_weights, columns=eframe.columns_lvl_zero),
-             DataFrame({"stdev": frontier_x, "ret": frontier_y}),
-         ],
-         axis="columns",
-     )
-     line_df["sharpe"] = line_df.ret / line_df.stdev
-
-     limit_small = 0.0001
-     line_df = line_df.mask(line_df.abs() < limit_small, 0.0)
-     line_df["text"] = line_df.apply(
-         lambda c: "<br><br>Weights:<br>"
-         + "<br>".join(
-             [f"{c[nm]:.1%} {nm}" for nm in eframe.columns_lvl_zero],
-         ),
-         axis="columns",
-     )
-
-     if tweak:
-         limit_tweak = 0.001
-         line_df["stdev_diff"] = line_df.stdev.pct_change()
-         line_df = line_df.loc[line_df.stdev_diff.abs() > limit_tweak]
-         line_df = line_df.drop(columns="stdev_diff")
-
-     return line_df, simulated, append(optimal, opt_results.x)
-
-
- def constrain_optimized_portfolios(
-     data: OpenFrame,
-     serie: OpenTimeSeries,
-     portfolioname: str = "Current Portfolio",
-     simulations: int = 10000,
-     curve_points: int = 200,
-     upper_bound: float = 0.25,
- ) -> tuple[OpenFrame, OpenTimeSeries, OpenFrame, OpenTimeSeries]:
-     """
-     Constrain optimized portfolios to those that improve on the current one.
-
-     Parameters
-     ----------
-     data: OpenFrame
-         Portfolio data
-     serie: OpenTimeSeries
-         A
-     portfolioname: str, default: "Current Portfolio"
-         Name of the portfolio
-     simulations: int, default: 10000
-         Number of possible portfolios to simulate
-     curve_points: int, default: 200
-         Number of optimal portfolios on the efficient frontier
-     upper_bound: float, default: 0.25
-         The largest allowed allocation to a single asset
-
-     Returns
-     -------
-     tuple[OpenFrame, OpenTimeSeries, OpenFrame, OpenTimeSeries]
-         The constrained optimal portfolio data
-
-     """
-     lr_frame = data.from_deepcopy()
-     mv_frame = data.from_deepcopy()
-
-     front_frame, sim_frame, optimal = efficient_frontier(
-         eframe=data,
-         num_ports=simulations,
-         frontier_points=curve_points,
-         upperbounds=upper_bound,
-     )
-
-     condition_least_ret = front_frame.ret > serie.arithmetic_ret
-     # noinspection PyArgumentList
-     least_ret_frame = front_frame[condition_least_ret].sort_values(by="stdev")
-     least_ret_port = least_ret_frame.iloc[0]
-     least_ret_port_name = f"Minimize vol & target return of {portfolioname}"
-     least_ret_weights = [least_ret_port[c] for c in lr_frame.columns_lvl_zero]
-     lr_frame.weights = least_ret_weights
-     resleast = OpenTimeSeries.from_df(lr_frame.make_portfolio(least_ret_port_name))
-
-     condition_most_vol = front_frame.stdev < serie.vol
-     # noinspection PyArgumentList
-     most_vol_frame = front_frame[condition_most_vol].sort_values(
-         by="ret",
-         ascending=False,
-     )
-     most_vol_port = most_vol_frame.iloc[0]
-     most_vol_port_name = f"Maximize return & target risk of {portfolioname}"
-     most_vol_weights = [most_vol_port[c] for c in mv_frame.columns_lvl_zero]
-     mv_frame.weights = most_vol_weights
-     resmost = OpenTimeSeries.from_df(mv_frame.make_portfolio(most_vol_port_name))
-
-     return lr_frame, resleast, mv_frame, resmost
-
-
- def prepare_plot_data(
-     assets: OpenFrame,
-     current: OpenTimeSeries,
-     optimized: NDArray[float64],
- ) -> DataFrame:
-     """
-     Prepare date to be used as point_frame in the sharpeplot function.
-
-     Parameters
-     ----------
-     assets: OpenFrame
-         Portfolio data with individual assets and a weighted portfolio
-     current: OpenTimeSeries
-         The current or initial portfolio based on given weights
-     optimized: DataFrame
-         Data optimized with the efficient_frontier method
-
-     Returns
-     -------
-     DataFrame
-         The data prepared with mean returns, volatility and weights
-
-     """
-     txt = "<br><br>Weights:<br>" + "<br>".join(
-         [
-             f"{wgt:.1%} {nm}"
-             for wgt, nm in zip(
-                 cast(list[float], assets.weights),
-                 assets.columns_lvl_zero,
-             )
-         ],
-     )
-
-     opt_text_list = [
-         f"{wgt:.1%} {nm}" for wgt, nm in zip(optimized[3:], assets.columns_lvl_zero)
-     ]
-     opt_text = "<br><br>Weights:<br>" + "<br>".join(opt_text_list)
-     vol: Series[float] = assets.vol
-     plotframe = DataFrame(
-         data=[
-             assets.arithmetic_ret,
-             vol,
-             Series(
-                 data=[""] * assets.item_count,
-                 index=vol.index,
-             ),
-         ],
-         index=["ret", "stdev", "text"],
-     )
-     plotframe.columns = plotframe.columns.droplevel(level=1)
-     plotframe["Max Sharpe Portfolio"] = [optimized[0], optimized[1], opt_text]
-     plotframe[current.label] = [current.arithmetic_ret, current.vol, txt]
-
-     return plotframe
-
-
- def sharpeplot(  # noqa: C901
-     sim_frame: DataFrame = None,
-     line_frame: DataFrame = None,
-     point_frame: DataFrame = None,
-     point_frame_mode: LiteralLinePlotMode = "markers",
-     filename: Optional[str] = None,
-     directory: Optional[DirectoryPath] = None,
-     titletext: Optional[str] = None,
-     output_type: LiteralPlotlyOutput = "file",
-     include_plotlyjs: LiteralPlotlyJSlib = "cdn",
-     *,
-     title: bool = True,
-     add_logo: bool = True,
-     auto_open: bool = True,
- ) -> tuple[Figure, str]:
-     """
-     Create scatter plot coloured by Sharpe Ratio.
-
-     Parameters
-     ----------
-     sim_frame: DataFrame, optional
-         Data from the simulate_portfolios method.
-     line_frame: DataFrame, optional
-         Data from the efficient_frontier method.
-     point_frame: DataFrame, optional
-         Data to highlight current and efficient portfolios.
-     point_frame_mode: LiteralLinePlotMode, default: markers
-         Which type of scatter to use.
-     filename: str, optional
-         Name of the Plotly html file
-     directory: DirectoryPath, optional
-         Directory where Plotly html file is saved
-     titletext: str, optional
-         Text for the plot title
-     output_type: LiteralPlotlyOutput, default: "file"
-         Determines output type
-     include_plotlyjs: LiteralPlotlyJSlib, default: "cdn"
-         Determines how the plotly.js library is included in the output
-     title: bool, default: True
-         Whether to add standard plot title
-     add_logo: bool, default: True
-         Whether to add Captor logo
-     auto_open: bool, default: True
-         Determines whether to open a browser window with the plot
-
-     Returns
-     -------
-     Figure
-         The scatter plot with simulated and optimized results
-
-     """
-     returns = []
-     risk = []
-
-     if directory:
-         dirpath = Path(directory).resolve()
-     elif Path.home().joinpath("Documents").exists():
-         dirpath = Path.home().joinpath("Documents")
-     else:
-         dirpath = Path(stack()[1].filename).parent
-
-     if not filename:
-         filename = "sharpeplot.html"
-     plotfile = dirpath.joinpath(filename)
-
-     fig, logo = load_plotly_dict()
-     figure = Figure(fig)
-
-     if sim_frame is not None:
-         returns.extend(list(sim_frame.loc[:, "ret"]))
-         risk.extend(list(sim_frame.loc[:, "stdev"]))
-         figure.add_scatter(
-             x=sim_frame.loc[:, "stdev"],
-             y=sim_frame.loc[:, "ret"],
-             hoverinfo="skip",
-             marker={
-                 "size": 10,
-                 "opacity": 0.5,
-                 "color": sim_frame.loc[:, "sharpe"],
-                 "colorscale": "Jet",
-                 "reversescale": True,
-                 "colorbar": {"thickness": 20, "title": "Ratio<br>ret / vol"},
-             },
-             mode="markers",
-             name="simulated portfolios",
-         )
-     if line_frame is not None:
-         returns.extend(list(line_frame.loc[:, "ret"]))
-         risk.extend(list(line_frame.loc[:, "stdev"]))
-         figure.add_scatter(
-             x=line_frame.loc[:, "stdev"],
-             y=line_frame.loc[:, "ret"],
-             text=line_frame.loc[:, "text"],
-             xhoverformat=".2%",
-             yhoverformat=".2%",
-             hovertemplate="Return %{y}<br>Vol %{x}%{text}",
-             hoverlabel_align="right",
-             line={"width": 2.5, "dash": "solid"},
-             mode="lines",
-             name="Efficient frontier",
-         )
-
-     colorway = cast(dict[str, list[str]], fig["layout"]).get("colorway")[
-         : len(point_frame.columns)
-     ]
-
-     if point_frame is not None:
-         for col, clr in zip(point_frame.columns, colorway):
-             returns.extend([point_frame.loc["ret", col]])
-             risk.extend([point_frame.loc["stdev", col]])
-             figure.add_scatter(
-                 x=[point_frame.loc["stdev", col]],
-                 y=[point_frame.loc["ret", col]],
-                 xhoverformat=".2%",
-                 yhoverformat=".2%",
-                 hovertext=[point_frame.loc["text", col]],
-                 hovertemplate="Return %{y}<br>Vol %{x}%{hovertext}",
-                 hoverlabel_align="right",
-                 marker={"size": 20, "color": clr},
-                 mode=point_frame_mode,
-                 name=col,
-                 text=col,
-                 textfont={"size": 14},
-                 textposition="bottom center",
-             )
-
-     figure.update_layout(
-         xaxis={"tickformat": ".1%"},
-         xaxis_title="volatility",
-         yaxis={
-             "tickformat": ".1%",
-             "scaleanchor": "x",
-             "scaleratio": 1,
-         },
-         yaxis_title="annual return",
-         showlegend=False,
-     )
-     if title:
-         if titletext is None:
-             titletext = "<b>Risk and Return</b><br>"
-         figure.update_layout(title={"text": titletext, "font": {"size": 32}})
-
-     if add_logo:
-         figure.add_layout_image(logo)
-
-     if output_type == "file":
-         plot(
-             figure_or_data=figure,
-             filename=str(plotfile),
-             auto_open=auto_open,
-             auto_play=False,
-             link_text="",
-             include_plotlyjs=cast(bool, include_plotlyjs),
-             config=fig["config"],
-             output_type=output_type,
-         )
-         string_output = str(plotfile)
-     else:
-         div_id = filename.split(sep=".")[0]
-         string_output = to_html(
-             fig=figure,
-             config=fig["config"],
-             auto_play=False,
-             include_plotlyjs=cast(bool, include_plotlyjs),
-             full_html=False,
-             div_id=div_id,
-         )
-
-     return figure, string_output
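The long deletion that closes the diff removes the module-level helpers simulate_portfolios, efficient_frontier, constrain_optimized_portfolios, prepare_plot_data and sharpeplot from openseries/frame.py, along with the numpy, scipy and plotly imports that only they used. This single-file diff does not show whether the functions were dropped from the package or relocated to another module, so call sites written against 1.5.7, like the hedged sketch below (assuming `assets` is an existing OpenFrame), should be re-checked against the 1.7.0 package layout before upgrading:

    # These imports resolved in openseries 1.5.7 but no longer exist in
    # openseries/frame.py as of 1.7.0.
    from openseries.frame import efficient_frontier, simulate_portfolios

    simulated = simulate_portfolios(simframe=assets, num_ports=1000, seed=71)
    frontier, sims, optimal = efficient_frontier(eframe=assets, num_ports=5000, seed=71)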