bullishpy 0.14.0.tar.gz → 0.16.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of bullishpy has been flagged as potentially problematic.
- {bullishpy-0.14.0 → bullishpy-0.16.0}/PKG-INFO +4 -3
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/analysis/analysis.py +62 -4
- bullishpy-0.16.0/bullish/analysis/backtest.py +422 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/analysis/filter.py +5 -2
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/analysis/functions.py +14 -13
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/analysis/indicators.py +181 -85
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/analysis/predefined_filters.py +143 -9
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/app/app.py +15 -2
- bullishpy-0.16.0/bullish/database/alembic/versions/12889a2cbd7d_.py +52 -0
- bullishpy-0.16.0/bullish/database/alembic/versions/6d252e23f543_.py +48 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/crud.py +68 -2
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/schemas.py +23 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/interface/interface.py +24 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/jobs/models.py +1 -1
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/jobs/tasks.py +19 -3
- {bullishpy-0.14.0 → bullishpy-0.16.0}/pyproject.toml +4 -3
- {bullishpy-0.14.0 → bullishpy-0.16.0}/README.md +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/__init__.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/analysis/__init__.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/analysis/constants.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/analysis/industry_views.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/app/__init__.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/cli.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/__init__.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/README +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/alembic.ini +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/env.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/script.py.mako +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/037dbd721317_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/040b15fba458_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/08ac1116e055_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/11d35a452b40_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/17e51420e7ad_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/49c83f9eb5ac_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/4b0a2f40b7d3_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/5b10ee7604c1_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/73564b60fe24_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/b76079e9845f_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/bf6b86dd5463_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/d663166c531d_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/ec25c8fa449f_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/ee5baabb35f8_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/alembic/versions/fc191121f522_.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/scripts/create_revision.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/scripts/stamp.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/scripts/upgrade.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/database/settings.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/exceptions.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/figures/__init__.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/figures/figures.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/interface/__init__.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/jobs/__init__.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/jobs/app.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/utils/__init__.py +0 -0
- {bullishpy-0.14.0 → bullishpy-0.16.0}/bullish/utils/checks.py +0 -0
@@ -1,23 +1,24 @@
 Metadata-Version: 2.3
 Name: bullishpy
-Version: 0.14.0
+Version: 0.16.0
 Summary:
 Author: aan
 Author-email: andoludovic.andriamamonjy@gmail.com
 Requires-Python: >=3.12,<3.13
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: bearishpy (>=0.
+Requires-Dist: bearishpy (>=0.26.0,<0.27.0)
 Requires-Dist: click (>=7.0,<=8.1)
 Requires-Dist: huey (>=2.5.3,<3.0.0)
 Requires-Dist: joblib (>=1.5.1,<2.0.0)
 Requires-Dist: pandas-ta (>=0.3.14b0,<0.4.0)
-Requires-Dist: plotly (>=
+Requires-Dist: plotly (>=4.12.0,<6.0.0)
 Requires-Dist: streamlit (>=1.45.1,<2.0.0)
 Requires-Dist: streamlit-file-browser (>=3.2.22,<4.0.0)
 Requires-Dist: streamlit-pydantic (>=v0.6.1-rc.3,<0.7.0)
 Requires-Dist: ta-lib (>=0.6.4,<0.7.0)
 Requires-Dist: tickermood (>=0.4.0,<0.5.0)
+Requires-Dist: vectorbt (>=0.28.0,<0.29.0)
 Description-Content-Type: text/markdown

 ## Bullish
@@ -1,6 +1,6 @@
 import logging
 import time
-from itertools import batched
+from itertools import batched, chain
 from pathlib import Path
 from typing import (
     Annotated,
@@ -28,7 +28,7 @@ from bearish.models.financials.balance_sheet import ( # type: ignore
     BalanceSheet,
     QuarterlyBalanceSheet,
 )
-from bearish.models.financials.base import Financials  # type: ignore
+from bearish.models.financials.base import Financials, FinancialsWithDate  # type: ignore
 from bearish.models.financials.cash_flow import (  # type: ignore
     CashFlow,
     QuarterlyCashFlow,
@@ -42,7 +42,7 @@ from bearish.models.query.query import AssetQuery, Symbols # type: ignore
 from bearish.types import TickerOnlySources  # type: ignore
 from pydantic import BaseModel, BeforeValidator, Field, create_model

-from bullish.analysis.indicators import Indicators, IndicatorModels
+from bullish.analysis.indicators import Indicators, IndicatorModels, SignalSeries
 from joblib import Parallel, delayed  # type: ignore

 from bullish.analysis.industry_views import compute_industry_view
@@ -130,7 +130,7 @@ class TechnicalAnalysis(*TechnicalAnalysisModels): # type: ignore
             )
             return cls()
         try:
-            res = Indicators().
+            res = Indicators().compute(prices)
             return cls(last_price=prices.close.iloc[-1], **res)
         except Exception as e:
             logger.error(
@@ -404,6 +404,24 @@ class FundamentalAnalysis(YearlyFundamentalAnalysis, QuarterlyFundamentalAnalysi
             yearly_analysis.model_dump() | quarterly_analysis.model_dump()
         )

+    @classmethod
+    def compute_series(
+        cls, financials: FinancialsWithDate, ticker: Ticker
+    ) -> List[SignalSeries]:
+        fundamendal_analysis = FundamentalAnalysis.from_financials(financials, ticker)
+        fundamental_analysis_ = fundamendal_analysis.model_dump(
+            exclude_none=True, exclude_unset=True, exclude_defaults=True
+        )
+        fundamental_analysis_ = {
+            k: v for k, v in fundamental_analysis_.items() if v is True
+        }
+        return [
+            SignalSeries(
+                name=k.upper(), symbol=ticker.symbol, value=v, date=financials.date
+            )
+            for k, v in fundamental_analysis_.items()
+        ]
+

 class AnalysisView(BaseModel):
     sector: Annotated[
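The `compute_series` classmethod added above dumps the computed `FundamentalAnalysis` model, keeps only the boolean fields that came out `True`, and re-emits them as dated `SignalSeries` rows. A minimal, self-contained sketch of that dump-and-filter pattern (the toy model and field names below are illustrative, not bullish's actual schema):

```python
from datetime import date

from pydantic import BaseModel


class ToyFundamentals(BaseModel):
    # Illustrative boolean flags, not the package's real field names.
    positive_free_cash_flow: bool | None = None
    growing_net_income: bool | None = None


def true_flags(model: BaseModel) -> dict[str, bool]:
    # Same idea as compute_series: dump the model, then keep only the True values.
    dumped = model.model_dump(exclude_none=True, exclude_unset=True, exclude_defaults=True)
    return {k: v for k, v in dumped.items() if v is True}


signals = ToyFundamentals(positive_free_cash_flow=True, growing_net_income=False)
rows = [
    {"name": k.upper(), "symbol": "ACME", "value": v, "date": date(2024, 12, 31)}
    for k, v in true_flags(signals).items()
]
print(rows)  # only POSITIVE_FREE_CASH_FLOW survives the filter
```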
@@ -488,6 +506,16 @@ class Analysis(AnalysisView, BaseEquity, TechnicalAnalysis, FundamentalAnalysis)
         )


+def compute_financials_series(
+    financials_: Financials, ticker: Ticker
+) -> List[SignalSeries]:
+    financials_with_dates = FinancialsWithDate.from_financials(financials_)
+    series = []
+    for f in financials_with_dates:
+        series.extend(FundamentalAnalysis.compute_series(f, ticker))
+    return series
+
+
 def compute_analysis(database_path: Path, ticker: Ticker) -> Analysis:
     from bullish.database.crud import BullishDb

@@ -495,6 +523,36 @@ def compute_analysis(database_path: Path, ticker: Ticker) -> Analysis:
     return Analysis.from_ticker(bullish_db, ticker)


+def compute_signal_series(database_path: Path, ticker: Ticker) -> List[SignalSeries]:
+    from bullish.database.crud import BullishDb
+
+    bullish_db = BullishDb(database_path=database_path)
+    indicators = Indicators()
+    prices = Prices.from_ticker(bullish_db, ticker)
+    signal_series = indicators.compute_series(prices.to_dataframe(), ticker.symbol)
+    financials = Financials.from_ticker(bullish_db, ticker)
+    financial_series = compute_financials_series(financials, ticker)
+    return signal_series + financial_series
+
+
+def run_signal_series_analysis(bullish_db: "BullishDb") -> None:
+    price_trackers = set(bullish_db._read_tracker(TrackerQuery(), PriceTracker))
+    finance_trackers = set(bullish_db._read_tracker(TrackerQuery(), FinancialsTracker))
+    tickers = list(price_trackers.intersection(finance_trackers))
+    parallel = Parallel(n_jobs=-1)
+
+    for batch_ticker in batched(tickers, 1):
+        many_signal_series = parallel(
+            delayed(compute_signal_series)(bullish_db.database_path, ticker)
+            for ticker in batch_ticker
+        )
+        series = list(chain.from_iterable(many_signal_series))
+        try:
+            bullish_db.write_signal_series(series)
+        except Exception as e:
+            logger.error(f"Failed to compute signal series for {batch_ticker}: {e}")
+
+
 def run_analysis(bullish_db: "BullishDb") -> None:
     compute_industry_view(bullish_db)
     price_trackers = set(bullish_db._read_tracker(TrackerQuery(), PriceTracker))
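`run_signal_series_analysis` above fans per-ticker work out through `joblib.Parallel` in `batched` chunks, flattens the per-ticker lists with `chain.from_iterable`, and bulk-writes each batch inside a try/except so one failing batch does not stop the loop. A stripped-down sketch of that fan-out/flatten pattern (`fake_compute` and the ticker list are placeholders, not bullish code):

```python
from itertools import batched, chain

from joblib import Parallel, delayed


def fake_compute(ticker: str) -> list[dict]:
    # Stand-in for compute_signal_series(database_path, ticker).
    return [{"symbol": ticker, "name": "SOME_SIGNAL", "value": True}]


tickers = ["AAA", "BBB", "CCC", "DDD"]
parallel = Parallel(n_jobs=-1)

for batch in batched(tickers, 2):  # the diff uses batches of size 1
    per_ticker_lists = parallel(delayed(fake_compute)(t) for t in batch)
    series = list(chain.from_iterable(per_ticker_lists))
    # This is where bullish_db.write_signal_series(series) would run.
    print(f"{len(series)} rows for batch {batch}")
```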
@@ -0,0 +1,422 @@
+import json
+import logging
+import random
+from datetime import date, timedelta
+from typing import TYPE_CHECKING, Optional, Union, List, Dict, Any
+
+import numpy as np
+import pandas as pd
+from pydantic import BaseModel, Field, model_validator
+
+
+import plotly.graph_objects as go
+
+if TYPE_CHECKING:
+    from bullish.analysis.predefined_filters import NamedFilterQuery
+    from bullish.database.crud import BullishDb
+
+logger = logging.getLogger(__name__)
+COLOR = {
+    "mean": "#1f77b4",  # A refined blue (Plotly default)
+    "upper": "#d62728",  # Strong red
+    "lower": "#2ca02c",  # Rich green
+    "median": "#ff7f0e",  # Bright orange
+}
+
+
+class BacktestQueryBase(BaseModel):
+    name: str
+    table: str
+
+
+class BacktestQueryDate(BacktestQueryBase):
+
+    start: date
+    end: date
+
+
+class BacktestQueryRange(BacktestQueryBase):
+
+    min: float
+    max: float
+
+
+class BacktestQuerySelection(BacktestQueryBase):
+
+    selections: List[str]
+
+    def to_selections(self) -> str:
+        return ", ".join([f"'{s}'" for s in self.selections])
+
+
+class BacktestQueries(BaseModel):
+    queries: list[Union[BacktestQueryDate, BacktestQueryRange, BacktestQuerySelection]]
+
+    def to_query(self) -> str:
+        query_parts = []
+        for query in self.queries:
+            if isinstance(query, (BacktestQueryDate)):
+                query_parts.append(
+                    f"SELECT symbol FROM {query.table} WHERE name='{query.name}' "  # noqa: S608
+                    f"AND date >='{query.start}' AND date <='{query.end}'"
+                )
+            if isinstance(query, (BacktestQueryRange)):
+                query_parts.append(
+                    f"SELECT symbol FROM {query.table} WHERE "  # noqa: S608
+                    f"{query.name} >= {query.min} AND {query.name} <= {query.max}"
+                )
+            if isinstance(query, (BacktestQuerySelection)):
+                query_parts.append(
+                    f"SELECT symbol FROM {query.table} WHERE "  # noqa: S608
+                    f"{query.name} IN ({query.to_selections()})"
+                )
+
+        if len(query_parts) == 1:
+            return query_parts[0]
+        else:
+            return " INTERSECT ".join(query_parts)
+
+
+class ReturnPercentage(BaseModel):
+    return_percentage: float = Field(
+        default=12, description="Return percentage of the backtest"
+    )
+
+
+class BaseBacktestResult(BaseModel):
+    start: date = Field(default=date.today() - timedelta(days=252))
+    end: date = Field(default=date.today())
+    investment: float = Field(default=1000)
+    holding_period: int = Field(default=30 * 3)
+    extend_days: int = Field(
+        default=5,
+        description="Extend the backtest by this many days if no symbols are found",
+    )
+    percentage: int = Field(default=12, description="Return percentage of the backtest")
+    iterations: int = Field(default=200, description="Number of iterations to run")
+
+
+class BacktestResultQuery(BaseBacktestResult):
+    name: str
+
+
+class BacktestResult(BacktestResultQuery):
+    data: Dict[str, Any]
+
+    def to_dataframe(self) -> pd.DataFrame:
+        return pd.read_json(json.dumps(self.data)).sort_index()
+
+
+class BacktestResults(BaseModel):
+    results: List[BacktestResult]
+
+    def figure(self, type: str = "mean") -> go.Figure:
+        fig = go.Figure()
+        for result in self.results:
+            data = result.to_dataframe()[type]
+            fig.add_trace(
+                go.Scatter(
+                    x=data.index,
+                    y=data,
+                    mode="lines",
+                    name=f"{result.name} ({type})",
+                    line={"width": 1},
+                    hovertemplate=(
+                        "Date: %{x}<br>"
+                        + "Price: %{y:.2f}<br>"
+                        + f"Percentage: {result.percentage}<br>"
+                        + f"Iterations: {result.iterations}<br>"
+                        + f"Investment: {result.investment}<extra></extra>"
+                    ),
+                )
+            )
+        fig.update_layout(
+            height=800,
+            showlegend=True,
+            margin={"t": 60, "b": 40},
+        )
+
+        return fig
+
+
+class BackTestConfig(BaseBacktestResult):
+    exit_strategy: ReturnPercentage = Field(default=ReturnPercentage)
+
+    def to_base_backtest_result(self) -> BaseBacktestResult:
+        return BaseBacktestResult(
+            start=self.start,
+            end=self.end,
+            investment=self.investment,
+            holding_period=self.holding_period,
+            extend_days=self.extend_days,
+            percentage=self.percentage,
+            iterations=self.iterations,
+        )
+
+
+class Equity(BaseModel):
+    symbol: str
+    start: date
+    end: date
+    buy: float
+    sell: float
+    investment_in: float
+    investment_out: Optional[float] = None
+
+    def profit(self) -> float:
+        return (self.sell - self.buy) * (self.investment_in / self.buy)
+
+    def current_value(self) -> float:
+        return self.investment_in + self.profit()
+
+    def set_investment_out(self) -> None:
+        self.investment_out = self.current_value()
+
+
+class BackTest(BaseModel):
+    equities: list[Equity] = Field(
+        default_factory=list, description="List of equities bought during the backtest"
+    )
+    end: date = Field(default=date.today(), description="End date of the backtest")
+
+    def valid(self) -> bool:
+        return bool(self.equities)
+
+    def total_profit(self) -> float:
+        return sum(equity.profit() for equity in self.equities)
+
+    def symbols(self) -> list[str]:
+        return [equity.symbol for equity in self.equities]
+
+    def show(self) -> None:
+        for eq in self.equities:
+            print(
+                f"\n{eq.symbol} ({eq.type}): {eq.start}:{eq.investment_in} ({eq.buy}) - "
+                f"{eq.end}:{eq.investment_out} ({eq.sell})"
+            )
+
+    def to_dataframe(self) -> pd.DataFrame:
+        prices = [
+            self.equities[0].investment_in,
+            *[e.investment_out for e in self.equities],
+        ]
+        symbols = [self.equities[0].symbol, *[e.symbol for e in self.equities]]
+        index = [self.equities[0].start, *[e.end for e in self.equities]]
+        buy = [self.equities[0].buy, *[e.buy for e in self.equities]]
+        sell = [self.equities[0].sell, *[e.sell for e in self.equities]]
+        data = pd.DataFrame(
+            np.array([prices, symbols, buy, sell]).T,
+            index=index,
+            columns=["prices", "symbols", "buy", "sell"],
+        )
+        data = data[~data.index.duplicated(keep="first")]
+        return data
+
+    def __hash__(self) -> int:
+        return hash(tuple(sorted(equity.symbol for equity in self.equities)))
+
+
+class BackTests(BaseModel):
+    tests: list[BackTest] = Field(default_factory=list, description="List of backtests")
+    config: BackTestConfig
+    name: str
+
+    @model_validator(mode="after")
+    def _validate(self) -> "BackTests":
+        self.tests = list(set(self.tests))  # Remove duplicates
+        return self
+
+    def to_dataframe(self) -> pd.DataFrame:
+
+        data = (
+            pd.concat([t.to_dataframe() for t in self.tests if t.valid()], axis=1)
+            .sort_index()
+            .fillna(method="ffill")
+        )
+        data = data[~data.index.duplicated(keep="first")]
+        return data
+
+    def to_error(self) -> pd.DataFrame:
+        data_ = self.to_dataframe()
+        mean = data_.prices.astype(float).mean(axis=1).rename("mean")
+        std = data_.prices.astype(float).std(axis=1)
+        median = data_.prices.astype(float).median(axis=1).rename("median")
+        upper = (mean + std).rename("upper")
+        lower = (mean - std).rename("lower")
+        return pd.concat([mean, upper, lower, median], axis=1).sort_index()
+
+    def to_backtest_result(self) -> BacktestResult:
+
+        return BacktestResult.model_validate(
+            self.config.to_base_backtest_result().model_dump()
+            | {"data": json.loads(self.to_error().to_json()), "name": self.name}
+        )
+
+    def to_figure(self) -> go.Figure:
+
+        data_ = self.to_dataframe()
+        self.to_error()
+        column_chunks = [data_.iloc[:, i : i + 4] for i in range(0, data_.shape[1], 4)]
+        fig = go.Figure()
+        for data in column_chunks:
+            fig.add_trace(
+                go.Scatter(
+                    x=data.index,
+                    y=data.prices.astype(float),
+                    mode="lines",
+                    showlegend=False,
+                    customdata=data[
+                        ["symbols", "sell", "buy"]
+                    ],  # Include multiple overlay columns
+                    line={"color": "grey", "width": 0.5},  # normal grey
+                    opacity=0.5,
+                    hovertemplate=(
+                        "Date: %{x}<br>"
+                        + "Price: %{y:.2f}<br>"
+                        + "Symbols: %{customdata[0]}<br>"
+                        + "Sell: %{customdata[1]}<br>"
+                        + "Buy: %{customdata[2]}<extra></extra>"
+                    ),
+                )
+            )
+        for name, column in self.to_error().items():
+            fig.add_trace(
+                go.Scatter(
+                    x=column.index,
+                    y=column,
+                    mode="lines",
+                    line={"color": COLOR[name], "width": 1},
+                    showlegend=True,
+                    name=name,
+                )
+            )
+        fig.update_layout(
+            title="Predefined filter performance",
+            xaxis_title="Date",
+            yaxis_title="Prices [Currency]",
+        )
+        fig.show()
+        return fig
+
+
+def run_backtest(  # noqa: C901, PLR0915
+    bullish_db: "BullishDb", named_filter: "NamedFilterQuery", config: BackTestConfig
+) -> BackTest:
+    equities = []
+    start_date = config.start
+    presence_delta = timedelta(days=config.holding_period)
+    investment = config.investment
+    exclude_symbols = []
+    while True:
+        symbols = []
+        while not symbols:
+            symbols = named_filter.get_backtesting_symbols(bullish_db, start_date)
+            symbols = [b for b in symbols if b not in exclude_symbols]
+            if symbols:
+                break
+            start_date = start_date + timedelta(days=config.extend_days)
+            if start_date > config.end:
+                logger.debug("No symbols found for the given date range.")
+                break
+        if symbols:
+            symbol = random.choice(symbols)  # noqa: S311
+            logger.debug(f"Found symbol: {symbol}, for date: {start_date}")
+            enter_position = start_date
+            end_position = None
+            counter = 0
+            buy_price = None
+            while True:
+
+                data = bullish_db.read_symbol_series(
+                    symbol,
+                    start_date=enter_position + counter * presence_delta,
+                    end_date=enter_position + (counter + 1) * presence_delta,
+                )
+                if data.empty:
+                    logger.debug(f"No data found for symbol: {symbol}")
+                    exclude_symbols.append(symbol)
+                    end_position = start_date
+                    break
+                data.index = data.index.tz_localize(None)
+                if counter == 0:
+                    enter_position_timestamp = data.close.first_valid_index()
+                    enter_position = enter_position_timestamp.date()
+                    buy_price = data.close.loc[enter_position_timestamp]
+
+                mask = data.close >= buy_price * (
+                    1 + config.percentage / (100 * (counter + 1))
+                )
+                mask_ = mask[mask == True]  # noqa: E712
+
+                if mask_.empty:
+                    if enter_position + (counter + 1) * presence_delta > config.end:
+                        end_position = data.close.index[-1].date()
+                        sell_price = data.close.iloc[-1]
+                        equity = Equity(
+                            symbol=symbol,
+                            start=enter_position,
+                            end=end_position,
+                            buy=buy_price,
+                            sell=sell_price,
+                            investment_in=investment,
+                        )
+                        equity.set_investment_out()
+                        equities.append(equity)
+                        investment = equity.current_value()
+                        end_position = config.end
+                        break
+                    counter += 1
+                    continue
+                else:
+                    end_position_timestamp = data[mask].first_valid_index()
+                    end_position = end_position_timestamp.date()
+                    equity = Equity(
+                        symbol=symbol,
+                        start=enter_position,
+                        end=end_position,
+                        buy=buy_price,
+                        sell=data[mask].close.loc[end_position_timestamp],
+                        investment_in=investment,
+                    )
+                    equity.set_investment_out()
+                    equities.append(equity)
+                    investment = equity.current_value()
+                    break
+
+        start_date = end_position
+        if start_date >= config.end:
+            break
+    back_test = BackTest(equities=equities)
+    return back_test
+
+
+def run_tests(
+    bullish_db: "BullishDb", named_filter: "NamedFilterQuery", config: BackTestConfig
+) -> BackTests:
+    return BackTests(
+        config=config,
+        name=named_filter.name,
+        tests=[
+            run_backtest(bullish_db, named_filter, config)
+            for _ in range(config.iterations)
+        ],
+    )
+
+
+def run_many_tests(
+    bullish_db: "BullishDb",
+    named_filters: List["NamedFilterQuery"],
+    config: BackTestConfig,
+) -> None:
+    back_tests = []
+    for named_filter in named_filters:
+        try:
+            back_tests.append(
+                run_tests(bullish_db, named_filter, config).to_backtest_result()
+            )
+        except Exception as e:  # noqa: PERF203
+            logger.error(e)
+            continue
+
+    if back_tests:
+        bullish_db.write_many_backtest_results(back_tests)
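The query models at the top of the new `backtest.py` each render a `SELECT symbol FROM ...` fragment, and `BacktestQueries.to_query()` joins multiple fragments with `INTERSECT`. A small usage sketch, assuming the module imports as `bullish.analysis.backtest`; the table and column names below are stand-ins, not a documented schema:

```python
from datetime import date

from bullish.analysis.backtest import (
    BacktestQueries,
    BacktestQueryDate,
    BacktestQueryRange,
)

queries = BacktestQueries(
    queries=[
        # Symbols whose named signal fired inside the date window.
        BacktestQueryDate(
            name="RSI_BULLISH_CROSSOVER",  # illustrative signal name
            table="signal_series",  # illustrative table name
            start=date(2024, 1, 1),
            end=date(2024, 3, 31),
        ),
        # ...intersected with symbols whose column falls in a numeric range.
        BacktestQueryRange(
            name="market_capitalization", table="analysis", min=1e9, max=5e10
        ),
    ]
)

print(queries.to_query())
# SELECT symbol FROM signal_series WHERE name='RSI_BULLISH_CROSSOVER' AND date >='2024-01-01' AND date <='2024-03-31'
# INTERSECT SELECT symbol FROM analysis WHERE market_capitalization >= ... AND market_capitalization <= ...
```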
@@ -82,12 +82,15 @@ PROPERTIES_GROUP = list(
         {*INCOME_GROUP, *CASH_FLOW_GROUP, *EPS_GROUP}
     )
 )
-
-GROUP_MAPPING: Dict[str, List[str]] = {
+BOOLEAN_GROUP_MAPPING: Dict[str, List[str]] = {
     "income": INCOME_GROUP,
     "cash_flow": CASH_FLOW_GROUP,
     "eps": EPS_GROUP,
     "properties": PROPERTIES_GROUP,
+}
+GROUP_MAPPING: Dict[str, List[str]] = {
+    **BOOLEAN_GROUP_MAPPING,
+    "properties": PROPERTIES_GROUP,
     "country": list(get_args(Country)),
     "industry": list(get_args(Industry)),
     "industry_group": list(get_args(IndustryGroup)),
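The filter change above carves the boolean signal groups out into `BOOLEAN_GROUP_MAPPING` and rebuilds `GROUP_MAPPING` by unpacking it; in a dict literal, keys listed after the `**` unpack win, so the repeated `"properties"` entry simply re-asserts the same value. A toy illustration of that layering (group members here are placeholders):

```python
BOOLEAN_GROUP_MAPPING = {
    "income": ["positive_net_income"],  # placeholder members
    "cash_flow": ["positive_free_cash_flow"],
    "properties": ["some_property_flag"],
}

GROUP_MAPPING = {
    **BOOLEAN_GROUP_MAPPING,  # start from the boolean-only groups
    "properties": ["some_property_flag"],  # later key wins (same value here)
    "country": ["US", "DE"],  # non-boolean groups layered on top
}

print(sorted(GROUP_MAPPING))  # ['cash_flow', 'country', 'income', 'properties']
```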
@@ -15,10 +15,17 @@ except Exception:
     logger.warning("Talib is not installed, skipping analysis")


+def cross_simple(
+    series_a: pd.Series, series_b: pd.Series, above: bool = True
+) -> pd.Series:
+    crossing = ta.cross(series_a=series_a, series_b=series_b, above=above)
+    return crossing  # type: ignore
+
+
 def cross(
     series_a: pd.Series, series_b: pd.Series, above: bool = True
 ) -> Optional[date]:
-    crossing =
+    crossing = cross_simple(series_a=series_a, series_b=series_b, above=above)
     if not crossing[crossing == 1].index.empty:
         return crossing[crossing == 1].last_valid_index().date()  # type: ignore
     return None
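`cross` above reduces a crossover series (1 on the bar where `series_a` crosses `series_b`, 0 elsewhere) to the date of the most recent crossing, and the new `cross_simple` exposes the raw series so other call sites can reuse it. A pure-pandas sketch of that reduction, with a hand-rolled crossover mask standing in for `ta.cross` / `cross_simple`:

```python
from datetime import date
from typing import Optional

import pandas as pd

idx = pd.date_range("2024-01-01", periods=6, freq="D")
fast = pd.Series([1.0, 2.0, 3.0, 2.5, 3.5, 4.0], index=idx)
slow = pd.Series([2.0, 2.0, 2.0, 3.0, 3.0, 3.0], index=idx)

# 1 on the bar where `fast` moves from below to above `slow`, 0 otherwise
# (a simplified stand-in for ta.cross(series_a, series_b, above=True)).
above = fast > slow
crossing = (above & ~above.shift(1, fill_value=False)).astype(int)


def last_cross_date(crossing: pd.Series) -> Optional[date]:
    # Same reduction as cross(): keep the 1s and return the last index as a date.
    hits = crossing[crossing == 1]
    if hits.index.empty:
        return None
    return hits.last_valid_index().date()


print(last_cross_date(crossing))  # 2024-01-05
```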
@@ -31,10 +38,10 @@ def cross_value(series: pd.Series, number: int, above: bool = True) -> Optional[
 def cross_value_series(
     series_a: pd.Series, number: int, above: bool = True
 ) -> pd.Series:
-    crossing =
-    series_a
+    crossing = cross_simple(
+        series_a, pd.Series(number, index=series_a.index), above=above
     )
-    return crossing
+    return crossing


 def compute_adx(data: pd.DataFrame) -> pd.DataFrame:
@@ -302,14 +309,6 @@ def price_above_sma50(data: pd.DataFrame) -> Optional[date]:
     return date_1


-def momentum(data: pd.DataFrame) -> Optional[date]:
-    date_1 = find_last_true_run_start(data.SMA_50 < data.CLOSE)
-    date_2 = find_last_true_run_start(data.SMA_200 < data.SMA_50)
-    if date_1 is None or date_2 is None:
-        return None
-    return max(date_1, date_2)
-
-
 class IndicatorFunction(BaseModel):
     expected_columns: list[str]
     functions: list[Callable[[pd.DataFrame], pd.DataFrame]]
@@ -323,7 +322,9 @@ class IndicatorFunction(BaseModel):
             except Exception as e:
                 logger.error(f"Fail to compute function {function.__name__}: {e}")
         if data_ is None:
-            raise ValueError(
+            raise ValueError(
+                f"No data returned from indicator functions with expected columns {self.expected_columns}."
+            )
         if not set(self.expected_columns).issubset(set(data_.columns)):
             raise ValueError(
                 f"Expected columns {self.expected_columns} not found in data columns {data_.columns.tolist()}"
|