Qubx 0.6.23.tar.gz → 0.6.25.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- {qubx-0.6.23 → qubx-0.6.25}/PKG-INFO +1 -1
- {qubx-0.6.23 → qubx-0.6.25}/pyproject.toml +1 -1
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/backtester/data.py +5 -134
- qubx-0.6.25/src/qubx/backtester/runner.py +484 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/backtester/simulator.py +13 -22
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/backtester/utils.py +27 -39
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/connectors/ccxt/account.py +5 -5
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/connectors/ccxt/data.py +93 -18
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/connectors/ccxt/exchanges/__init__.py +5 -1
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/connectors/ccxt/exchanges/binance/exchange.py +1 -0
- qubx-0.6.25/src/qubx/connectors/ccxt/exchanges/bitfinex/bitfinex.py +43 -0
- qubx-0.6.25/src/qubx/connectors/ccxt/exchanges/kraken/kraken.py +14 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/connectors/ccxt/utils.py +20 -6
- qubx-0.6.25/src/qubx/connectors/tardis/data.py +733 -0
- qubx-0.6.25/src/qubx/connectors/tardis/utils.py +249 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/account.py +206 -20
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/basics.py +0 -9
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/context.py +55 -53
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/interfaces.py +34 -36
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/lookups.py +129 -18
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/metrics.py +14 -11
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/mixins/market.py +24 -9
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/mixins/subscription.py +58 -28
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/mixins/trading.py +35 -31
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/mixins/universe.py +0 -20
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/series.pyx +1 -1
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/data/helpers.py +1 -1
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/data/tardis.py +0 -1
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/restorers/state.py +2 -0
- qubx-0.6.25/src/qubx/utils/questdb.py +79 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/runner/accounts.py +0 -1
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/runner/configs.py +8 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/runner/runner.py +36 -15
- qubx-0.6.23/src/qubx/backtester/runner.py +0 -279
- {qubx-0.6.23 → qubx-0.6.25}/LICENSE +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/README.md +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/build.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/_nb_magic.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/backtester/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/backtester/account.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/backtester/broker.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/backtester/management.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/backtester/ome.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/backtester/optimization.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/backtester/simulated_data.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/backtester/simulated_exchange.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/cli/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/cli/commands.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/cli/deploy.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/cli/misc.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/cli/release.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/connectors/ccxt/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/connectors/ccxt/broker.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/connectors/ccxt/exceptions.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/connectors/ccxt/exchanges/binance/broker.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/connectors/ccxt/factory.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/connectors/ccxt/reader.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/deque.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/errors.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/exceptions.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/helpers.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/initializer.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/loggers.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/mixins/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/mixins/processing.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/series.pxd +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/series.pyi +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/utils.pyi +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/core/utils.pyx +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/data/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/data/composite.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/data/hft.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/data/readers.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/data/registry.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/emitters/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/emitters/base.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/emitters/composite.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/emitters/csv.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/emitters/prometheus.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/emitters/questdb.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/exporters/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/exporters/composite.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/exporters/formatters/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/exporters/formatters/base.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/exporters/formatters/incremental.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/exporters/formatters/slack.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/exporters/redis_streams.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/exporters/slack.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/features/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/features/core.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/features/orderbook.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/features/price.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/features/trades.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/features/utils.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/gathering/simplest.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/health/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/health/base.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/math/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/math/stats.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/notifications/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/notifications/composite.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/notifications/slack.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/pandaz/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/pandaz/ta.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/pandaz/utils.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/resources/_build.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/resources/instruments/symbols-binance.cm.json +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/resources/instruments/symbols-binance.json +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/resources/instruments/symbols-binance.um.json +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/resources/instruments/symbols-bitfinex.f.json +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/resources/instruments/symbols-bitfinex.json +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/resources/instruments/symbols-kraken.f.json +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/resources/instruments/symbols-kraken.json +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/restarts/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/restarts/state_resolvers.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/restarts/time_finders.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/restorers/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/restorers/balance.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/restorers/factory.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/restorers/interfaces.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/restorers/position.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/restorers/signal.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/restorers/utils.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/ta/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/ta/indicators.pxd +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/ta/indicators.pyi +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/ta/indicators.pyx +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/trackers/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/trackers/advanced.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/trackers/composite.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/trackers/rebalancers.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/trackers/riskctrl.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/trackers/sizers.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/_pyxreloader.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/charting/lookinglass.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/charting/mpl_helpers.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/collections.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/marketdata/binance.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/marketdata/ccxt.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/marketdata/dukas.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/misc.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/ntp.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/numbers_utils.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/orderbook.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/plotting/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/plotting/dashboard.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/plotting/data.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/plotting/interfaces.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/plotting/renderers/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/plotting/renderers/plotly.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/runner/__init__.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/runner/_jupyter_runner.pyt +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/runner/factory.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/time.py +0 -0
- {qubx-0.6.23 → qubx-0.6.25}/src/qubx/utils/version.py +0 -0
{qubx-0.6.23 → qubx-0.6.25}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "Qubx"
-version = "0.6.23"
+version = "0.6.25"
 description = "Qubx - Quantitative Trading Framework"
 authors = [ "Dmitry Marienko <dmitry.marienko@xlydian.com>", "Yuriy Arabskyy <yuriy.arabskyy@xlydian.com>",]
 readme = "README.md"
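The only change in pyproject.toml is the version bump from 0.6.23 to 0.6.25. If you want to confirm which build is installed after upgrading, a standard library check (nothing Qubx-specific, shown only as a convenience) is:

from importlib.metadata import version

# Prints the installed Qubx version; expected to be "0.6.25" after the upgrade.
print(version("Qubx"))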
{qubx-0.6.23 → qubx-0.6.25}/src/qubx/backtester/data.py
@@ -1,9 +1,6 @@
 from collections import defaultdict
-from typing import Any
 
-import numpy as np
 import pandas as pd
-from tqdm.auto import tqdm
 
 from qubx import logger
 from qubx.backtester.simulated_data import IterableSimulationData
@@ -13,7 +10,6 @@ from qubx.core.basics import (
     Instrument,
     TimestampedDict,
 )
-from qubx.core.exceptions import SimulationError
 from qubx.core.helpers import BasicScheduler
 from qubx.core.interfaces import IDataProvider
 from qubx.core.series import Bar, Quote, time_as_nsec
@@ -32,8 +28,6 @@ class SimulatedDataProvider(IDataProvider):
     _account: SimulatedAccountProcessor
     _last_quotes: dict[Instrument, Quote | None]
     _readers: dict[str, DataReader]
-    _pregenerated_signals: dict[Instrument, pd.Series | pd.DataFrame]
-    _to_process: dict[Instrument, list]
     _data_source: IterableSimulationData
     _open_close_time_indent_ns: int
 
@@ -45,6 +39,7 @@ class SimulatedDataProvider(IDataProvider):
         time_provider: SimulatedTimeProvider,
         account: SimulatedAccountProcessor,
         readers: dict[str, DataReader],
+        data_source: IterableSimulationData,
         open_close_time_indent_secs=1,
     ):
         self.channel = channel
@@ -54,79 +49,14 @@ class SimulatedDataProvider(IDataProvider):
         self._account = account
         self._readers = readers
 
-        # - create exchange's instance
-        self._last_quotes = defaultdict(lambda: None)
-
-        # - pregenerated signals storage
-        self._pregenerated_signals = dict()
-        self._to_process = {}
-
         # - simulation data source
-        self._data_source = IterableSimulationData(
-            self._readers, open_close_time_indent_secs=open_close_time_indent_secs
-        )
+        self._data_source = data_source
         self._open_close_time_indent_ns = open_close_time_indent_secs * 1_000_000_000  # convert seconds to nanoseconds
 
-
-
-    def run(
-        self,
-        start: str | pd.Timestamp,
-        end: str | pd.Timestamp,
-        silent: bool = False,
-    ) -> None:
-        logger.info(f"{self.__class__.__name__} ::: Simulation started at {start} :::")
-
-        if self._pregenerated_signals:
-            self._prepare_generated_signals(start, end)
-            _run = self._process_generated_signals
-        else:
-            _run = self._process_strategy
-
-        start, end = pd.Timestamp(start), pd.Timestamp(end)
-        total_duration = end - start
-        update_delta = total_duration / 100
-        prev_dt = pd.Timestamp(start)
-
-        # - date iteration
-        qiter = self._data_source.create_iterable(start, end)
-        if silent:
-            for instrument, data_type, event, is_hist in qiter:
-                if not _run(instrument, data_type, event, is_hist):
-                    break
-        else:
-            _p = 0
-            with tqdm(total=100, desc="Simulating", unit="%", leave=False) as pbar:
-                for instrument, data_type, event, is_hist in qiter:
-                    if not _run(instrument, data_type, event, is_hist):
-                        break
-                    dt = pd.Timestamp(event.time)
-                    # update only if date has changed
-                    if dt - prev_dt > update_delta:
-                        _p += 1
-                        pbar.n = _p
-                        pbar.refresh()
-                        prev_dt = dt
-                pbar.n = 100
-                pbar.refresh()
-
-        logger.info(f"{self.__class__.__name__} ::: Simulation finished at {end} :::")
-
-    def set_generated_signals(self, signals: pd.Series | pd.DataFrame):
-        logger.debug(
-            f"[<y>{self.__class__.__name__}</y>] :: Using pre-generated signals:\n {str(signals.count()).strip('ndtype: int64')}"
-        )
-        # - sanity check
-        signals.index = pd.DatetimeIndex(signals.index)
-
-        if isinstance(signals, pd.Series):
-            self._pregenerated_signals[str(signals.name)] = signals  # type: ignore
+        # - create exchange's instance
+        self._last_quotes = defaultdict(lambda: None)
 
-        elif isinstance(signals, pd.DataFrame):
-            for col in signals.columns:
-                self._pregenerated_signals[col] = signals[col]  # type: ignore
-        else:
-            raise ValueError("Invalid signals or strategy configuration")
+        logger.info(f"{self.__class__.__name__}.{exchange_id} is initialized")
 
     @property
     def is_simulation(self) -> bool:
@@ -200,26 +130,6 @@ class SimulatedDataProvider(IDataProvider):
     def close(self):
         pass
 
-    def _prepare_generated_signals(self, start: str | pd.Timestamp, end: str | pd.Timestamp):
-        for s, v in self._pregenerated_signals.items():
-            _s_inst = None
-
-            for i in self.get_subscribed_instruments():
-                # - we can process series with variable id's if we can find some similar instrument
-                if s == i.symbol or s == str(i) or s == f"{i.exchange}:{i.symbol}" or str(s) == str(i):
-                    _start, _end = pd.Timestamp(start), pd.Timestamp(end)
-                    _start_idx, _end_idx = v.index.get_indexer([_start, _end], method="ffill")
-                    sel = v.iloc[max(_start_idx, 0) : _end_idx + 1]
-
-                    # TODO: check if data has exec_price - it means we have deals
-                    self._to_process[i] = list(zip(sel.index, sel.values))
-                    _s_inst = i
-                    break
-
-            if _s_inst is None:
-                logger.error(f"Can't find instrument for pregenerated signals with id '{s}'")
-                raise SimulationError(f"Can't find instrument for pregenerated signals with id '{s}'")
-
     def _convert_records_to_bars(
         self, records: list[TimestampedDict], cut_time_ns: int, timeframe_ns: int
     ) -> list[Bar]:
@@ -252,44 +162,5 @@ class SimulatedDataProvider(IDataProvider):
 
         return bars
 
-    def _process_generated_signals(self, instrument: Instrument, data_type: str, data: Any, is_hist: bool) -> bool:
-        cc = self.channel
-        t = np.datetime64(data.time, "ns")
-
-        if not is_hist:
-            # - signals for this instrument
-            sigs = self._to_process[instrument]
-
-            while sigs and t >= (_signal_time := sigs[0][0].as_unit("ns").asm8):
-                self.time_provider.set_time(_signal_time)
-                cc.send((instrument, "event", {"order": sigs[0][1]}, False))
-                sigs.pop(0)
-
-            if q := self._account._exchange.emulate_quote_from_data(instrument, t, data):
-                self._last_quotes[instrument] = q
-
-        self.time_provider.set_time(t)
-        cc.send((instrument, data_type, data, is_hist))
-
-        return cc.control.is_set()
-
-    def _process_strategy(self, instrument: Instrument, data_type: str, data: Any, is_hist: bool) -> bool:
-        cc = self.channel
-        t = np.datetime64(data.time, "ns")
-
-        if not is_hist:
-            if t >= (_next_exp_time := self._scheduler.next_expected_event_time()):
-                # - we use exact event's time
-                self.time_provider.set_time(_next_exp_time)
-                self._scheduler.check_and_run_tasks()
-
-            if q := self._account._exchange.emulate_quote_from_data(instrument, t, data):
-                self._last_quotes[instrument] = q
-
-        self.time_provider.set_time(t)
-        cc.send((instrument, data_type, data, is_hist))
-
-        return cc.control.is_set()
-
     def exchange(self) -> str:
         return self._exchange_id.upper()
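The data.py changes above strip the pre-generated-signals storage and the run loop out of SimulatedDataProvider: the provider now receives a shared IterableSimulationData through the new data_source constructor argument instead of building its own. A minimal wiring sketch, assuming the surrounding objects (channel, scheduler, clock, account, readers) are created elsewhere (in 0.6.25 the new SimulationRunner shown below does this); the helper name build_data_provider is purely illustrative:

# Hypothetical sketch, not part of the package: in 0.6.23 the provider built its
# own IterableSimulationData; in 0.6.25 one shared data source is created up front
# and injected via the new `data_source` argument.
from qubx.backtester.data import SimulatedDataProvider
from qubx.backtester.simulated_data import IterableSimulationData


def build_data_provider(exchange_id, channel, scheduler, clock, account, readers):
    # one shared data source for all exchanges (previously constructed inside the provider)
    data_source = IterableSimulationData(readers, open_close_time_indent_secs=1)
    return SimulatedDataProvider(
        exchange_id=exchange_id,
        channel=channel,
        scheduler=scheduler,
        time_provider=clock,
        account=account,
        readers=readers,
        data_source=data_source,  # new required argument in 0.6.25
        open_close_time_indent_secs=1,
    )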
qubx-0.6.25/src/qubx/backtester/runner.py
@@ -0,0 +1,484 @@
+from typing import Any
+
+import numpy as np
+import pandas as pd
+from tqdm.auto import tqdm
+
+from qubx import logger
+from qubx.backtester.simulated_data import IterableSimulationData
+from qubx.core.account import CompositeAccountProcessor
+from qubx.core.basics import SW, DataType, Instrument, TransactionCostsCalculator
+from qubx.core.context import StrategyContext
+from qubx.core.exceptions import SimulationConfigError, SimulationError
+from qubx.core.helpers import extract_parameters_from_object, full_qualified_class_name
+from qubx.core.initializer import BasicStrategyInitializer
+from qubx.core.interfaces import (
+    CtrlChannel,
+    IMetricEmitter,
+    IStrategy,
+    IStrategyContext,
+    ITimeProvider,
+    StrategyState,
+)
+from qubx.core.loggers import InMemoryLogsWriter, StrategyLogging
+from qubx.core.lookups import lookup
+from qubx.pandaz.utils import _frame_to_str
+
+from .account import SimulatedAccountProcessor
+from .broker import SimulatedBroker
+from .data import SimulatedDataProvider
+from .simulated_exchange import get_simulated_exchange
+from .utils import (
+    SetupTypes,
+    SignalsProxy,
+    SimulatedCtrlChannel,
+    SimulatedScheduler,
+    SimulatedTimeProvider,
+    SimulationDataConfig,
+    SimulationSetup,
+)
+
+
+class SimulationRunner:
+    """
+    A wrapper around the StrategyContext that encapsulates the simulation logic.
+    This class is responsible for running a backtest context from a start time to an end time.
+    """
+
+    setup: SimulationSetup
+    data_config: SimulationDataConfig
+    start: pd.Timestamp
+    stop: pd.Timestamp
+    account_id: str
+    portfolio_log_freq: str
+    ctx: IStrategyContext
+    logs_writer: InMemoryLogsWriter
+
+    account: CompositeAccountProcessor
+    channel: CtrlChannel
+    time_provider: SimulatedTimeProvider
+    scheduler: SimulatedScheduler
+    strategy_params: dict[str, Any]
+    strategy_class: str
+
+    # adjusted times
+    _stop: pd.Timestamp | None = None
+
+    _data_source: IterableSimulationData
+    _data_providers: list[SimulatedDataProvider]
+    _exchange_to_data_provider: dict[str, SimulatedDataProvider]
+
+    def __init__(
+        self,
+        setup: SimulationSetup,
+        data_config: SimulationDataConfig,
+        start: pd.Timestamp | str,
+        stop: pd.Timestamp | str,
+        account_id: str = "SimulatedAccount",
+        portfolio_log_freq: str = "5Min",
+        emitter: IMetricEmitter | None = None,
+        strategy_state: StrategyState | None = None,
+        initializer: BasicStrategyInitializer | None = None,
+    ):
+        """
+        Initialize the BacktestContextRunner with a strategy context.
+
+        Args:
+            setup (SimulationSetup): The setup to run.
+            data_config (SimulationDataConfig): The data setup to use.
+            start (pd.Timestamp): The start time of the simulation.
+            stop (pd.Timestamp): The end time of the simulation.
+            account_id (str): The account id to use.
+            portfolio_log_freq (str): The portfolio log frequency to use.
+            emitter (IMetricEmitter): The emitter to use.
+        """
+        self.setup = setup
+        self.data_config = data_config
+        self.start = pd.Timestamp(start)
+        self.stop = pd.Timestamp(stop)
+        self.account_id = account_id
+        self.portfolio_log_freq = portfolio_log_freq
+        self.emitter = emitter
+        self.strategy_state = strategy_state if strategy_state is not None else StrategyState()
+        self.initializer = initializer
+        self._pregenerated_signals = dict()
+        self._to_process = {}
+
+        # - get strategy parameters BEFORE simulation start
+        # potentially strategy may change it's parameters during simulation
+        self.strategy_params = {}
+        self.strategy_class = ""
+        if self.setup.setup_type in [SetupTypes.STRATEGY, SetupTypes.STRATEGY_AND_TRACKER]:
+            self.strategy_params = extract_parameters_from_object(self.setup.generator)
+            self.strategy_class = full_qualified_class_name(self.setup.generator)
+
+        self.ctx = self._create_backtest_context()
+
+    def run(self, silent: bool = False, catch_keyboard_interrupt: bool = True, close_data_readers: bool = False):
+        """
+        Run the backtest from start to stop.
+
+        Args:
+            start (pd.Timestamp | str): The start time of the simulation.
+            stop (pd.Timestamp | str): The end time of the simulation.
+            silent (bool, optional): Whether to suppress progress output. Defaults to False.
+        """
+        logger.debug(f"[<y>SimulationRunner</y>] :: Running simulation from {self.start} to {self.stop}")
+
+        # Start the context
+        self.ctx.start()
+
+        # Apply default warmup periods if strategy didn't set them
+        for s in self.ctx.get_subscriptions():
+            if not self.ctx.get_warmup(s) and (_d_wt := self.data_config.default_warmups.get(s)):
+                logger.debug(
+                    f"[<y>SimulationRunner</y>] :: Strategy didn't set warmup period for <c>{s}</c> so default <c>{_d_wt}</c> will be used"
+                )
+                self.ctx.set_warmup({s: _d_wt})
+
+        # Subscribe to any custom data types if needed
+        def _is_known_type(t: str):
+            try:
+                DataType(t)
+                return True
+            except:  # noqa: E722
+                return False
+
+        for t, r in self.data_config.data_providers.items():
+            if not _is_known_type(t) or t in [
+                DataType.TRADE,
+                DataType.OHLC_TRADES,
+                DataType.OHLC_QUOTES,
+                DataType.QUOTE,
+                DataType.ORDERBOOK,
+            ]:
+                logger.debug(f"[<y>BacktestContextRunner</y>] :: Subscribing to: {t}")
+                self.ctx.subscribe(t, self.ctx.instruments)
+
+        stop = self._stop or self.stop
+
+        try:
+            self._run(self.start, stop, silent=silent)
+        except KeyboardInterrupt:
+            logger.error("Simulated trading interrupted by user!")
+            if not catch_keyboard_interrupt:
+                raise
+        finally:
+            # Stop the context
+            self.ctx.stop()
+            if close_data_readers:
+                for dp in self._data_providers:
+                    for reader in dp._readers.values():
+                        if hasattr(reader, "close"):
+                            reader.close()  # type: ignore
+
+    def _set_generated_signals(self, signals: pd.Series | pd.DataFrame):
+        logger.debug(
+            f"[<y>{self.__class__.__name__}</y>] :: Using pre-generated signals:\n {str(signals.count()).strip('ndtype: int64')}"
+        )
+        # - sanity check
+        signals.index = pd.DatetimeIndex(signals.index)
+
+        if isinstance(signals, pd.Series):
+            self._pregenerated_signals[str(signals.name)] = signals  # type: ignore
+
+        elif isinstance(signals, pd.DataFrame):
+            for col in signals.columns:
+                self._pregenerated_signals[col] = signals[col]  # type: ignore
+        else:
+            raise ValueError("Invalid signals or strategy configuration")
+
+    def _prepare_generated_signals(self, start: str | pd.Timestamp, end: str | pd.Timestamp):
+        for s, v in self._pregenerated_signals.items():
+            _s_inst = None
+
+            for i in self._data_providers[0].get_subscribed_instruments():
+                # - we can process series with variable id's if we can find some similar instrument
+                if s == i.symbol or s == str(i) or s == f"{i.exchange}:{i.symbol}" or str(s) == str(i):
+                    _start, _end = pd.Timestamp(start), pd.Timestamp(end)
+                    _start_idx, _end_idx = v.index.get_indexer([_start, _end], method="ffill")
+                    sel = v.iloc[max(_start_idx, 0) : _end_idx + 1]
+
+                    # TODO: check if data has exec_price - it means we have deals
+                    self._to_process[i] = list(zip(sel.index, sel.values))
+                    _s_inst = i
+                    break
+
+            if _s_inst is None:
+                logger.error(f"Can't find instrument for pregenerated signals with id '{s}'")
+                raise SimulationError(f"Can't find instrument for pregenerated signals with id '{s}'")
+
+    def _process_generated_signals(self, instrument: Instrument, data_type: str, data: Any, is_hist: bool) -> bool:
+        cc = self.channel
+        t = np.datetime64(data.time, "ns")
+        _account = self.account.get_account_processor(instrument.exchange)
+        _data_provider = self._exchange_to_data_provider[instrument.exchange]
+        assert isinstance(_account, SimulatedAccountProcessor)
+        assert isinstance(_data_provider, SimulatedDataProvider)
+
+        if not is_hist:
+            # - signals for this instrument
+            sigs = self._to_process[instrument]
+
+            while sigs and t >= (_signal_time := sigs[0][0].as_unit("ns").asm8):
+                self.time_provider.set_time(_signal_time)
+                cc.send((instrument, "event", {"order": sigs[0][1]}, False))
+                sigs.pop(0)
+
+            if q := _account._exchange.emulate_quote_from_data(instrument, t, data):
+                _data_provider._last_quotes[instrument] = q
+
+        self.time_provider.set_time(t)
+        cc.send((instrument, data_type, data, is_hist))
+
+        return cc.control.is_set()
+
+    def _process_strategy(self, instrument: Instrument, data_type: str, data: Any, is_hist: bool) -> bool:
+        cc = self.channel
+        t = np.datetime64(data.time, "ns")
+        _account = self.account.get_account_processor(instrument.exchange)
+        _data_provider = self._exchange_to_data_provider[instrument.exchange]
+        assert isinstance(_account, SimulatedAccountProcessor)
+        assert isinstance(_data_provider, SimulatedDataProvider)
+
+        if not is_hist:
+            if t >= (_next_exp_time := self.scheduler.next_expected_event_time()):
+                # - we use exact event's time
+                self.time_provider.set_time(_next_exp_time)
+                self.scheduler.check_and_run_tasks()
+
+            if q := _account._exchange.emulate_quote_from_data(instrument, t, data):
+                _data_provider._last_quotes[instrument] = q
+
+        self.time_provider.set_time(t)
+        cc.send((instrument, data_type, data, is_hist))
+
+        return cc.control.is_set()
+
+    def _run(self, start: pd.Timestamp, stop: pd.Timestamp, silent: bool = False) -> None:
+        logger.info(f"{self.__class__.__name__} ::: Simulation started at {start} :::")
+
+        if self._pregenerated_signals:
+            self._prepare_generated_signals(start, stop)
+            _run = self._process_generated_signals
+        else:
+            _run = self._process_strategy
+
+        start, stop = pd.Timestamp(start), pd.Timestamp(stop)
+        total_duration = stop - start
+        update_delta = total_duration / 100
+        prev_dt = pd.Timestamp(start)
+
+        # - date iteration
+        qiter = self._data_source.create_iterable(start, stop)
+        if silent:
+            for instrument, data_type, event, is_hist in qiter:
+                if not _run(instrument, data_type, event, is_hist):
+                    break
+        else:
+            _p = 0
+            with tqdm(total=100, desc="Simulating", unit="%", leave=False) as pbar:
+                for instrument, data_type, event, is_hist in qiter:
+                    if not _run(instrument, data_type, event, is_hist):
+                        break
+                    dt = pd.Timestamp(event.time)
+                    # update only if date has changed
+                    if dt - prev_dt > update_delta:
+                        _p += 1
+                        pbar.n = _p
+                        pbar.refresh()
+                        prev_dt = dt
+                pbar.n = 100
+                pbar.refresh()
+
+        logger.info(f"{self.__class__.__name__} ::: Simulation finished at {stop} :::")
+
+    def print_latency_report(self) -> None:
+        _l_r = SW.latency_report()
+        if _l_r is not None:
+            logger.info(
+                "<BLUE> Time spent in simulation report </BLUE>\n<r>"
+                + _frame_to_str(
+                    _l_r.sort_values("latency", ascending=False).reset_index(drop=True), "simulation", -1, -1, False
+                )
+                + "</r>"
+            )
+
+    def _create_backtest_context(self) -> IStrategyContext:
+        logger.debug(
+            f"[<y>Simulator</y>] :: Preparing simulated trading on <g>{self.setup.exchanges}</g> "
+            f"for {self.setup.capital} {self.setup.base_currency}..."
+        )
+
+        data_source = IterableSimulationData(
+            self.data_config.data_providers,
+            open_close_time_indent_secs=self.data_config.adjusted_open_close_time_indent_secs,
+        )
+
+        channel = SimulatedCtrlChannel("databus", sentinel=(None, None, None, None))
+        simulated_clock = SimulatedTimeProvider(np.datetime64(self.start, "ns"))
+
+        account = self._construct_account_processor(
+            self.setup.exchanges, self.setup.commissions, simulated_clock, channel
+        )
+
+        scheduler = SimulatedScheduler(channel, lambda: simulated_clock.time().item())
+
+        brokers = []
+        for exchange in self.setup.exchanges:
+            _exchange_account = account.get_account_processor(exchange)
+            assert isinstance(_exchange_account, SimulatedAccountProcessor)
+            brokers.append(SimulatedBroker(channel, _exchange_account, _exchange_account._exchange))
+
+        data_providers = []
+        for exchange in self.setup.exchanges:
+            _exchange_account = account.get_account_processor(exchange)
+            assert isinstance(_exchange_account, SimulatedAccountProcessor)
+            data_providers.append(
+                SimulatedDataProvider(
+                    exchange_id=exchange,
+                    channel=channel,
+                    scheduler=scheduler,
+                    time_provider=simulated_clock,
+                    account=_exchange_account,
+                    readers=self.data_config.data_providers,
+                    data_source=data_source,
+                    open_close_time_indent_secs=self.data_config.adjusted_open_close_time_indent_secs,
+                )
+            )
+
+        # - get aux data provider
+        _aux_data = self.data_config.get_timeguarded_aux_reader(simulated_clock)
+
+        # - it will store simulation results into memory
+        logs_writer = InMemoryLogsWriter(self.account_id, self.setup.name, "0")
+
+        # - it will store simulation results into memory
+        strat: IStrategy | None = None
+
+        match self.setup.setup_type:
+            case SetupTypes.STRATEGY:
+                strat = self.setup.generator  # type: ignore
+
+            case SetupTypes.STRATEGY_AND_TRACKER:
+                strat = self.setup.generator  # type: ignore
+                strat.tracker = lambda ctx: self.setup.tracker  # type: ignore
+
+            case SetupTypes.SIGNAL:
+                strat = SignalsProxy(timeframe=self.setup.signal_timeframe)
+                if len(data_providers) > 1:
+                    raise SimulationConfigError("Signal setup is not supported for multiple exchanges !")
+
+                self._set_generated_signals(self.setup.generator)  # type: ignore
+
+                # - we don't need any unexpected triggerings
+                self._stop = min(self.setup.generator.index[-1], self.stop)  # type: ignore
+
+            case SetupTypes.SIGNAL_AND_TRACKER:
+                strat = SignalsProxy(timeframe=self.setup.signal_timeframe)
+                strat.tracker = lambda ctx: self.setup.tracker
+                if len(data_providers) > 1:
+                    raise SimulationConfigError("Signal setup is not supported for multiple exchanges !")
+
+                self._set_generated_signals(self.setup.generator)  # type: ignore
+
+                # - we don't need any unexpected triggerings
+                self._stop = min(self.setup.generator.index[-1], self.stop)  # type: ignore
+
+            case _:
+                raise SimulationError(f"Unsupported setup type: {self.setup.setup_type} !")
+
+        if not isinstance(strat, IStrategy):
+            raise SimulationConfigError(f"Strategy should be an instance of IStrategy, but got {strat} !")
+
+        ctx = StrategyContext(
+            strategy=strat,
+            brokers=brokers,
+            data_providers=data_providers,
+            account=account,
+            scheduler=scheduler,
+            time_provider=simulated_clock,
+            instruments=self.setup.instruments,
+            logging=StrategyLogging(logs_writer, portfolio_log_freq=self.portfolio_log_freq),
+            aux_data_provider=_aux_data,
+            emitter=self.emitter,
+            strategy_state=self.strategy_state,
+            initializer=self.initializer,
+        )
+
+        if self.emitter is not None:
+            self.emitter.set_time_provider(simulated_clock)
+
+        # - setup base subscription from spec
+        if ctx.get_base_subscription() == DataType.NONE:
+            logger.debug(
+                f"[<y>simulator</y>] :: Setting up default base subscription: {self.data_config.default_base_subscription}"
+            )
+            ctx.set_base_subscription(self.data_config.default_base_subscription)
+
+        # - set default on_event schedule if detected and strategy didn't set it's own schedule
+        if not ctx.get_event_schedule("time") and self.data_config.default_trigger_schedule:
+            logger.debug(f"[<y>simulator</y>] :: Setting default schedule: {self.data_config.default_trigger_schedule}")
+            ctx.set_event_schedule(self.data_config.default_trigger_schedule)
+
+        self.logs_writer = logs_writer
+        self.channel = channel
+        self.time_provider = simulated_clock
+        self.account = account
+        self.scheduler = scheduler
+        self._data_source = data_source
+        self._data_providers = data_providers
+        self._exchange_to_data_provider = {dp.exchange(): dp for dp in data_providers}
+        return ctx
+
+    def _construct_tcc(
+        self, exchanges: list[str], commissions: str | dict[str, str | None] | None
+    ) -> dict[str, TransactionCostsCalculator]:
+        _exchange_to_tcc = {}
+        if isinstance(commissions, (str, type(None))):
+            commissions = {e: commissions for e in exchanges}
+        for exchange in exchanges:
+            _exchange_to_tcc[exchange] = lookup.fees.find(exchange.lower(), commissions.get(exchange))
+        return _exchange_to_tcc
+
+    def _construct_account_processor(
+        self,
+        exchanges: list[str],
+        commissions: str | dict[str, str | None] | None,
+        time_provider: ITimeProvider,
+        channel: CtrlChannel,
+    ) -> CompositeAccountProcessor:
+        _exchange_to_tcc = self._construct_tcc(exchanges, commissions)
+        for tcc in _exchange_to_tcc.values():
+            if tcc is None:
+                raise SimulationConfigError(
+                    f"Can't find transaction costs calculator for '{self.setup.exchanges}' for specification '{self.setup.commissions}' !"
+                )
+
+        _exchange_to_simulated_exchange = {}
+        for exchange in self.setup.exchanges:
+            # - create simulated exchange:
+            # - we can use different emulations of real exchanges features in future here: for Binance, Bybit, InteractiveBrokers, etc.
+            # - for now we use simple basic simulated exchange implementation
+            _exchange_to_simulated_exchange[exchange] = get_simulated_exchange(
+                exchange, time_provider, _exchange_to_tcc[exchange], self.setup.accurate_stop_orders_execution
+            )
+
+        _account_processors = {}
+        for exchange in self.setup.exchanges:
+            _initial_capital = self.setup.capital
+            if isinstance(_initial_capital, dict):
+                _initial_capital = _initial_capital[exchange]
+            assert isinstance(_initial_capital, (float, int))
+            _account_processors[exchange] = SimulatedAccountProcessor(
+                account_id=self.account_id,
+                exchange=_exchange_to_simulated_exchange[exchange],
+                channel=channel,
+                base_currency=self.setup.base_currency,
+                initial_capital=_initial_capital,
+            )
+
+        return CompositeAccountProcessor(
+            time_provider=time_provider,
+            account_processors=_account_processors,
+        )