Qubx 0.6.37-cp312-cp312-manylinux_2_39_x86_64.whl → 0.6.40-cp312-cp312-manylinux_2_39_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qubx/backtester/runner.py +2 -1
- qubx/cli/deploy.py +27 -3
- qubx/core/loggers.py +3 -160
- qubx/core/metrics.py +1 -1
- qubx/core/series.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/core/utils.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/loggers/__init__.py +17 -0
- qubx/loggers/csv.py +100 -0
- qubx/loggers/factory.py +55 -0
- qubx/loggers/inmemory.py +68 -0
- qubx/loggers/mongo.py +80 -0
- qubx/restorers/balance.py +76 -0
- qubx/restorers/factory.py +8 -4
- qubx/restorers/position.py +95 -0
- qubx/restorers/signal.py +115 -0
- qubx/restorers/state.py +89 -3
- qubx/ta/indicators.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/utils/runner/configs.py +2 -1
- qubx/utils/runner/factory.py +6 -6
- qubx/utils/runner/runner.py +16 -27
- {qubx-0.6.37.dist-info → qubx-0.6.40.dist-info}/METADATA +1 -1
- {qubx-0.6.37.dist-info → qubx-0.6.40.dist-info}/RECORD +25 -20
- {qubx-0.6.37.dist-info → qubx-0.6.40.dist-info}/LICENSE +0 -0
- {qubx-0.6.37.dist-info → qubx-0.6.40.dist-info}/WHEEL +0 -0
- {qubx-0.6.37.dist-info → qubx-0.6.40.dist-info}/entry_points.txt +0 -0
qubx/backtester/runner.py
CHANGED

@@ -20,8 +20,9 @@ from qubx.core.interfaces import (
     ITimeProvider,
     StrategyState,
 )
-from qubx.core.loggers import
+from qubx.core.loggers import StrategyLogging
 from qubx.core.lookups import lookup
+from qubx.loggers.inmemory import InMemoryLogsWriter
 from qubx.pandaz.utils import _frame_to_str
 
 from .account import SimulatedAccountProcessor
qubx/cli/deploy.py
CHANGED

@@ -182,6 +182,29 @@ def setup_poetry_environment(output_dir: str) -> bool:
         return False
 
 
+def create_strategy_runners(output_dir: str):
+    """
+    Creates a strategy runner script in the output_dir
+    """
+    import sys
+
+    if sys.platform == "win32":
+        _pfx = ""
+        _f_name = os.path.join(output_dir, "run_paper.bat")
+    else:
+        _pfx = "#!/bin/bash\n"
+        _f_name = os.path.join(output_dir, "run_paper.sh")
+
+    logger.info(f"Creating strategy paper runner script: {_f_name}")
+
+    try:
+        with open(_f_name, "w") as f:
+            f.write(f"{_pfx}poetry run qubx run config.yml --paper -j")
+        os.chmod(_f_name, 0o755)
+    except Exception as e:
+        logger.error(f"Failed to create strategy paper runner script: {e}")
+
+
 def deploy_strategy(zip_file: str, output_dir: str | None, force: bool) -> bool:
     """
     Deploys a strategy from a zip file created by the release command.
@@ -222,9 +245,10 @@ def deploy_strategy(zip_file: str, output_dir: str | None, force: bool) -> bool:
     if not setup_poetry_environment(resolved_output_dir):
         return False
 
+    # Create the strategy runners
+    create_strategy_runners(resolved_output_dir)
+
     # Success messages
     logger.info(f"Strategy deployed successfully to {resolved_output_dir}")
-    logger.info(
-        f"To run the strategy (paper mode): <cyan>cd {resolved_output_dir} && poetry run qubx run config.yml --paper</cyan>"
-    )
+    logger.info(f" -> To run the strategy (in paper mode): <cyan>cd {resolved_output_dir} && ./run_paper.sh</cyan>")
     return True
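Note: given the f.write call above, the generated run_paper.sh on Linux/macOS would contain exactly:

#!/bin/bash
poetry run qubx run config.yml --paper -j

(on Windows, run_paper.bat holds the same poetry command without the shebang line).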
qubx/core/loggers.py
CHANGED

@@ -1,10 +1,6 @@
-import csv
-import os
-from multiprocessing.pool import ThreadPool
 from typing import Any, Dict, List, Tuple
 
 import numpy as np
-import pandas as pd
 
 from qubx import logger
 from qubx.core.basics import (
@@ -14,11 +10,11 @@ from qubx.core.basics import (
     Position,
     TargetPosition,
 )
-
+
 from qubx.core.series import time_as_nsec
 from qubx.core.utils import recognize_timeframe
-
-from qubx.utils.misc import Stopwatch
+
+from qubx.utils.misc import Stopwatch
 from qubx.utils.time import convert_tf_str_td64, floor_t64
 
 _SW = Stopwatch()
@@ -48,159 +44,6 @@ class LogsWriter:
     pass
 
 
-class InMemoryLogsWriter(LogsWriter):
-    _portfolio: List
-    _execs: List
-    _signals: List
-
-    def __init__(self, account_id: str, strategy_id: str, run_id: str) -> None:
-        super().__init__(account_id, strategy_id, run_id)
-        self._portfolio = []
-        self._execs = []
-        self._signals = []
-
-    def write_data(self, log_type: str, data: List[Dict[str, Any]]):
-        if len(data) > 0:
-            if log_type == "portfolio":
-                self._portfolio.extend(data)
-            elif log_type == "executions":
-                self._execs.extend(data)
-            elif log_type == "signals":
-                self._signals.extend(data)
-
-    def get_portfolio(self, as_plain_dataframe=True) -> pd.DataFrame:
-        pfl = pd.DataFrame.from_records(self._portfolio, index="timestamp")
-        pfl.index = pd.DatetimeIndex(pfl.index)
-        if as_plain_dataframe:
-            # - convert to Qube presentation (TODO: temporary)
-            pis = []
-            for s in set(pfl["symbol"]):
-                pi = pfl[pfl["symbol"] == s]
-                pi = pi.drop(columns=["symbol", "realized_pnl_quoted", "current_price", "exchange_time"])
-                pi = pi.rename(
-                    {
-                        "pnl_quoted": "PnL",
-                        "quantity": "Pos",
-                        "avg_position_price": "Price",
-                        "market_value_quoted": "Value",
-                        "commissions_quoted": "Commissions",
-                    },
-                    axis=1,
-                )
-                # We want to convert the value to just price * quantity
-                # in reality value of perps is just the unrealized pnl but
-                # it's not important after simulation for metric calculations
-                pi["Value"] = pi["Pos"] * pi["Price"] + pi["Value"]
-                pis.append(pi.rename(lambda x: s + "_" + x, axis=1))
-            return split_cumulative_pnl(scols(*pis))
-        return pfl
-
-    def get_executions(self) -> pd.DataFrame:
-        p = pd.DataFrame()
-        if self._execs:
-            p = pd.DataFrame.from_records(self._execs, index="timestamp")
-            p.index = pd.DatetimeIndex(p.index)
-        return p
-
-    def get_signals(self) -> pd.DataFrame:
-        p = pd.DataFrame()
-        if self._signals:
-            p = pd.DataFrame.from_records(self._signals, index="timestamp")
-            p.index = pd.DatetimeIndex(p.index)
-        return p
-
-
-class CsvFileLogsWriter(LogsWriter):
-    """
-    Simple CSV strategy log data writer. It does data writing in separate thread.
-    """
-
-    def __init__(self, account_id: str, strategy_id: str, run_id: str, log_folder="logs") -> None:
-        super().__init__(account_id, strategy_id, run_id)
-
-        path = makedirs(log_folder)
-        # - it rewrites positions every time
-        self._pos_file_path = f"{path}/{self.strategy_id}_{self.account_id}_positions.csv"
-        self._balance_file_path = f"{path}/{self.strategy_id}_{self.account_id}_balance.csv"
-
-        _pfl_path = f"{path}/{strategy_id}_{account_id}_portfolio.csv"
-        _exe_path = f"{path}/{strategy_id}_{account_id}_executions.csv"
-        _sig_path = f"{path}/{strategy_id}_{account_id}_signals.csv"
-        self._hdr_pfl = not os.path.exists(_pfl_path)
-        self._hdr_exe = not os.path.exists(_exe_path)
-        self._hdr_sig = not os.path.exists(_sig_path)
-
-        self._pfl_file_ = open(_pfl_path, "+a", newline="")
-        self._execs_file_ = open(_exe_path, "+a", newline="")
-        self._sig_file_ = open(_sig_path, "+a", newline="")
-        self._pfl_writer = csv.writer(self._pfl_file_)
-        self._exe_writer = csv.writer(self._execs_file_)
-        self._sig_writer = csv.writer(self._sig_file_)
-        self.pool = ThreadPool(3)
-
-    @staticmethod
-    def _header(d: dict) -> List[str]:
-        return list(d.keys()) + ["run_id"]
-
-    def _values(self, data: List[Dict[str, Any]]) -> List[List[str]]:
-        # - attach run_id (last column)
-        return [list((d | {"run_id": self.run_id}).values()) for d in data]
-
-    def _do_write(self, log_type, data):
-        match log_type:
-            case "positions":
-                with open(self._pos_file_path, "w", newline="") as f:
-                    w = csv.writer(f)
-                    w.writerow(self._header(data[0]))
-                    w.writerows(self._values(data))
-
-            case "portfolio":
-                if self._hdr_pfl:
-                    self._pfl_writer.writerow(self._header(data[0]))
-                    self._hdr_pfl = False
-                self._pfl_writer.writerows(self._values(data))
-                self._pfl_file_.flush()
-
-            case "executions":
-                if self._hdr_exe:
-                    self._exe_writer.writerow(self._header(data[0]))
-                    self._hdr_exe = False
-                self._exe_writer.writerows(self._values(data))
-                self._execs_file_.flush()
-
-            case "signals":
-                if self._hdr_sig:
-                    self._sig_writer.writerow(self._header(data[0]))
-                    self._hdr_sig = False
-                self._sig_writer.writerows(self._values(data))
-                self._sig_file_.flush()
-
-            case "balance":
-                with open(self._balance_file_path, "w", newline="") as f:
-                    w = csv.writer(f)
-                    w.writerow(self._header(data[0]))
-                    w.writerows(self._values(data))
-
-    def write_data(self, log_type: str, data: List[Dict[str, Any]]):
-        if len(data) > 0:
-            self.pool.apply_async(self._do_write, (log_type, data))
-
-    def flush_data(self):
-        try:
-            self._pfl_file_.flush()
-            self._execs_file_.flush()
-            self._sig_file_.flush()
-        except Exception as e:
-            logger.warning(f"Error flushing log writer: {str(e)}")
-
-    def close(self):
-        self._pfl_file_.close()
-        self._execs_file_.close()
-        self._sig_file_.close()
-        self.pool.close()
-        self.pool.join()
-
-
 class _BaseIntervalDumper:
     """
     Basic functionality for all interval based dumpers
qubx/core/metrics.py
CHANGED

@@ -884,7 +884,7 @@ class TradingSessionResult:
         _perf = info.pop("performance", None)
         info["instruments"] = info.pop("symbols")
         # - fix for old versions
-        _exch = info.pop("exchange")
+        _exch = info.pop("exchange") if "exchange" in info else info.pop("exchanges")
         info["exchanges"] = _exch if isinstance(_exch, list) else [_exch]
         tsr = TradingSessionResult(**info, portfolio_log=portfolio, executions_log=executions, signals_log=signals)
         tsr.qubx_version = _qbx_version
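The changed line makes deserialization of saved sessions tolerant of both the legacy "exchange" key and the newer "exchanges" key. A minimal sketch of the behavior (the payloads are hypothetical):

# Old-style payload: info = {"exchange": "BINANCE"}    -> info["exchanges"] == ["BINANCE"]
# New-style payload: info = {"exchanges": ["BINANCE"]} -> info["exchanges"] == ["BINANCE"]
info = {"exchanges": ["BINANCE"]}
_exch = info.pop("exchange") if "exchange" in info else info.pop("exchanges")
info["exchanges"] = _exch if isinstance(_exch, list) else [_exch]
assert info == {"exchanges": ["BINANCE"]}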
qubx/core/series.cpython-312-x86_64-linux-gnu.so
CHANGED
Binary file

qubx/core/utils.cpython-312-x86_64-linux-gnu.so
CHANGED
Binary file
qubx/loggers/__init__.py
ADDED

@@ -0,0 +1,17 @@
+"""
+Loggers module for qubx.
+
+This module provides implementations for logs writing, like csv writer or mongodb writer.
+"""
+
+from qubx.loggers.csv import CsvFileLogsWriter
+from qubx.loggers.inmemory import InMemoryLogsWriter
+from qubx.loggers.mongo import MongoDBLogsWriter
+from qubx.loggers.factory import create_logs_writer
+
+__all__ = [
+    "CsvFileLogsWriter",
+    "InMemoryLogsWriter",
+    "MongoDBLogsWriter",
+    "create_logs_writer",
+]
qubx/loggers/csv.py
ADDED

@@ -0,0 +1,100 @@
+import csv
+import os
+
+from typing import Any, Dict, List
+from multiprocessing.pool import ThreadPool
+
+from qubx import logger
+from qubx.core.loggers import LogsWriter
+from qubx.utils.misc import makedirs
+
+class CsvFileLogsWriter(LogsWriter):
+    """
+    Simple CSV strategy log data writer. It does data writing in separate thread.
+    """
+
+    def __init__(self, account_id: str, strategy_id: str, run_id: str, log_folder="logs") -> None:
+        super().__init__(account_id, strategy_id, run_id)
+
+        path = makedirs(log_folder)
+        # - it rewrites positions every time
+        self._pos_file_path = f"{path}/{self.strategy_id}_{self.account_id}_positions.csv"
+        self._balance_file_path = f"{path}/{self.strategy_id}_{self.account_id}_balance.csv"
+
+        _pfl_path = f"{path}/{strategy_id}_{account_id}_portfolio.csv"
+        _exe_path = f"{path}/{strategy_id}_{account_id}_executions.csv"
+        _sig_path = f"{path}/{strategy_id}_{account_id}_signals.csv"
+        self._hdr_pfl = not os.path.exists(_pfl_path)
+        self._hdr_exe = not os.path.exists(_exe_path)
+        self._hdr_sig = not os.path.exists(_sig_path)
+
+        self._pfl_file_ = open(_pfl_path, "+a", newline="")
+        self._execs_file_ = open(_exe_path, "+a", newline="")
+        self._sig_file_ = open(_sig_path, "+a", newline="")
+        self._pfl_writer = csv.writer(self._pfl_file_)
+        self._exe_writer = csv.writer(self._execs_file_)
+        self._sig_writer = csv.writer(self._sig_file_)
+        self.pool = ThreadPool(3)
+
+    @staticmethod
+    def _header(d: dict) -> List[str]:
+        return list(d.keys()) + ["run_id"]
+
+    def _values(self, data: List[Dict[str, Any]]) -> List[List[str]]:
+        # - attach run_id (last column)
+        return [list((d | {"run_id": self.run_id}).values()) for d in data]
+
+    def _do_write(self, log_type, data):
+        match log_type:
+            case "positions":
+                with open(self._pos_file_path, "w", newline="") as f:
+                    w = csv.writer(f)
+                    w.writerow(self._header(data[0]))
+                    w.writerows(self._values(data))
+
+            case "portfolio":
+                if self._hdr_pfl:
+                    self._pfl_writer.writerow(self._header(data[0]))
+                    self._hdr_pfl = False
+                self._pfl_writer.writerows(self._values(data))
+                self._pfl_file_.flush()
+
+            case "executions":
+                if self._hdr_exe:
+                    self._exe_writer.writerow(self._header(data[0]))
+                    self._hdr_exe = False
+                self._exe_writer.writerows(self._values(data))
+                self._execs_file_.flush()
+
+            case "signals":
+                if self._hdr_sig:
+                    self._sig_writer.writerow(self._header(data[0]))
+                    self._hdr_sig = False
+                self._sig_writer.writerows(self._values(data))
+                self._sig_file_.flush()
+
+            case "balance":
+                with open(self._balance_file_path, "w", newline="") as f:
+                    w = csv.writer(f)
+                    w.writerow(self._header(data[0]))
+                    w.writerows(self._values(data))
+
+    def write_data(self, log_type: str, data: List[Dict[str, Any]]):
+        if len(data) > 0:
+            self.pool.apply_async(self._do_write, (log_type, data))
+
+    def flush_data(self):
+        try:
+            self._pfl_file_.flush()
+            self._execs_file_.flush()
+            self._sig_file_.flush()
+        except Exception as e:
+            logger.warning(f"Error flushing log writer: {str(e)}")
+
+    def close(self):
+        self._pfl_file_.close()
+        self._execs_file_.close()
+        self._sig_file_.close()
+        self.pool.close()
+        self.pool.join()
+
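A minimal usage sketch for the relocated writer (account/strategy/run identifiers and record fields are made-up examples): rows are appended to per-strategy CSV files under log_folder, and because _do_write runs on a ThreadPool, close() should be called to drain pending writes:

from qubx.loggers.csv import CsvFileLogsWriter

writer = CsvFileLogsWriter(account_id="acc-1", strategy_id="my_strategy", run_id="run-001", log_folder="logs")
# Appends to logs/my_strategy_acc-1_signals.csv, writing the header row only if the file is new
writer.write_data("signals", [{"timestamp": "2024-01-01T00:00:00", "symbol": "BTCUSDT", "signal": 1.0}])
writer.flush_data()
writer.close()  # closes the files and joins the thread pool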
qubx/loggers/factory.py
ADDED

@@ -0,0 +1,55 @@
+import inspect
+
+from typing import Type
+
+from qubx.core.loggers import LogsWriter
+from qubx.loggers.csv import CsvFileLogsWriter
+from qubx.loggers.mongo import MongoDBLogsWriter
+from qubx.loggers.inmemory import InMemoryLogsWriter
+
+# Registry of logs writer types
+LOGS_WRITER_REGISTRY: dict[str, Type[LogsWriter]] = {
+    "CsvFileLogsWriter": CsvFileLogsWriter,
+    "MongoDBLogsWriter": MongoDBLogsWriter,
+    "InMemoryLogsWriter": InMemoryLogsWriter
+}
+
+def create_logs_writer(log_writer_type: str, parameters: dict | None = None) -> LogsWriter:
+    """
+    Create a logs writer based on configuration.
+
+    Args:
+        log_wirter_type: The type of logs writer to create.
+        parameters: Parameters to pass to the logs writer constructor.
+
+    Returns:
+        An instance of the specified logs writer.
+
+    Raises:
+        ValueError: If the specified logs writer type is not registered.
+    """
+    if log_writer_type not in LOGS_WRITER_REGISTRY:
+        raise ValueError(
+            f"Unknown logs writer type: {log_writer_type}. "
+            f"Available types: {', '.join(LOGS_WRITER_REGISTRY.keys())}"
+        )
+
+    logs_writer_class = LOGS_WRITER_REGISTRY[log_writer_type]
+    params = parameters.copy() if parameters else {}
+
+    sig = inspect.signature(logs_writer_class)
+    accepted_params = set(sig.parameters.keys())
+    filtered_params = {k: v for k, v in params.items() if k in accepted_params}
+
+    return logs_writer_class(**filtered_params)
+
+
+def register_logs_writer(log_writer_type: str, logs_witer_class: Type[LogsWriter]) -> None:
+    """
+    Register a new logs writer type.
+
+    Args:
+        log_writer_type: The name of the logs writer type.
+        logs_witer_class: The logs writer class to register.
+    """
+    LOGS_WRITER_REGISTRY[log_writer_type] = logs_witer_class
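Because the factory filters the supplied parameters against the writer class's __init__ signature, unknown configuration keys are dropped instead of raising TypeError. A sketch under that assumption (all values illustrative):

from qubx.loggers.factory import create_logs_writer

writer = create_logs_writer(
    "CsvFileLogsWriter",
    {
        "account_id": "acc-1",
        "strategy_id": "my_strategy",
        "run_id": "run-001",
        "log_folder": "logs",
        "not_a_ctor_arg": 42,  # silently filtered out by the signature check
    },
)

Custom writers can also be plugged in at runtime via register_logs_writer("MyWriter", MyWriter) for any hypothetical LogsWriter subclass MyWriter, after which create_logs_writer("MyWriter", ...) resolves it from LOGS_WRITER_REGISTRY.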
qubx/loggers/inmemory.py
ADDED

@@ -0,0 +1,68 @@
+import pandas as pd
+
+from typing import Any, Dict, List
+
+from qubx.core.loggers import LogsWriter
+from qubx.core.metrics import split_cumulative_pnl
+from qubx.pandaz.utils import scols
+
+class InMemoryLogsWriter(LogsWriter):
+    _portfolio: List
+    _execs: List
+    _signals: List
+
+    def __init__(self, account_id: str, strategy_id: str, run_id: str) -> None:
+        super().__init__(account_id, strategy_id, run_id)
+        self._portfolio = []
+        self._execs = []
+        self._signals = []
+
+    def write_data(self, log_type: str, data: List[Dict[str, Any]]):
+        if len(data) > 0:
+            if log_type == "portfolio":
+                self._portfolio.extend(data)
+            elif log_type == "executions":
+                self._execs.extend(data)
+            elif log_type == "signals":
+                self._signals.extend(data)
+
+    def get_portfolio(self, as_plain_dataframe=True) -> pd.DataFrame:
+        pfl = pd.DataFrame.from_records(self._portfolio, index="timestamp")
+        pfl.index = pd.DatetimeIndex(pfl.index)
+        if as_plain_dataframe:
+            # - convert to Qube presentation (TODO: temporary)
+            pis = []
+            for s in set(pfl["symbol"]):
+                pi = pfl[pfl["symbol"] == s]
+                pi = pi.drop(columns=["symbol", "realized_pnl_quoted", "current_price", "exchange_time"])
+                pi = pi.rename(
+                    {
+                        "pnl_quoted": "PnL",
+                        "quantity": "Pos",
+                        "avg_position_price": "Price",
+                        "market_value_quoted": "Value",
+                        "commissions_quoted": "Commissions",
+                    },
+                    axis=1,
+                )
+                # We want to convert the value to just price * quantity
+                # in reality value of perps is just the unrealized pnl but
+                # it's not important after simulation for metric calculations
+                pi["Value"] = pi["Pos"] * pi["Price"] + pi["Value"]
+                pis.append(pi.rename(lambda x: s + "_" + x, axis=1))
+            return split_cumulative_pnl(scols(*pis))
+        return pfl
+
+    def get_executions(self) -> pd.DataFrame:
+        p = pd.DataFrame()
+        if self._execs:
+            p = pd.DataFrame.from_records(self._execs, index="timestamp")
+            p.index = pd.DatetimeIndex(p.index)
+        return p
+
+    def get_signals(self) -> pd.DataFrame:
+        p = pd.DataFrame()
+        if self._signals:
+            p = pd.DataFrame.from_records(self._signals, index="timestamp")
+            p.index = pd.DatetimeIndex(p.index)
+        return p
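This is the writer the backtester now imports (see the qubx/backtester/runner.py change above); it buffers rows in memory and converts them to pandas DataFrames on demand. A minimal sketch (record fields are illustrative):

from qubx.loggers.inmemory import InMemoryLogsWriter

w = InMemoryLogsWriter(account_id="sim", strategy_id="my_strategy", run_id="run-001")
w.write_data("executions", [{"timestamp": "2024-01-01T00:00:00", "symbol": "BTCUSDT", "price": 42000.0}])
execs = w.get_executions()  # DataFrame with a DatetimeIndex built from "timestamp"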
qubx/loggers/mongo.py
ADDED

@@ -0,0 +1,80 @@
+from datetime import datetime
+from multiprocessing.pool import ThreadPool
+from pymongo import MongoClient
+from typing import Any, Dict, List
+
+from qubx.core.loggers import LogsWriter
+
+
+class MongoDBLogsWriter(LogsWriter):
+    """
+    MongoDB implementation of LogsWriter interface.
+    Writes log data to a single MongoDB collection asynchronously.
+    Supports TTL expiration via index on 'timestamp' field.
+    """
+
+    def __init__(
+        self,
+        account_id: str,
+        strategy_id: str,
+        run_id: str,
+        mongo_uri: str = "mongodb://localhost:27017/",
+        db_name: str = "default_logs_db",
+        collection_name_prefix: str = "qubx_logs",
+        pool_size: int = 3,
+        ttl_seconds: int = 86400,
+    ) -> None:
+        super().__init__(account_id, strategy_id, run_id)
+        self.client = MongoClient(mongo_uri)
+        self.db = self.client[db_name]
+        self.pool = ThreadPool(pool_size)
+        self.collection_name_prefix = collection_name_prefix
+
+        # Ensure TTL index exists on the 'timestamp' field
+        self.db[f"{collection_name_prefix}_positions"].create_index(
+            "timestamp", expireAfterSeconds=ttl_seconds
+        )
+        self.db[f"{collection_name_prefix}_portfolio"].create_index(
+            "timestamp", expireAfterSeconds=ttl_seconds
+        )
+        self.db[f"{collection_name_prefix}_executions"].create_index(
+            "timestamp", expireAfterSeconds=ttl_seconds
+        )
+        self.db[f"{collection_name_prefix}_signals"].create_index(
+            "timestamp", expireAfterSeconds=ttl_seconds
+        )
+        self.db[f"{collection_name_prefix}_balance"].create_index(
+            "timestamp", expireAfterSeconds=ttl_seconds
+        )
+
+    def _attach_metadata(
+        self, data: List[Dict[str, Any]], log_type: str
+    ) -> List[Dict[str, Any]]:
+        now = datetime.utcnow()
+        return [
+            {
+                **d,
+                "run_id": self.run_id,
+                "account_id": self.account_id,
+                "strategy_name": self.strategy_id,
+                "log_type": log_type,
+                "timestamp": now,
+            }
+            for d in data
+        ]
+
+    def _do_write(self, log_type: str, data: List[Dict[str, Any]]):
+        docs = self._attach_metadata(data, log_type)
+        self.db[f"{self.collection_name_prefix}_{log_type}"].insert_many(docs)
+
+    def write_data(self, log_type: str, data: List[Dict[str, Any]]):
+        if len(data) > 0:
+            self.pool.apply_async(self._do_write, (log_type, data,))
+
+    def flush_data(self):
+        pass
+
+    def close(self):
+        self.pool.close()
+        self.pool.join()
+        self.client.close()
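A construction sketch (URI, database name, and TTL are illustrative): each log type lands in its own collection named <collection_name_prefix>_<log_type>, and the TTL index on "timestamp" lets MongoDB expire old documents automatically:

from qubx.loggers.mongo import MongoDBLogsWriter

w = MongoDBLogsWriter(
    account_id="acc-1",
    strategy_id="my_strategy",
    run_id="run-001",
    mongo_uri="mongodb://localhost:27017/",
    db_name="qubx_logs_db",
    ttl_seconds=7 * 86400,  # keep documents for a week
)
w.write_data("balance", [{"currency": "USDT", "total": 1000.0, "locked": 0.0}])
w.close()  # joins the thread pool, then closes the MongoClient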
qubx/restorers/balance.py
CHANGED

@@ -7,6 +7,7 @@ from various sources.
 
 import os
 from pathlib import Path
+from pymongo import MongoClient
 
 import pandas as pd
 
@@ -118,3 +119,78 @@ class CsvBalanceRestorer(IBalanceRestorer):
                 balances[currency] = balance
 
         return balances
+
+
+class MongoDBBalanceRestorer(IBalanceRestorer):
+    """
+    Balance restorer that reads account balances from a MongoDB collection.
+
+    This restorer queries the most recent balance entries stored using MongoDBLogsWriter.
+    It restores data only from the most recent run_id for the given bot_id.
+    """
+
+    def __init__(
+        self,
+        strategy_name: str,
+        mongo_client: MongoClient,
+        db_name: str = "default_logs_db",
+        collection_name: str = "qubx_logs",
+    ):
+        self.mongo_client = mongo_client
+        self.db_name = db_name
+        self.collection_name = collection_name
+        self.strategy_name = strategy_name
+
+        self.collection = self.mongo_client[db_name][collection_name]
+
+    def restore_balances(self) -> dict[str, AssetBalance]:
+        """
+        Restore account balances from the most recent run.
+
+        Returns:
+            A dictionary mapping currency codes to AssetBalance objects.
+            Example: {'USDT': AssetBalance(total=100000.0, locked=0.0)}
+        """
+        try:
+            match_query = {
+                "log_type": "balance",
+                "strategy_name": self.strategy_name,
+            }
+
+            latest_run_doc = (
+                self.collection.find(match_query, {"run_id": 1, "timestamp": 1})
+                .sort("timestamp", -1)
+                .limit(1)
+            )
+
+            latest_run = next(latest_run_doc, None)
+            if not latest_run:
+                logger.warning("No balance logs found for given filters.")
+                return {}
+
+            latest_run_id = latest_run["run_id"]
+
+            logger.info(f"Restoring balances from MongoDB for run_id: {latest_run_id}")
+
+            query = {**match_query, "run_id": latest_run_id}
+            logs = self.collection.find(query).sort("timestamp", 1)
+
+            balances = {}
+
+            for log in logs:
+                currency = log.get("currency")
+                if currency:
+                    total = log.get("total", 0.0)
+                    locked = log.get("locked", 0.0)
+
+                    balance = AssetBalance(
+                        total=total,
+                        locked=locked,
+                    )
+                    balance.free = total - locked
+                    balances[currency] = balance
+
+            return balances
+        except Exception as e:
+            logger.error(f"Error restoring balances from MongoDB: {e}")
+            return {}
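A usage sketch for the new restorer (connection details are illustrative): it looks up the newest balance document for the strategy, then rebuilds AssetBalance objects from every balance row of that run_id:

from pymongo import MongoClient
from qubx.restorers.balance import MongoDBBalanceRestorer

restorer = MongoDBBalanceRestorer(
    strategy_name="my_strategy",
    mongo_client=MongoClient("mongodb://localhost:27017/"),
    db_name="default_logs_db",
    collection_name="qubx_logs",
)
balances = restorer.restore_balances()  # e.g. {"USDT": AssetBalance(total=100000.0, locked=0.0)}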