Qubx 0.1.82__cp311-cp311-manylinux_2_35_x86_64.whl → 0.1.83__cp311-cp311-manylinux_2_35_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of Qubx might be problematic.
- qubx/core/series.cpython-311-x86_64-linux-gnu.so +0 -0
- qubx/core/utils.cpython-311-x86_64-linux-gnu.so +0 -0
- qubx/data/readers.py +469 -168
- qubx/ta/indicators.cpython-311-x86_64-linux-gnu.so +0 -0
- {qubx-0.1.82.dist-info → qubx-0.1.83.dist-info}/METADATA +1 -1
- {qubx-0.1.82.dist-info → qubx-0.1.83.dist-info}/RECORD +7 -7
- {qubx-0.1.82.dist-info → qubx-0.1.83.dist-info}/WHEEL +0 -0
qubx/core/series.cpython-311-x86_64-linux-gnu.so
Binary file (changed; no text diff shown)

qubx/core/utils.cpython-311-x86_64-linux-gnu.so
Binary file (changed; no text diff shown)
qubx/data/readers.py
CHANGED
(Removed lines below may appear truncated: the registry's diff view clips the old side of each change.)

````diff
@@ -13,11 +13,11 @@ from qubx.core.series import TimeSeries, OHLCV, time_as_nsec, Quote, Trade
 from qubx.utils.time import infer_series_frequency, handle_start_stop
 
 _DT = lambda x: pd.Timedelta(x).to_numpy().item()
-D1, H1 = _DT(
+D1, H1 = _DT("1D"), _DT("1h")
 
-DEFAULT_DAILY_SESSION = (_DT(
-STOCK_DAILY_SESSION = (_DT(
-CME_FUTURES_DAILY_SESSION = (_DT(
+DEFAULT_DAILY_SESSION = (_DT("00:00:00.100"), _DT("23:59:59.900"))
+STOCK_DAILY_SESSION = (_DT("9:30:00.100"), _DT("15:59:59.900"))
+CME_FUTURES_DAILY_SESSION = (_DT("8:30:00.100"), _DT("15:14:59.900"))
 
 
 def _recognize_t(t: Union[int, str], defaultvalue, timeunit) -> int:
````
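The new constants spell out the previously clipped session bounds. A minimal sketch of what `_DT` evaluates to (assuming only that pandas is installed; it mirrors the lambda in the hunk above):

```python
import pandas as pd

# _DT converts a timedelta string into an integer count of nanoseconds
_DT = lambda x: pd.Timedelta(x).to_numpy().item()

D1, H1 = _DT("1D"), _DT("1h")
print(D1)  # 86400000000000 ns (one day)
print(H1)  # 3600000000000 ns (one hour)

# session bounds are plain nanosecond offsets from midnight
STOCK_DAILY_SESSION = (_DT("9:30:00.100"), _DT("15:59:59.900"))
```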
````diff
@@ -31,9 +31,9 @@ def _recognize_t(t: Union[int, str], defaultvalue, timeunit) -> int:
 
 def _time(t, timestamp_units: str) -> int:
     t = int(t) if isinstance(t, float) else t
-    if timestamp_units ==
-        return np.datetime64(t,
-    return np.datetime64(t, timestamp_units).astype(
+    if timestamp_units == "ns":
+        return np.datetime64(t, "ns").item()
+    return np.datetime64(t, timestamp_units).astype("datetime64[ns]").item()
 
 
 def _find_column_index_in_list(xs, *args):
````
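For reference, a self-contained sketch of `_time` as it now reads (assumes numpy; the sample value is illustrative): nanosecond inputs are taken verbatim, anything else is widened to `datetime64[ns]` before being returned as an integer.

```python
import numpy as np

def _time(t, timestamp_units: str) -> int:
    # float epochs are truncated to int before conversion
    t = int(t) if isinstance(t, float) else t
    if timestamp_units == "ns":
        return np.datetime64(t, "ns").item()
    # e.g. "ms" / "s" inputs are upcast to nanosecond precision
    return np.datetime64(t, timestamp_units).astype("datetime64[ns]").item()

print(_time(1_700_000_000_000, "ms"))  # 1700000000000000000 (ns since epoch)
```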
````diff
@@ -45,7 +45,9 @@ def _find_column_index_in_list(xs, *args):
     raise IndexError(f"Can't find any from {args} in list: {xs}")
 
 
-_FIND_TIME_COL_IDX = lambda column_names: _find_column_index_in_list(
+_FIND_TIME_COL_IDX = lambda column_names: _find_column_index_in_list(
+    column_names, "time", "timestamp", "datetime", "date", "open_time"
+)
 
 
 class DataTransformer:
@@ -57,9 +59,9 @@ class DataTransformer:
     def start_transform(self, name: str, column_names: List[str]):
         self._column_names = column_names
         self.buffer = []
-
+
     def process_data(self, rows_data: Iterable) -> Any:
-        if rows_data is not None:
+        if rows_data is not None:
             self.buffer.extend(rows_data)
 
     def collect(self) -> Any:
@@ -68,14 +70,18 @@ class DataTransformer:
 
 class DataReader:
 
-    def get_names(self) -> List[str]
+    def get_names(self) -> List[str]:
         raise NotImplemented()
 
-    def read(
-
-
-
-
+    def read(
+        self,
+        data_id: str,
+        start: str | None = None,
+        stop: str | None = None,
+        transform: DataTransformer = DataTransformer(),
+        chunksize=0,
+        **kwargs,
+    ) -> Iterable | List:
         raise NotImplemented()
 
 
````
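The base `read` gains a full keyword signature plus `**kwargs`, which is what lets subclasses add extra options (such as the CSV reader's `timestamp_formatters` below) without breaking callers. A hypothetical minimal subclass, just to illustrate the contract (the `InMemoryReader` name and its dict layout are invented for this sketch):

```python
from typing import Any, Iterable, List

from qubx.data.readers import DataReader, DataTransformer

class InMemoryReader(DataReader):
    """Serves pre-loaded rows; each table is a (header, rows) pair."""

    def __init__(self, tables: dict):
        self._tables = tables

    def get_names(self) -> List[str]:
        return list(self._tables.keys())

    def read(
        self,
        data_id: str,
        start: str | None = None,
        stop: str | None = None,
        transform: DataTransformer = DataTransformer(),
        chunksize=0,
        **kwargs,
    ) -> Iterable | List:
        header, rows = self._tables[data_id]
        transform.start_transform(data_id, header)  # announce the columns
        transform.process_data(rows)                # feed all rows at once
        return transform.collect()
```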
````diff
@@ -94,7 +100,7 @@ class CsvStorageDataReader(DataReader):
         if ix < 0:
             for c in arr.iterchunks():
                 a = c.to_numpy()
-                ix = np.searchsorted(a, v, side=
+                ix = np.searchsorted(a, v, side="right")
                 if ix > 0 and ix < len(c):
                     ix = arr.index(a[ix]).as_py() - 1
                     break
@@ -102,16 +108,20 @@ class CsvStorageDataReader(DataReader):
 
     def __check_file_name(self, name: str) -> str | None:
         _f = join(self.path, name)
-        for sfx in [
-            if exists(p:=(_f + sfx)):
-                return p
+        for sfx in [".csv", ".csv.gz", ""]:
+            if exists(p := (_f + sfx)):
+                return p
         return None
 
-    def read(
-
-
-
-
+    def read(
+        self,
+        data_id: str,
+        start: str | None = None,
+        stop: str | None = None,
+        transform: DataTransformer = DataTransformer(),
+        chunksize=0,
+        timestamp_formatters=None,
+    ) -> Iterable | Any:
 
         f_path = self.__check_file_name(data_id)
         if not f_path:
@@ -119,31 +129,33 @@ class CsvStorageDataReader(DataReader):
 
         convert_options = None
         if timestamp_formatters is not None:
-            convert_options=csv.ConvertOptions(timestamp_parsers=timestamp_formatters)
+            convert_options = csv.ConvertOptions(timestamp_parsers=timestamp_formatters)
 
         table = csv.read_csv(
-            f_path,
+            f_path,
             parse_options=csv.ParseOptions(ignore_empty_lines=True),
-            convert_options=convert_options
+            convert_options=convert_options,
         )
-        fieldnames =
+        fieldnames = table.column_names
 
-        # - try to find range to load
+        # - try to find range to load
         start_idx, stop_idx = 0, table.num_rows
         try:
             _time_field_idx = _FIND_TIME_COL_IDX(fieldnames)
             _time_type = table.field(_time_field_idx).type
-            _time_unit = _time_type.unit if hasattr(_time_type,
+            _time_unit = _time_type.unit if hasattr(_time_type, "unit") else "ms"
             _time_data = table[_time_field_idx]
 
             # - check if need convert time to primitive types (i.e. Date32 -> timestamp[x])
             _time_cast_function = lambda xs: xs
             if _time_type != pa.timestamp(_time_unit):
-                _time_cast_function = lambda xs: xs.cast(pa.timestamp(_time_unit))
+                _time_cast_function = lambda xs: xs.cast(pa.timestamp(_time_unit))
                 _time_data = _time_cast_function(_time_data)
 
             # - preprocessing start and stop
-            t_0, t_1 = handle_start_stop(
+            t_0, t_1 = handle_start_stop(
+                start, stop, convert=lambda x: _recognize_t(x, None, _time_unit)
+            )
 
             # - check requested range
             if t_0:
@@ -159,19 +171,25 @@ class CsvStorageDataReader(DataReader):
 
         except Exception as exc:
             logger.warning(exc)
-            logger.info(
+            logger.info("loading whole file")
 
-        length =
+        length = stop_idx - start_idx + 1
         selected_table = table.slice(start_idx, length)
 
         # - in this case we want to return iterable chunks of data
         if chunksize > 0:
+
            def _iter_chunks():
                for n in range(0, length // chunksize + 1):
                    transform.start_transform(data_id, fieldnames)
-                    raw_data =
+                    raw_data = (
+                        selected_table[n * chunksize : min((n + 1) * chunksize, length)]
+                        .to_pandas()
+                        .to_numpy()
+                    )
                    transform.process_data(raw_data)
                    yield transform.collect()
+
            return _iter_chunks()
 
         transform.start_transform(data_id, fieldnames)
@@ -179,10 +197,10 @@ class CsvStorageDataReader(DataReader):
         transform.process_data(raw_data)
         return transform.collect()
 
-    def get_names(self) -> List[str]
+    def get_names(self) -> List[str]:
         _n = []
         for s in os.listdir(self.path):
-            if
+            if m := re.match(r"(.*)\.csv(.gz)?$", s):
                 _n.append(m.group(1))
         return _n
 
````
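Putting the reformatted reader together, a hedged usage sketch (the storage path and symbol are illustrative, and a constructor taking a folder path is implied by `self.path` above, not shown in this diff): a plain call returns whatever the transform collects, while `chunksize > 0` returns a generator that restarts the transform for every slice.

```python
from qubx.data.readers import CsvStorageDataReader, AsPandasFrame

reader = CsvStorageDataReader("/data/csv")  # folder containing BTCUSDT.csv.gz

# whole range in one shot, collected as a single pandas DataFrame
frame = reader.read(
    "BTCUSDT", "2023-01-01", "2023-02-01", transform=AsPandasFrame()
)

# chunked: each iteration yields the transform's result for one slice
for chunk in reader.read("BTCUSDT", transform=AsPandasFrame(), chunksize=10_000):
    print(len(chunk))  # up to 10_000 rows per DataFrame
```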
````diff
@@ -191,6 +209,7 @@ class AsPandasFrame(DataTransformer):
     """
     List of records to pandas dataframe transformer
     """
+
     def __init__(self, timestamp_units=None) -> None:
         self.timestamp_units = timestamp_units
 
@@ -198,26 +217,30 @@ class AsPandasFrame(DataTransformer):
         self._time_idx = _FIND_TIME_COL_IDX(column_names)
         self._column_names = column_names
         self._frame = pd.DataFrame()
-
+
     def process_data(self, rows_data: Iterable) -> Any:
         self._frame
         p = pd.DataFrame.from_records(rows_data, columns=self._column_names)
         p.set_index(self._column_names[self._time_idx], drop=True, inplace=True)
-        p.index =
-
+        p.index = (
+            pd.to_datetime(p.index, unit=self.timestamp_units)
+            if self.timestamp_units
+            else p.index
+        )
+        p.index.rename("timestamp", inplace=True)
         p.sort_index(inplace=True)
         self._frame = pd.concat((self._frame, p), axis=0, sort=True)
         return p
 
     def collect(self) -> Any:
-        return self._frame
+        return self._frame
 
 
 class AsOhlcvSeries(DataTransformer):
     """
     Convert incoming data into OHLCV series.
 
-    Incoming data may have one of the following structures:
+    Incoming data may have one of the following structures:
 
     ```
     ohlcv: time,open,high,low,close,volume|quote_volume,(buy_volume)
@@ -226,9 +249,9 @@ class AsOhlcvSeries(DataTransformer):
     ```
     """
 
-    def __init__(self, timeframe: str | None = None, timestamp_units=
+    def __init__(self, timeframe: str | None = None, timestamp_units="ns") -> None:
         super().__init__()
-        self.timeframe = timeframe
+        self.timeframe = timeframe
         self._series = None
         self._data_type = None
         self.timestamp_units = timestamp_units
@@ -238,38 +261,59 @@ class AsOhlcvSeries(DataTransformer):
         self._volume_idx = None
         self._b_volume_idx = None
         try:
-            self._close_idx = _find_column_index_in_list(column_names,
-            self._open_idx = _find_column_index_in_list(column_names,
-            self._high_idx = _find_column_index_in_list(column_names,
-            self._low_idx = _find_column_index_in_list(column_names,
+            self._close_idx = _find_column_index_in_list(column_names, "close")
+            self._open_idx = _find_column_index_in_list(column_names, "open")
+            self._high_idx = _find_column_index_in_list(column_names, "high")
+            self._low_idx = _find_column_index_in_list(column_names, "low")
 
             try:
-                self._volume_idx = _find_column_index_in_list(
-
+                self._volume_idx = _find_column_index_in_list(
+                    column_names, "quote_volume", "volume", "vol"
+                )
+            except:
+                pass
 
             try:
-                self._b_volume_idx = _find_column_index_in_list(
-
+                self._b_volume_idx = _find_column_index_in_list(
+                    column_names,
+                    "taker_buy_volume",
+                    "taker_buy_quote_volume",
+                    "buy_volume",
+                )
+            except:
+                pass
 
-            self._data_type =
-        except:
+            self._data_type = "ohlc"
+        except:
             try:
-                self._ask_idx = _find_column_index_in_list(column_names,
-                self._bid_idx = _find_column_index_in_list(column_names,
-                self._data_type =
-            except:
+                self._ask_idx = _find_column_index_in_list(column_names, "ask")
+                self._bid_idx = _find_column_index_in_list(column_names, "bid")
+                self._data_type = "quotes"
+            except:
 
                 try:
-                    self._price_idx = _find_column_index_in_list(column_names,
-                    self._size_idx = _find_column_index_in_list(
+                    self._price_idx = _find_column_index_in_list(column_names, "price")
+                    self._size_idx = _find_column_index_in_list(
+                        column_names, "quote_qty", "qty", "size", "amount", "volume"
+                    )
                     self._taker_idx = None
                     try:
-                        self._taker_idx = _find_column_index_in_list(
-
-
-
-
-
+                        self._taker_idx = _find_column_index_in_list(
+                            column_names,
+                            "is_buyer_maker",
+                            "side",
+                            "aggressive",
+                            "taker",
+                            "is_taker",
+                        )
+                    except:
+                        pass
+
+                    self._data_type = "trades"
+                except:
+                    raise ValueError(
+                        f"Can't recognize data for update from header: {column_names}"
+                    )
 
         self._column_names = column_names
         self._name = name
@@ -280,39 +324,44 @@ class AsOhlcvSeries(DataTransformer):
         for d in rows_data:
             self._series.update_by_bar(
                 _time(d[self._time_idx], self.timestamp_units),
-                d[self._open_idx],
+                d[self._open_idx],
+                d[self._high_idx],
+                d[self._low_idx],
+                d[self._close_idx],
                 d[self._volume_idx] if self._volume_idx else 0,
-                d[self._b_volume_idx] if self._b_volume_idx else 0
+                d[self._b_volume_idx] if self._b_volume_idx else 0,
             )
 
     def _proc_quotes(self, rows_data: List[List]):
         for d in rows_data:
             self._series.update(
                 _time(d[self._time_idx], self.timestamp_units),
-                (d[self._ask_idx] + d[self._bid_idx])/2
+                (d[self._ask_idx] + d[self._bid_idx]) / 2,
             )
 
     def _proc_trades(self, rows_data: List[List]):
         for d in rows_data:
             a = d[self._taker_idx] if self._taker_idx else 0
             s = d[self._size_idx]
-            b = s if a else 0
-            self._series.update(
+            b = s if a else 0
+            self._series.update(
+                _time(d[self._time_idx], self.timestamp_units), d[self._price_idx], s, b
+            )
 
     def process_data(self, rows_data: List[List]) -> Any:
         if self._series is None:
             ts = [t[self._time_idx] for t in rows_data[:100]]
             self.timeframe = pd.Timedelta(infer_series_frequency(ts)).asm8.item()
 
-            # - create instance after first data received if
+            # - create instance after first data received if
             self._series = OHLCV(self._name, self.timeframe)
 
         match self._data_type:
-            case
+            case "ohlc":
                 self._proc_ohlc(rows_data)
-            case
+            case "quotes":
                 self._proc_quotes(rows_data)
-            case
+            case "trades":
                 self._proc_trades(rows_data)
 
         return None
````
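The transformer sniffs the header to decide which branch of `process_data` to take; the timeframe is inferred from the first rows when not given. A short sketch of the detection outcome (the column lists are examples of what `_find_column_index_in_list` accepts):

```python
from qubx.data.readers import AsOhlcvSeries

t = AsOhlcvSeries(timestamp_units="ns")

t.start_transform("BTCUSDT", ["time", "open", "high", "low", "close", "volume"])
# -> t._data_type == "ohlc"   (bars go through _proc_ohlc)

t.start_transform("BTCUSDT", ["timestamp", "bid", "ask"])
# -> t._data_type == "quotes" (mid-price updates via _proc_quotes)

t.start_transform("BTCUSDT", ["time", "price", "size", "side"])
# -> t._data_type == "trades" (ticks via _proc_trades)
```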
````diff
@@ -330,21 +379,25 @@ class AsQuotes(DataTransformer):
     def start_transform(self, name: str, column_names: List[str]):
         self.buffer = list()
         self._time_idx = _FIND_TIME_COL_IDX(column_names)
-        self._bid_idx = _find_column_index_in_list(column_names,
-        self._ask_idx = _find_column_index_in_list(column_names,
-        self._bidvol_idx = _find_column_index_in_list(
-
+        self._bid_idx = _find_column_index_in_list(column_names, "bid")
+        self._ask_idx = _find_column_index_in_list(column_names, "ask")
+        self._bidvol_idx = _find_column_index_in_list(
+            column_names, "bidvol", "bid_vol", "bidsize", "bid_size"
+        )
+        self._askvol_idx = _find_column_index_in_list(
+            column_names, "askvol", "ask_vol", "asksize", "ask_size"
+        )
 
     def process_data(self, rows_data: Iterable) -> Any:
-        if rows_data is not None:
+        if rows_data is not None:
             for d in rows_data:
-                t = d[self._time_idx]
+                t = d[self._time_idx]
                 b = d[self._bid_idx]
                 a = d[self._ask_idx]
                 bv = d[self._bidvol_idx]
                 av = d[self._askvol_idx]
-                self.buffer.append(Quote(t.as_unit(
-
+                self.buffer.append(Quote(t.as_unit("ns").asm8.item(), b, a, bv, av))
+
 
 class AsTimestampedRecords(DataTransformer):
     """
@@ -366,7 +419,7 @@ class AsTimestampedRecords(DataTransformer):
     ] ```
     """
 
-    def __init__(self, timestamp_units: str | None=None) -> None:
+    def __init__(self, timestamp_units: str | None = None) -> None:
         self.timestamp_units = timestamp_units
 
     def start_transform(self, name: str, column_names: List[str]):
@@ -383,7 +436,10 @@ class AsTimestampedRecords(DataTransformer):
             t = r[self._time_idx]
             if self.timestamp_units:
                 t = _time(t, self.timestamp_units)
-            di = dict(zip(self._column_names, r)) | {
+            di = dict(zip(self._column_names, r)) | {
+                "timestamp_ns": t,
+                "timestamp": pd.Timestamp(t),
+            }
             res.append(di)
         return res
 
@@ -393,12 +449,14 @@ class RestoreTicksFromOHLC(DataTransformer):
     Emulates quotes (and trades) from OHLC bars
     """
 
-    def __init__(
-
-
-
-
-
+    def __init__(
+        self,
+        trades: bool = False,  # if we also wants 'trades'
+        default_bid_size=1e9,  # default bid/ask is big
+        default_ask_size=1e9,  # default bid/ask is big
+        daily_session_start_end=DEFAULT_DAILY_SESSION,
+        spread=0.0,
+    ):
         super().__init__()
         self._trades = trades
         self._bid_size = default_bid_size
@@ -409,24 +467,27 @@ class RestoreTicksFromOHLC(DataTransformer):
 
     def start_transform(self, name: str, column_names: List[str]):
         self.buffer = []
-        # - it will fail if receive data doesn't look as ohlcv
+        # - it will fail if receive data doesn't look as ohlcv
         self._time_idx = _FIND_TIME_COL_IDX(column_names)
-        self._open_idx = _find_column_index_in_list(column_names,
-        self._high_idx = _find_column_index_in_list(column_names,
-        self._low_idx = _find_column_index_in_list(column_names,
-        self._close_idx = _find_column_index_in_list(column_names,
+        self._open_idx = _find_column_index_in_list(column_names, "open")
+        self._high_idx = _find_column_index_in_list(column_names, "high")
+        self._low_idx = _find_column_index_in_list(column_names, "low")
+        self._close_idx = _find_column_index_in_list(column_names, "close")
         self._volume_idx = None
         self._freq = None
         try:
-            self._volume_idx = _find_column_index_in_list(column_names,
-        except:
+            self._volume_idx = _find_column_index_in_list(column_names, "volume", "vol")
+        except:
+            pass
 
         if self._volume_idx is None and self._trades:
-            logger.warning(
+            logger.warning(
+                "Input OHLC data doesn't contain volume information so trades can't be emulated !"
+            )
             self._trades = False
 
-    def process_data(self, rows_data:List[List]) -> Any:
-        if rows_data is None:
+    def process_data(self, rows_data: List[List]) -> Any:
+        if rows_data is None:
             return
 
         s2 = self._s2
@@ -436,7 +497,7 @@ class RestoreTicksFromOHLC(DataTransformer):
             self._freq = infer_series_frequency(ts)
 
             # - timestamps when we emit simulated quotes
-            dt = self._freq.astype(
+            dt = self._freq.astype("timedelta64[ns]").item()
             if dt < D1:
                 self._t_start = dt // 10
                 self._t_mid1 = dt // 2 - dt // 10
@@ -450,41 +511,91 @@ class RestoreTicksFromOHLC(DataTransformer):
 
         # - input data
         for data in rows_data:
-            ti = pd.Timestamp(data[self._time_idx]).as_unit(
+            ti = pd.Timestamp(data[self._time_idx]).as_unit("ns").asm8.item()
             o = data[self._open_idx]
-            h=
+            h = data[self._high_idx]
             l = data[self._low_idx]
             c = data[self._close_idx]
             rv = data[self._volume_idx] if self._volume_idx else 0
 
             # - opening quote
-            self.buffer.append(
+            self.buffer.append(
+                Quote(
+                    ti + self._t_start, o - s2, o + s2, self._bid_size, self._ask_size
+                )
+            )
 
             if c >= o:
                 if self._trades:
-                    self.buffer.append(
-
+                    self.buffer.append(
+                        Trade(ti + self._t_start, o - s2, rv * (o - l))
+                    )  # sell 1
+                self.buffer.append(
+                    Quote(
+                        ti + self._t_mid1,
+                        l - s2,
+                        l + s2,
+                        self._bid_size,
+                        self._ask_size,
+                    )
+                )
 
                 if self._trades:
-                    self.buffer.append(
-
+                    self.buffer.append(
+                        Trade(ti + self._t_mid1, l + s2, rv * (c - o))
+                    )  # buy 1
+                self.buffer.append(
+                    Quote(
+                        ti + self._t_mid2,
+                        h - s2,
+                        h + s2,
+                        self._bid_size,
+                        self._ask_size,
+                    )
+                )
 
                 if self._trades:
-                    self.buffer.append(
+                    self.buffer.append(
+                        Trade(ti + self._t_mid2, h - s2, rv * (h - c))
+                    )  # sell 2
             else:
                 if self._trades:
-                    self.buffer.append(
-
+                    self.buffer.append(
+                        Trade(ti + self._t_start, o + s2, rv * (h - o))
+                    )  # buy 1
+                self.buffer.append(
+                    Quote(
+                        ti + self._t_mid1,
+                        h - s2,
+                        h + s2,
+                        self._bid_size,
+                        self._ask_size,
+                    )
+                )
 
                 if self._trades:
-                    self.buffer.append(
-
+                    self.buffer.append(
+                        Trade(ti + self._t_mid1, h - s2, rv * (o - c))
+                    )  # sell 1
+                self.buffer.append(
+                    Quote(
+                        ti + self._t_mid2,
+                        l - s2,
+                        l + s2,
+                        self._bid_size,
+                        self._ask_size,
+                    )
+                )
 
                 if self._trades:
-                    self.buffer.append(
+                    self.buffer.append(
+                        Trade(ti + self._t_mid2, l + s2, rv * (c - l))
+                    )  # buy 2
 
             # - closing quote
-            self.buffer.append(
+            self.buffer.append(
+                Quote(ti + self._t_end, c - s2, c + s2, self._bid_size, self._ask_size)
+            )
 
 
 def _retry(fn):
````
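With the bodies restored, the emission pattern is easier to see: each bar produces an opening quote, two mid-bar quotes tracing the assumed intrabar path, and a closing quote, with optional trades whose sizes are volume-weighted price legs. A sketch (the `reader` variable stands for any reader over OHLC bars, e.g. the CSV reader above; symbol and dates are illustrative):

```python
from qubx.data.readers import RestoreTicksFromOHLC

# up bar (c >= o): path open -> low -> high -> close
#   t_start: Quote around o  [+ Trade sell @ o-s2, size rv*(o-l)]
#   t_mid1 : Quote around l  [+ Trade buy  @ l+s2, size rv*(c-o)]
#   t_mid2 : Quote around h  [+ Trade sell @ h-s2, size rv*(h-c)]
#   t_end  : Quote around c
# down bar: open -> high -> low -> close, with the buy/sell legs mirrored
ticks = reader.read(
    "BTCUSDT", "2023-01-01", "2023-01-02",
    transform=RestoreTicksFromOHLC(trades=True, spread=0.01),
)
```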
````diff
@@ -496,10 +607,13 @@ def _retry(fn):
         try:
             return fn(*args, **kw)
         except (pg.InterfaceError, pg.OperationalError) as e:
-            logger.warning(
+            logger.warning(
+                "Database Connection [InterfaceError or OperationalError]"
+            )
             # print ("Idle for %s seconds" % (cls._reconnect_idle))
             # time.sleep(cls._reconnect_idle)
             cls._connect()
+
     return wrapper
 
 
@@ -508,10 +622,17 @@ class QuestDBSqlBuilder:
     Generic sql builder for QuestDB data
     """
 
-    def get_table_name(self, data_id: str, sfx: str=
+    def get_table_name(self, data_id: str, sfx: str = "") -> str | None:
         pass
 
-    def prepare_data_sql(
+    def prepare_data_sql(
+        self,
+        data_id: str,
+        start: str | None,
+        end: str | None,
+        resample: str,
+        data_type: str,
+    ) -> str | None:
         pass
 
     def prepare_names_sql(self) -> str:
@@ -523,49 +644,67 @@ class QuestDBSqlCandlesBuilder(QuestDBSqlBuilder):
     Sql builder for candles data
     """
 
-    def get_table_name(self, data_id: str, sfx: str=
+    def get_table_name(self, data_id: str, sfx: str = "") -> str:
         """
         Get table name for data_id
         data_id can have format <exchange>.<type>:<symbol>
-        for example:
-            BINANCE.UM:BTCUSDT or BINANCE:BTCUSDT for spot
+        for example:
+            BINANCE.UM:BTCUSDT or BINANCE:BTCUSDT for spot
         """
-        _aliases = {
+        _aliases = {"um": "umfutures", "cm": "cmfutures", "f": "futures"}
         table_name = data_id
-        _ss = data_id.split(
+        _ss = data_id.split(":")
         if len(_ss) > 1:
             _exch, symb = _ss
-            _mktype =
-            _ss = _exch.split(
+            _mktype = "spot"
+            _ss = _exch.split(".")
             if len(_ss) > 1:
                 _exch = _ss[0]
                 _mktype = _ss[1]
             _mktype = _mktype.lower()
-            table_name =
+            table_name = ".".join(
+                filter(
+                    lambda x: x,
+                    [_exch.lower(), _aliases.get(_mktype, _mktype), symb.lower(), sfx],
+                )
+            )
         return table_name
 
     @staticmethod
     def _convert_time_delta_to_qdb_resample_format(c_tf: str):
         if c_tf:
-            _t = re.match(r
+            _t = re.match(r"(\d+)(\w+)", c_tf)
             if _t and len(_t.groups()) > 1:
                 c_tf = f"{_t[1]}{_t[2][0].lower()}"
-        return c_tf
-
-    def prepare_data_sql(
-
-
-
+        return c_tf
+
+    def prepare_data_sql(
+        self,
+        data_id: str,
+        start: str | None,
+        end: str | None,
+        resample: str,
+        data_type: str,
+    ) -> str:
+        where = ""
+        w0 = f"timestamp >= '{start}'" if start else ""
+        w1 = f"timestamp <= '{end}'" if end else ""
 
         # - fix: when no data ranges are provided we must skip empy where keyword
-        if w0 or w1:
-            where = f
+        if w0 or w1:
+            where = f"where {w0} and {w1}" if (w0 and w1) else f"where {(w0 or w1)}"
 
         # - check resample format
-        resample =
-
+        resample = (
+            QuestDBSqlCandlesBuilder._convert_time_delta_to_qdb_resample_format(
+                resample
+            )
+            if resample
+            else resample
+        )
+        _rsmpl = f"SAMPLE by {resample}" if resample else ""
 
-        table_name = self.get_table_name(data_id,
+        table_name = self.get_table_name(data_id, data_type)
         return f"""
             select timestamp,
             first(open) as open,
````
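Two small behaviors worth pinning down now that the strings are visible; the outputs below follow directly from the code in the hunk above:

```python
from qubx.data.readers import QuestDBSqlCandlesBuilder

b = QuestDBSqlCandlesBuilder()

b.get_table_name("BINANCE.UM:BTCUSDT", "candles_1m")
# -> "binance.umfutures.btcusdt.candles_1m"   ("um" alias expanded)

b.get_table_name("BINANCE:BTCUSDT", "candles_1m")
# -> "binance.spot.btcusdt.candles_1m"        (no market type -> spot)

QuestDBSqlCandlesBuilder._convert_time_delta_to_qdb_resample_format("15Min")
# -> "15m"   (pandas-style unit shortened to QuestDB's SAMPLE BY letter)
```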
````diff
@@ -578,7 +717,7 @@ class QuestDBSqlCandlesBuilder(QuestDBSqlBuilder):
             sum(taker_buy_volume) as taker_buy_volume,
             sum(taker_buy_quote_volume) as taker_buy_quote_volume
             from "{table_name}" {where} {_rsmpl};
-        """
+        """
 
 
 class QuestDBConnector(DataReader):
@@ -589,17 +728,24 @@ class QuestDBConnector(DataReader):
     >>> db = QuestDBConnector()
     >>> db.read('BINANCE.UM:ETHUSDT', '2024-01-01', transform=AsPandasFrame())
     """
+
     _reconnect_tries = 5
     _reconnect_idle = 0.1  # wait seconds before retying
     _builder: QuestDBSqlBuilder
 
-    def __init__(
-
+    def __init__(
+        self,
+        builder: QuestDBSqlBuilder = QuestDBSqlCandlesBuilder(),
+        host="localhost",
+        user="admin",
+        password="quest",
+        port=8812,
+    ) -> None:
         self._connection = None
         self._cursor = None
         self._host = host
         self._port = port
-        self.connection_url = f
+        self.connection_url = f"user={user} password={password} host={host} port={port}"
         self._builder = builder
         self._connect()
 
@@ -608,26 +754,57 @@ class QuestDBConnector(DataReader):
         self._cursor = self._connection.cursor()
         logger.debug(f"Connected to QuestDB at {self._host}:{self._port}")
 
+    def read(
+        self,
+        data_id: str,
+        start: str | None = None,
+        stop: str | None = None,
+        transform: DataTransformer = DataTransformer(),
+        chunksize=0,  # TODO: use self._cursor.fetchmany in this case !!!!
+        timeframe: str = "1m",
+        data_type="candles_1m",
+    ) -> Any:
+        return self._read(
+            data_id,
+            start,
+            stop,
+            transform,
+            chunksize,
+            timeframe,
+            data_type,
+            self._builder,
+        )
+
+    def get_names(self) -> List[str]:
+        return self._get_names(self._builder)
+
     @_retry
-    def
-
-
-
+    def _read(
+        self,
+        data_id: str,
+        start: str | None,
+        stop: str | None,
+        transform: DataTransformer,
+        chunksize: int,  # TODO: use self._cursor.fetchmany in this case !!!!
+        timeframe: str,
+        data_type: str,
+        builder: QuestDBSqlBuilder,
+    ) -> Any:
         start, end = handle_start_stop(start, stop)
-        _req =
+        _req = builder.prepare_data_sql(data_id, start, end, timeframe, data_type)
 
-        self._cursor.execute(_req)
-        records = self._cursor.fetchall()
+        self._cursor.execute(_req)  # type: ignore
+        records = self._cursor.fetchall()  # TODO: for chunksize > 0 use fetchmany etc
 
-        names = [d.name for d in self._cursor.description]
+        names = [d.name for d in self._cursor.description]  # type: ignore
         transform.start_transform(data_id, names)
 
         transform.process_data(records)
         return transform.collect()
 
     @_retry
-    def
-        self._cursor.execute(
+    def _get_names(self, builder: QuestDBSqlBuilder) -> List[str]:
+        self._cursor.execute(builder.prepare_names_sql())  # type: ignore
         records = self._cursor.fetchall()
         return [r[0] for r in records]
 
@@ -644,9 +821,11 @@ class SnapshotsBuilder(DataTransformer):
     """
     Snapshots assembler from OB updates
     """
-
-
-
+
+    def __init__(
+        self,
+        levels: int = -1,  # how many levels restore, 1 - TOB, -1 - all
+        as_frame=False,  # result is dataframe
     ):
         self.buffer = []
         self.levels = levels
@@ -659,7 +838,7 @@ class SnapshotsBuilder(DataTransformer):
 
         # do additional init stuff here
 
-    def process_data(self, rows_data:List[List]) -> Any:
+    def process_data(self, rows_data: List[List]) -> Any:
         for r in rows_data:
             # restore snapshots and put into buffer or series
             pass
@@ -667,7 +846,7 @@ class SnapshotsBuilder(DataTransformer):
     def collect(self) -> Any:
         # - may be convert it to pandas DataFrame ?
         if self.as_frame:
-            return pd.DataFrame.from_records(self.buffer)
+            return pd.DataFrame.from_records(self.buffer)  # or custom transform
 
         # - or just returns as plain list
         return self.buffer
@@ -678,18 +857,140 @@ class QuestDBSqlOrderBookBilder(QuestDBSqlBuilder):
     Sql builder for snapshot data
     """
 
-    def get_table_name(self, data_id: str, sfx: str=
-        return
+    def get_table_name(self, data_id: str, sfx: str = "") -> str:
+        return ""
+
+    def prepare_data_sql(
+        self,
+        data_id: str,
+        start: str | None,
+        end: str | None,
+        resample: str,
+        data_type: str,
+    ) -> str:
+        return ""
+
+
+class TradeSql(QuestDBSqlCandlesBuilder):
+
+    def prepare_data_sql(
+        self,
+        data_id: str,
+        start: str | None,
+        end: str | None,
+        resample: str,
+        data_type: str,
+    ) -> str:
+        table_name = self.get_table_name(data_id, data_type)
+        where = ""
+        w0 = f"timestamp >= '{start}'" if start else ""
+        w1 = f"timestamp <= '{end}'" if end else ""
 
-
-
+        # - fix: when no data ranges are provided we must skip empy where keyword
+        if w0 or w1:
+            where = f"where {w0} and {w1}" if (w0 and w1) else f"where {(w0 or w1)}"
+
+        resample = (
+            QuestDBSqlCandlesBuilder._convert_time_delta_to_qdb_resample_format(
+                resample
+            )
+            if resample
+            else resample
+        )
+        if resample:
+            sql = f"""
+            select timestamp, first(price) as open, max(price) as high, min(price) as low, last(price) as close,
+            sum(size) as volume from "{table_name}" {where} SAMPLE by {resample};"""
+        else:
+            sql = f"""select timestamp, price, size, market_maker from "{table_name}" {where};"""
 
+        return sql
 
-
+
+class MultiQdbConnector(QuestDBConnector):
     """
-
+    Data connector for QuestDB which provides access to following data types:
+      - candles
+      - trades
+      - orderbook snapshots
+      - liquidations
+      - funding rate
+
+    Examples:
+    1. Retrieving trades:
+        qdb.read(
+            "BINANCE.UM:BTCUSDT",
+            "2023-01-01 00:00",
+            "2023-01-01 10:00",
+            timeframe="15Min",
+            transform=AsPandasFrame(),
+            data_type="trade"
+        )
     """
 
-
-
-
+    _TYPE_TO_BUILDER = {
+        "candles_1m": QuestDBSqlCandlesBuilder(),
+        "trade": TradeSql(),
+        "orderbook": QuestDBSqlOrderBookBilder(),
+    }
+
+    _TYPE_MAPPINGS = {
+        "candles": "candles_1m",
+        "trades": "trade",
+        "ob": "orderbook",
+        "trd": "trade",
+        "td": "trade",
+    }
+
+    def __init__(
+        self,
+        host="localhost",
+        user="admin",
+        password="quest",
+        port=8812,
+    ) -> None:
+        self._connection = None
+        self._cursor = None
+        self._host = host
+        self._port = port
+        self._user = user
+        self._password = password
+        self._connect()
+
+    @property
+    def connection_url(self):
+        return " ".join(
+            [
+                f"user={self._user}",
+                f"password={self._password}",
+                f"host={self._host}",
+                f"port={self._port}",
+            ]
+        )
+
+    def read(
+        self,
+        data_id: str,
+        start: str | None = None,
+        stop: str | None = None,
+        transform: DataTransformer = DataTransformer(),
+        chunksize=0,  # TODO: use self._cursor.fetchmany in this case !!!!
+        timeframe: str | None = None,
+        data_type="candles",
+    ) -> Any:
+        _mapped_data_type = self._TYPE_MAPPINGS.get(data_type, data_type)
+        return self._read(
+            data_id,
+            start,
+            stop,
+            transform,
+            chunksize,
+            timeframe,
+            _mapped_data_type,
+            self._TYPE_TO_BUILDER[_mapped_data_type],
+        )
+
+    def get_names(self, data_type: str) -> List[str]:
+        return self._get_names(
+            self._TYPE_TO_BUILDER[self._TYPE_MAPPINGS.get(data_type, data_type)]
+        )
````
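A usage sketch tying the new pieces together, based on the class docstring (host and credentials are the constructor defaults; symbol and dates are illustrative). The friendlier aliases in `_TYPE_MAPPINGS` are what select the SQL builder:

```python
from qubx.data.readers import MultiQdbConnector, AsPandasFrame

qdb = MultiQdbConnector(host="localhost")

# "trades" maps to "trade", which selects the TradeSql builder; with a
# timeframe set, raw trades are resampled to OHLCV on the server side
bars = qdb.read(
    "BINANCE.UM:BTCUSDT",
    "2023-01-01 00:00",
    "2023-01-01 10:00",
    timeframe="15Min",
    transform=AsPandasFrame(),
    data_type="trades",
)

names = qdb.get_names("candles")  # tables visible to the candles builder
```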
Binary file
|
|
{qubx-0.1.82.dist-info → qubx-0.1.83.dist-info}/RECORD
CHANGED

````diff
@@ -6,13 +6,13 @@ qubx/core/basics.py,sha256=2u7WV5KX-RbTmzoKfi1yT4HNLDPfQcFMCUZ1pVsM_VE,14777
 qubx/core/helpers.py,sha256=gPE78dO718NBY0-JbfqNGCzIvr4BVatFntNIy2RUrEY,11559
 qubx/core/loggers.py,sha256=HpgavBZegoDv9ssihtqX0pitXKULVAPHUpoE_volJw0,11910
 qubx/core/lookups.py,sha256=4aEC7b2AyEXFqHHGDenex3Z1FZGrpDSb8IwzBZrSqIA,13688
-qubx/core/series.cpython-311-x86_64-linux-gnu.so,sha256=
+qubx/core/series.cpython-311-x86_64-linux-gnu.so,sha256=1iRap8PxsH4nQvfSbQk2Cyj17hGPM1XqUOuObeC67MA,698952
 qubx/core/series.pxd,sha256=IS89NQ5FYp3T0YIHe1lELKZIAKrNvX8K6WlLyac44I4,2847
 qubx/core/series.pyx,sha256=WEAjn4j3zn540Cxx68X5gRXilvwa7NGdbki6myzZbIM,28108
 qubx/core/strategy.py,sha256=Fs4fFyHaEGYuz7mBeQHBWFu3Ipg0yFzcxXhskgsPxJE,30330
-qubx/core/utils.cpython-311-x86_64-linux-gnu.so,sha256=
+qubx/core/utils.cpython-311-x86_64-linux-gnu.so,sha256=0H3DSLr2GpIS-iSc4zL6FavxaxdkhafSvhJWqzxpJuY,74216
 qubx/core/utils.pyx,sha256=6dQ8R02bl8V3f-W3Wk9-e86D9OvDz-5-4NA_dlF_xwc,1368
-qubx/data/readers.py,sha256=
+qubx/data/readers.py,sha256=XV5Q9ZuMGciN9zgyBhGo3P6Jo4CXz_1QhYsvuxePgC8,31796
 qubx/impl/ccxt_connector.py,sha256=NqF-tgxfTATnmVqKUonNXCAzECrDU8YrgqM3Nq06fw8,9150
 qubx/impl/ccxt_customizations.py,sha256=kK_4KmOyKvDVgd4MTkVg4CyqdjE-6r41siZIvLj-A-Q,3488
 qubx/impl/ccxt_trading.py,sha256=cmg4P-zd78w-V8j3-IGS2LFxikGhxFPgmCvz3sr065Q,9097
@@ -23,7 +23,7 @@ qubx/pandaz/__init__.py,sha256=Iw5uzicYGSC3FEKZ-W1O5-7cXq_P0kH11-EcXV0zZhs,175
 qubx/pandaz/ta.py,sha256=TUvjrvmk4EQvDcXoRp6Os08-HUap-ZvpSDGawhViOgg,85271
 qubx/pandaz/utils.py,sha256=FyLKQy8spkqxhBij_nPFC_ZzI_L3-IgB9O53MqWKmq0,19109
 qubx/ta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-qubx/ta/indicators.cpython-311-x86_64-linux-gnu.so,sha256=
+qubx/ta/indicators.cpython-311-x86_64-linux-gnu.so,sha256=cJCIbrFnJlDkbrywhmh-bZzO73GnFuhs6CDQZaB9G1w,284552
 qubx/ta/indicators.pyx,sha256=P-GEYUks2lSHo6hbtUFAB7TWE1AunjLR4jIjwqPHrwU,7708
 qubx/trackers/__init__.py,sha256=1y_yvIy0OQwBqfhAW_EY33NxFzFSWvI0qNAPU6zchYc,60
 qubx/trackers/rebalancers.py,sha256=QCzANCooZBi2VMCBjjCPMq_Dt1h1zrBelATnfmVve74,5522
@@ -34,6 +34,6 @@ qubx/utils/marketdata/binance.py,sha256=36dl4rxOAGTeY3uoONmiPanj8BkP0oBdDiH-URJJ
 qubx/utils/misc.py,sha256=z5rdz5hbRu9-F2QgF47OCkMvhfIkRKs-PHR8L5DWkBM,9831
 qubx/utils/runner.py,sha256=OY7SoRfxHwzn0rKTGB_lbg5zNASEL_49hQXWqs-LiMk,9306
 qubx/utils/time.py,sha256=_DjCdQditzZwMy_8rvPdWyw5tjw__2p24LMPgXdZ8i0,4911
-qubx-0.1.
-qubx-0.1.
-qubx-0.1.
+qubx-0.1.83.dist-info/METADATA,sha256=ZRsVLtllw2blS4L5Wv9iRJKtgI8lHAPZRFj1RqJO2zI,2491
+qubx-0.1.83.dist-info/WHEEL,sha256=MLOa6LysROdjgj4FVxsHitAnIh8Be2D_c9ZSBHKrz2M,110
+qubx-0.1.83.dist-info/RECORD,,
````
{qubx-0.1.82.dist-info → qubx-0.1.83.dist-info}/WHEEL
File without changes