Qubx 0.0.1__cp311-cp311-manylinux_2_35_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of Qubx might be problematic.
- qubx/__init__.py +164 -0
- qubx/_nb_magic.py +69 -0
- qubx/core/__init__.py +0 -0
- qubx/core/basics.py +224 -0
- qubx/core/lookups.py +152 -0
- qubx/core/series.cpython-311-x86_64-linux-gnu.so +0 -0
- qubx/core/series.pxd +94 -0
- qubx/core/series.pyx +763 -0
- qubx/core/strategy.py +89 -0
- qubx/core/utils.cpython-311-x86_64-linux-gnu.so +0 -0
- qubx/core/utils.pyx +54 -0
- qubx/data/readers.py +387 -0
- qubx/math/__init__.py +1 -0
- qubx/math/stats.py +42 -0
- qubx/ta/__init__.py +0 -0
- qubx/ta/indicators.cpython-311-x86_64-linux-gnu.so +0 -0
- qubx/ta/indicators.pyx +258 -0
- qubx/utils/__init__.py +3 -0
- qubx/utils/_pyxreloader.py +271 -0
- qubx/utils/charting/mpl_helpers.py +182 -0
- qubx/utils/marketdata/binance.py +212 -0
- qubx/utils/misc.py +234 -0
- qubx/utils/pandas.py +206 -0
- qubx/utils/time.py +145 -0
- qubx-0.0.1.dist-info/METADATA +39 -0
- qubx-0.0.1.dist-info/RECORD +27 -0
- qubx-0.0.1.dist-info/WHEEL +4 -0
qubx/utils/pandas.py
ADDED
@@ -0,0 +1,206 @@
from typing import Dict, Union
from datetime import timedelta
import pandas as pd
import numpy as np

from numpy.lib.stride_tricks import as_strided as stride

from qubx.utils.misc import Struct


def drop_duplicated_indexes(df, keep='first'):
    """
    Drops duplicated indexes in dataframe/series
    Keeps either first or last occurrence (parameter keep)
    """
    return df[~df.index.duplicated(keep=keep)]


def process_duplicated_indexes(data: Union[pd.DataFrame, pd.Series], ns=1) -> Union[pd.DataFrame, pd.Series]:
    """
    Finds duplicated indexes in frame/series and adds a shift (in nanoseconds) to every repeating one
    :param data: time indexed dataframe/series
    :param ns: shift constant in nanoseconds
    :return: dataframe with duplicates removed (each former duplicate gets its own unique index)
    """
    values = data.index.duplicated(keep='first').astype(float)
    values[values == 0] = np.NaN

    missings = np.isnan(values)
    cumsum = np.cumsum(~missings)
    diff = np.diff(np.concatenate(([0.], cumsum[missings])))
    values[missings] = -diff

    # shift each duplicate by a multiple of ns nanoseconds
    data.index = data.index.values + np.cumsum(values).astype(np.timedelta64) * ns
    return data
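
# Usage sketch (annotation, not part of the published file): repeated
# timestamps get shifted apart by multiples of `ns` nanoseconds, so the
# index becomes strictly unique.
# >>> s = pd.Series([1.0, 2.0, 3.0], index=pd.DatetimeIndex(['2024-01-01'] * 3))
# >>> process_duplicated_indexes(s, ns=1).index.is_unique
# True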


def scols(*xs, keys=None, names=None, keep='all') -> pd.DataFrame:
    """
    Concat dataframes/series from xs into a single dataframe along axis 1
    :param keys: keys for the new dataframe (see pd.concat's keys parameter)
    :param names: new column names or dict with replacements
    :param keep: currently unused
    :return: combined dataframe

    Example
    -------
    >>> scols(
            pd.DataFrame([1,2,3,4,-4], list('abcud')),
            pd.DataFrame([111,21,31,14], list('xyzu')),
            pd.DataFrame([11,21,31,124], list('ertu')),
            pd.DataFrame([11,21,31,14], list('WERT')),
            names=['x', 'y', 'z', 'w'])
    """
    r = pd.concat(xs, axis=1, keys=keys)
    if names:
        if isinstance(names, (list, tuple)):
            if len(names) == len(r.columns):
                r.columns = names
            else:
                raise ValueError(
                    f"if 'names' contains new column names it must have the same length as the resulting df ({len(r.columns)})")
        elif isinstance(names, dict):
            r = r.rename(columns=names)
    return r


def srows(*xs, keep='all', sort=True) -> Union[pd.DataFrame, pd.Series]:
    """
    Concat dataframes/series from xs into a single dataframe along axis 0
    :param sort: if True, sorts the resulting dataframe by index (default)
    :param keep: how to deal with duplicated indexes.
                 If set to 'all' nothing is dropped (default), otherwise keeps first or last occurrences
    :return: combined dataframe

    Example
    -------
    >>> srows(
            pd.DataFrame([1,2,3,4,-4], list('abcud')),
            pd.DataFrame([111,21,31,14], list('xyzu')),
            pd.DataFrame([11,21,31,124], list('ertu')),
            pd.DataFrame([11,21,31,14], list('WERT')),
            sort=True, keep='last')
    """
    r = pd.concat(xs, axis=0)
    r = r.sort_index() if sort else r
    if keep != 'all':
        r = drop_duplicated_indexes(r, keep=keep)
    return r


def retain_columns_and_join(data: dict, columns) -> pd.DataFrame:
    """
    Retains given columns from every value of the data dictionary and concatenates them into a single data frame

        from qube.datasource import DataSource
        from qube.analysis.tools import retain_columns_and_join

        ds = DataSource('yahoo::daily')
        data = ds.load_data(['aapl', 'msft', 'spy'], '2000-01-01', 'now')

        closes = retain_columns_and_join(data, 'close')
        hi_lo = retain_columns_and_join(data, ['high', 'low'])

    :param data: dictionary of dataframes
    :param columns: column names to retain
    :return: data frame
    """
    if not isinstance(data, dict):
        raise ValueError('Data must be passed as dictionary')

    return pd.concat([data[k][columns] for k in data.keys()], axis=1, keys=data.keys())
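
# Usage sketch (annotation, not part of the published file): resulting
# columns are keyed by the dict keys; passing a list of columns yields a
# (key, column) MultiIndex instead.
# >>> d = {'AAPL': pd.DataFrame({'close': [1.0, 2.0], 'volume': [10, 20]}),
# ...      'MSFT': pd.DataFrame({'close': [3.0, 4.0], 'volume': [30, 40]})}
# >>> retain_columns_and_join(d, 'close').columns.tolist()
# ['AAPL', 'MSFT']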


def continuous_periods(xs, cond) -> Struct:
    """
    Detect continuous periods in series xs where condition cond holds
    """
    df = scols(xs, cond, keys=['_XS_', 'sig'])
    df['block'] = (df.sig.shift(1) != df.sig).astype(int).cumsum()
    idx_col_name = xs.index.name  # index must be named for the groupby below

    blk = df[df.sig].reset_index().groupby('block')[idx_col_name].apply(np.array)
    starts = blk.apply(lambda x: x[0])
    ends = blk.apply(lambda x: x[-1])
    se_info = scols(starts, ends, keys=['start', 'end'])
    return Struct(blocks=blk.reset_index(drop=True), periods=se_info)
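
# Usage sketch (annotation, not part of the published file): find the runs
# where a condition holds; note the series index must be named.
# >>> ts = pd.Series([0, 5, 6, 0, 7, 8],
# ...                pd.date_range('2024-01-01', periods=6, freq='D', name='time'))
# >>> continuous_periods(ts, ts > 4).periods   # two runs: Jan 02..03 and Jan 05..06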


def roll(df: pd.DataFrame, w: int, **kwargs):
    """
    Rolling window on dataframe using multiple columns

    >>> roll(pd.DataFrame(np.random.randn(10,3), index=list('ABCDEFGHIJ')), 3).apply(print)

    or alternatively

    >>> pd.DataFrame(np.random.randn(10,3), index=list('ABCDEFGHIJ')).pipe(roll, 3).apply(lambda x: print(x[2]))

    :param df: pandas DataFrame
    :param w: window size (only integers)
    :return: rolling window
    """
    if w > len(df):
        raise ValueError("Window size exceeds number of rows!")

    v = df.values
    d0, d1 = v.shape
    s0, s1 = v.strides
    # zero-copy strided view of all overlapping w-row windows
    a = stride(v, (d0 - (w - 1), w, d1), (s0, s0, s1))
    # each window is keyed by the index label of its first row
    rolled_df = pd.concat({
        row: pd.DataFrame(values, columns=df.columns)
        for row, values in zip(df.index, a)
    })

    return rolled_df.groupby(level=0, **kwargs)
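
# Usage sketch (annotation, not part of the published file): a multi-column
# rolling computation, e.g. a 20-bar rolling correlation between two columns.
# >>> df = pd.DataFrame({'x': np.random.randn(100), 'y': np.random.randn(100)})
# >>> rc = roll(df, 20).apply(lambda w: w['x'].corr(w['y']))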


def dict_to_frame(x: dict, index_type=None, orient='index', columns=None, column_types=dict()) -> pd.DataFrame:
    """
    Utility to convert a dictionary into an indexed DataFrame
    Column names and the index type may be passed as well
    """
    y = pd.DataFrame().from_dict(x, orient=orient)
    if index_type:
        if index_type in ['ns', 'nano']:
            index_type = 'M8[ns]'
        y.index = y.index.astype(index_type)

    # rename if needed
    if columns:
        columns = [columns] if not isinstance(columns, (list, tuple, set)) else columns
        if len(columns) == len(y.columns):
            y.rename(columns=dict(zip(y.columns, columns)), inplace=True)
        else:
            raise ValueError('dict_to_frame> columns argument must contain %d elements' % len(y.columns))

    # if additional conversion is required
    if column_types:
        _existing_cols_conversion = {c: v for c, v in column_types.items() if c in y.columns}
        y = y.astype(_existing_cols_conversion)

    return y
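
# Usage sketch (annotation, not part of the published file): nanosecond keys
# become a DatetimeIndex and selected columns are cast.
# >>> dict_to_frame({0: {'price': 1.0, 'qty': 2.0}, 10**9: {'price': 1.5, 'qty': 3.0}},
# ...               index_type='ns', column_types={'qty': int})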


def select_column_and_join(data: Dict[str, pd.DataFrame], column: str) -> pd.DataFrame:
    """
    Select given column from every value of the data dictionary and concatenate them into a single data frame

        from qube.datasource import DataSource
        from qube.analysis.tools import select_column_and_join

        ds = DataSource('yahoo::daily')
        data = ds.load_data(['aapl', 'msft', 'spy'], '2000-01-01', 'now')

        closes = select_column_and_join(data, 'close')
        hi_lo = select_column_and_join(data, ['high', 'low'])

    :param data: dictionary of dataframes
    :param column: column name to select
    :return: pandas data frame
    """
    if not isinstance(data, dict):
        raise ValueError('Data must be passed as dictionary of pandas dataframes')

    return pd.concat([data[k][column] for k in data.keys()], axis=1, keys=data.keys())
qubx/utils/time.py
ADDED
@@ -0,0 +1,145 @@
from datetime import datetime
from typing import List, Optional, Union
import numpy as np
import re

import pandas as pd

UNIX_T0 = np.datetime64('1970-01-01T00:00:00')


time_to_str = lambda t, u='us': np.datetime_as_string(t if isinstance(t, np.datetime64) else np.datetime64(t, u), unit=u)


def convert_tf_str_td64(c_tf: str) -> np.timedelta64:
    """
    Convert string timeframe to timedelta64

    '15Min' -> timedelta64(15, 'm') etc
    """
    _t = re.findall(r'(\d+)([A-Za-z]+)', c_tf)
    _dt = 0
    for g in _t:
        unit = g[1].lower()
        n = int(g[0])
        u1 = unit[0]
        u2 = unit[:2]
        unit = u1

        if u1 in ['d', 'w']:
            unit = u1.upper()

        if u1 in ['y']:
            n = 365 * n  # a year is approximated as 365 days
            unit = 'D'

        if u2 in ['ms', 'ns', 'us', 'ps']:
            unit = u2

        _dt += np.timedelta64(n, unit)

    return _dt
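
# Usage sketch (annotation, not part of the published file): compound
# timeframes are summed into a single timedelta64.
# >>> convert_tf_str_td64('15Min')
# numpy.timedelta64(15,'m')
# >>> convert_tf_str_td64('1h30Min')
# numpy.timedelta64(90,'m')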


def convert_seconds_to_str(seconds: int) -> str:
    """
    Convert seconds to string representation: 310 -> '5Min10S' etc
    """
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    r = ''
    if days > 0:
        r += '%dD' % days
    if hours > 0:
        r += '%dH' % hours
    if minutes > 0:
        r += '%dMin' % minutes
    if seconds > 0:
        r += '%dS' % seconds
    return r


def floor_t64(time: Union[np.datetime64, datetime], dt: Union[np.timedelta64, int, str]):
    """
    Floor timestamp by dt
    """
    if isinstance(dt, int):
        dt = np.timedelta64(dt, 's')

    if isinstance(dt, str):
        dt = convert_tf_str_td64(dt)

    if isinstance(time, datetime):
        time = np.datetime64(time)

    return time - (time - UNIX_T0) % dt
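
# Usage sketch (annotation, not part of the published file): floor a
# timestamp to the start of its 15-minute bar.
# >>> floor_t64(np.datetime64('2024-01-01T12:34:56'), '15Min')
# numpy.datetime64('2024-01-01T12:30:00')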


def infer_series_frequency(series: Union[List, pd.DataFrame, pd.Series, pd.DatetimeIndex]) -> np.timedelta64:
    """
    Infer frequency of given timeseries

    :param series: Series, DataFrame, DatetimeIndex or list of timestamps
    :return: timedelta of the most frequent interval
    """
    if isinstance(series, (pd.DataFrame, pd.Series, pd.DatetimeIndex)):
        times_index = (series if isinstance(series, pd.DatetimeIndex) else series.index).to_pydatetime()
    elif isinstance(series, (set, list, tuple)):
        times_index = np.array(series)
    elif isinstance(series, np.ndarray):
        times_index = series
    else:
        raise ValueError("Can't recognize input data")

    if times_index.shape[0] < 2:
        raise ValueError("Series must have at least 2 points to determine frequency")

    values = np.array(sorted([(x if isinstance(x, np.timedelta64) else x.total_seconds()) for x in np.abs(np.diff(times_index))]))
    diff = np.concatenate(([1], np.diff(values)))
    idx = np.concatenate((np.where(diff)[0], [len(values)]))
    freqs = dict(zip(values[idx[:-1]], np.diff(idx)))
    return np.timedelta64(max(freqs, key=freqs.get))


def handle_start_stop(s: Optional[str], e: Optional[str], convert=str) -> tuple:
    """
    Process start/stop times

    handle_start_stop('2020-01-01', '2020-02-01') # 2020-01-01, 2020-02-01
    handle_start_stop('2020-02-01', '2020-01-01') # 2020-01-01, 2020-02-01
    handle_start_stop('2020-01-01', '1w')         # 2020-01-01, 2020-01-01 + 1 week
    handle_start_stop('1w', '2020-01-01')         # 2020-01-01 - 1 week, 2020-01-01
    handle_start_stop('2020-01-01', '-1w')        # 2020-01-01 - 1 week, 2020-01-01
    handle_start_stop(None, '2020-01-01')         # None, '2020-01-01'
    handle_start_stop('2020-01-01', None)         # '2020-01-01', None
    handle_start_stop(None, None)                 # None, None
    """
    def _h_time_like(x):
        try:
            return pd.Timestamp(x), False
        except Exception:
            try:
                return pd.Timedelta(x), True
            except Exception:
                pass
        return None, None

    t0, d0 = _h_time_like(s) if s else (None, False)
    t1, d1 = _h_time_like(e) if e else (None, False)
    converts = lambda xs: [convert(xs[0]) if xs[0] else None, convert(xs[1]) if xs[1] else None]

    if not t1 and not t0:
        return None, None

    if d0 and d1:
        raise ValueError("Start and stop can't both be deltas!")

    if d0:
        if not t1:
            raise ValueError("First argument is a delta but stop time is not defined!")
        return converts(sorted([t1 - abs(t0), t1]))

    if d1:
        if not t0:
            raise ValueError("Second argument is a delta but start time is not defined!")
        return converts(sorted([t0, t0 + t1]))

    if t0 and t1:
        return converts(sorted([t0, t1]))

    return converts([t0, t1])
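
# Usage sketch (annotation, not part of the published file): a delta as the
# second argument extends the start time forward; note the result is a
# 2-element list built by `converts`.
# >>> handle_start_stop('2020-01-01', '1w', convert=pd.Timestamp)
# [Timestamp('2020-01-01 00:00:00'), Timestamp('2020-01-08 00:00:00')]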

qubx-0.0.1.dist-info/METADATA
ADDED
@@ -0,0 +1,39 @@
Metadata-Version: 2.1
Name: Qubx
Version: 0.0.1
Summary: Qubx - quantitative trading framework
Home-page: https://github.com/dmarienko/Qubx
Author: Dmitry Marienko
Author-email: dmitry@gmail.com
Requires-Python: >=3.9,<4.0
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Requires-Dist: cython (==3.0.8)
Requires-Dist: loguru (>=0.7.2,<0.8.0)
Requires-Dist: ntplib (>=0.4.0,<0.5.0)
Requires-Dist: numpy (>=1.26.3,<2.0.0)
Requires-Dist: pyarrow (>=15.0.0,<16.0.0)
Requires-Dist: pydantic (>=1.10.2,<2.0.0)
Requires-Dist: pymongo (>=4.6.1,<5.0.0)
Requires-Dist: pytest[lazyfixture] (>=7.2.0,<8.0.0)
Requires-Dist: python-binance (>=1.0.19,<2.0.0)
Requires-Dist: python-dotenv (>=1.0.0,<2.0.0)
Requires-Dist: scipy (>=1.12.0,<2.0.0)
Requires-Dist: stackprinter (>=0.2.10,<0.3.0)
Project-URL: Repository, https://github.com/dmarienko/Qubx
Description-Content-Type: text/markdown

# Qubx

### Next generation of Qube quantitative backtesting framework (QUBX)
```
 /////\
 ///// \
 \\\\\ /
  \\\\\/   (c) 2024, by M.D.E
```

qubx-0.0.1.dist-info/RECORD
ADDED
@@ -0,0 +1,27 @@
qubx/__init__.py,sha256=eEkknShL0X-WdqY3SkF7DNUt1rUDmB10VW3kqRHUgxQ,5133
qubx/_nb_magic.py,sha256=xKWVljqr71K6Nkv2oI_EAI7xzRLV4pSGEBp7dtpDgiI,2357
qubx/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
qubx/core/basics.py,sha256=Ki_fQbgrmTDvZQRUCzUUU5fjBcykifz8FfYIqE_jTe0,10251
qubx/core/lookups.py,sha256=XDniSpIiC5HzPwhD_2cmWVpkL4f7VHdA5HOpvbR9vSA,6284
qubx/core/series.cpython-311-x86_64-linux-gnu.so,sha256=B0-FN_voDNfbDFZWgrDph1Jyp8eBXwWkRAwAFuItA5c,3686656
qubx/core/series.pxd,sha256=q1XWnq8nPX6-IP9BJDTrLKJq5Pg3eA-ugWInivn6hEE,2283
qubx/core/series.pyx,sha256=5niSMhSEjAlLZXCs7SbceTM6CiSgIVAWRjQfLCzEu3k,25097
qubx/core/strategy.py,sha256=T0R0mgalgpfq5qm9X5uoAg9oRwrwUei6Zx9DBxEEqrM,2342
qubx/core/utils.cpython-311-x86_64-linux-gnu.so,sha256=rGKvoGS0Fqk8Db92swP02skH22pYp9HTk2VIhWSrJvE,324992
qubx/core/utils.pyx,sha256=6dQ8R02bl8V3f-W3Wk9-e86D9OvDz-5-4NA_dlF_xwc,1368
qubx/data/readers.py,sha256=6l3GDS3W43xu5slEmwTV4vB3IVmgf5Drek9wlmh1i6o,14928
qubx/math/__init__.py,sha256=AavTKCtU7gRffG9T10Z0uv4LdI31bVvBn-L_Iv81FRk,33
qubx/math/stats.py,sha256=LnZZFe_3_vj1yW-wcQdtOmI9t5yGkiYfLWa4kVFXkjA,1176
qubx/ta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
qubx/ta/indicators.cpython-311-x86_64-linux-gnu.so,sha256=Xg6mKakEkfgo0aYonRTHRrrVw2_LLGNe68-0y6PI7D0,1450408
qubx/ta/indicators.pyx,sha256=P-GEYUks2lSHo6hbtUFAB7TWE1AunjLR4jIjwqPHrwU,7708
qubx/utils/__init__.py,sha256=mYNqttT_TaTDoz3lUp0Oh4fn7w1q7Mg_d2gmXugQnrw,283
qubx/utils/_pyxreloader.py,sha256=_YHcM9uya_6Ni-eSSFBPOrkk_0J4iEMtL_2Y8YHUfoE,11894
qubx/utils/charting/mpl_helpers.py,sha256=nfbyG2lU_cv64CHQu1m4spPeZNSmey-E6NawOcQeR1Q,5937
qubx/utils/marketdata/binance.py,sha256=cZkCrgSmPYxUE-rxzMnCtP0oLIqj1ctOvui1C4X9Obo,9162
qubx/utils/misc.py,sha256=thfl1KP6Zv-ci1JFdtOEMO84qZ8DWVZNXpBD7MY57Xg,6908
qubx/utils/pandas.py,sha256=psgDABGe76Bik5up012xg_JYq0LiALawyXGzw2Y0Rks,7281
qubx/utils/time.py,sha256=cklczM8-N5S6NMUHzW176v08QKp_9zgGobqVPXfnIeE,4604
qubx-0.0.1.dist-info/METADATA,sha256=YcSydQX7TVBGegU6jqfGYlfTLsHrnz4Whb_mOeR-nVo,1292
qubx-0.0.1.dist-info/WHEEL,sha256=MLOa6LysROdjgj4FVxsHitAnIh8Be2D_c9ZSBHKrz2M,110
qubx-0.0.1.dist-info/RECORD,,