Qubx 0.6.52__cp312-cp312-manylinux_2_39_x86_64.whl → 0.6.54__cp312-cp312-manylinux_2_39_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of Qubx might be problematic; see the advisory on the registry page for more details.

qubx/backtester/management.py CHANGED

@@ -3,6 +3,7 @@ import zipfile
  from collections import defaultdict
  from pathlib import Path

+ import numpy as np
  import pandas as pd
  import yaml

@@ -230,6 +231,25 @@ class BacktestsResultsManager:

              yield info.get("idx", -1)

+     def list_variations(self, regex: str = "", detailed=True, sort_by: str | None = "sharpe", ascending=False):
+         """
+         List only variations of a backtest result.
+
+         Args:
+         - regex (str, optional): Regular expression pattern to filter results by strategy name or class. Defaults to "".
+         - sort_by (str, optional): The criterion to sort the results by. Defaults to "sharpe".
+         - ascending (bool, optional): Whether to sort the results in ascending order. Defaults to False.
+         - detailed (bool, optional): Whether to show each variation run. Defaults to True.
+         """
+         return self.list(
+             regex=regex,
+             sort_by=sort_by,
+             ascending=ascending,
+             show_variations=True,
+             show_simulations=False,
+             show_each_variation_run=detailed,
+         )
+
      def list(
          self,
          regex: str = "",
@@ -239,7 +259,9 @@ class BacktestsResultsManager:
          pretty_print=False,
          sort_by: str | None = "sharpe",
          ascending=False,
+         show_simulations=True,
          show_variations=True,
+         show_each_variation_run=True,
      ):
          """List backtesting results with optional filtering and formatting.

@@ -248,87 +270,93 @@ class BacktestsResultsManager:
          - with_metrics (bool, optional): Whether to include performance metrics in output. Defaults to True.
          - params (bool, optional): Whether to display strategy parameters. Defaults to False.
          - as_table (bool, optional): Return results as a pandas DataFrame instead of printing. Defaults to False.
+         - sort_by (str, optional): The criterion to sort the results by. Defaults to "sharpe".
+         - ascending (bool, optional): Whether to sort the results in ascending order. Defaults to False.
+         - show_simulations (bool, optional): Whether to show simulation results. Defaults to True.
+         - show_variations (bool, optional): Whether to show variation results. Defaults to True.
+         - show_each_variation_run (bool, optional): Whether to show each variation run. Defaults to True.

          Returns:
          - Optional[pd.DataFrame]: If as_table=True, returns a DataFrame containing the results sorted by creation time.
          - Otherwise prints formatted results to console.
          """
          _t_rep = []
-         for n in sorted(self.results.keys()):
-             info = self.results[n]
-             s_cls = info.get("strategy_class", "").split(".")[-1]
+         if show_simulations:
+             for n in sorted(self.results.keys()):
+                 info = self.results[n]
+                 s_cls = info.get("strategy_class", "").split(".")[-1]

-             if regex:
-                 if not re.match(regex, n, re.IGNORECASE):
-                     # if not re.match(regex, s_cls, re.IGNORECASE):
-                     continue
+                 if regex:
+                     if not re.match(regex, n, re.IGNORECASE):
+                         # if not re.match(regex, s_cls, re.IGNORECASE):
+                         continue

-             name = info.get("name", "")
-             smbs = ", ".join(info.get("symbols", list()))
-             start = pd.Timestamp(info.get("start", "")).round("1s")
-             stop = pd.Timestamp(info.get("stop", "")).round("1s")
-             dscr = info.get("description", "")
-             created = pd.Timestamp(info.get("creation_time", "")).round("1s")
-             metrics = info.get("performance", {})
-             author = info.get("author", "")
-             _s = f"{yellow(str(info.get('idx')))} - {red(name)} ::: {magenta(created)} by {cyan(author)}"
-
-             _one_line_dscr = ""
-             if dscr:
-                 dscr = dscr.split("\n")
-                 for _d in dscr:
-                     _s += f"\n\t{magenta('# ' + _d)}"
-                     _one_line_dscr += "\u25cf " + _d + "\n"
-
-             _s += f"\n\tstrategy: {green(s_cls)}"
-             _s += f"\n\tinterval: {blue(start)} - {blue(stop)}"
-             _s += f"\n\tcapital: {blue(info.get('capital', ''))} {info.get('base_currency', '')} ({info.get('commissions', '')})"
-             _s += f"\n\tinstruments: {blue(smbs)}"
-             if params:
-                 formats = ["{" + f":<{i}" + "}" for i in [50]]
-                 _p = pd.DataFrame.from_dict(info.get("parameters", {}), orient="index")
-                 for i in _p.to_string(
-                     max_colwidth=30,
-                     header=False,
-                     formatters=[(lambda x: cyan(fmt.format(str(x)))) for fmt in formats],
-                     justify="left",
-                 ).split("\n"):
-                     _s += f"\n\t | {yellow(i)}"
-
-             if not as_table:
-                 print(_s)
+                 name = info.get("name", "")
+                 smbs = ", ".join(info.get("symbols", list()))
+                 start = pd.Timestamp(info.get("start", "")).round("1s")
+                 stop = pd.Timestamp(info.get("stop", "")).round("1s")
+                 dscr = info.get("description", "")
+                 created = pd.Timestamp(info.get("creation_time", "")).round("1s")
+                 metrics = info.get("performance", {})
+                 author = info.get("author", "")
+                 _s = f"{yellow(str(info.get('idx')))} - {red(name)} ::: {magenta(created)} by {cyan(author)}"
+
+                 _one_line_dscr = ""
+                 if dscr:
+                     dscr = dscr.split("\n")
+                     for _d in dscr:
+                         _s += f"\n\t{magenta('# ' + _d)}"
+                         _one_line_dscr += "\u25cf " + _d + "\n"
+
+                 _s += f"\n\tstrategy: {green(s_cls)}"
+                 _s += f"\n\tinterval: {blue(start)} - {blue(stop)}"
+                 _s += f"\n\tcapital: {blue(info.get('capital', ''))} {info.get('base_currency', '')} ({info.get('commissions', '')})"
+                 _s += f"\n\tinstruments: {blue(smbs)}"
+                 if params:
+                     formats = ["{" + f":<{i}" + "}" for i in [50]]
+                     _p = pd.DataFrame.from_dict(info.get("parameters", {}), orient="index")
+                     for i in _p.to_string(
+                         max_colwidth=30,
+                         header=False,
+                         formatters=[(lambda x: cyan(fmt.format(str(x)))) for fmt in formats],
+                         justify="left",
+                     ).split("\n"):
+                         _s += f"\n\t | {yellow(i)}"

-             if with_metrics:
-                 _m_repr = (
-                     pd.DataFrame.from_dict(metrics, orient="index")
-                     .T[["gain", "cagr", "sharpe", "qr", "max_dd_pct", "mdd_usd", "fees", "execs"]]
-                     .astype(float)
-                 )
-                 _m_repr = _m_repr.round(3).to_string(index=False)
-                 _h, _v = _m_repr.split("\n")
                  if not as_table:
-                     print("\t " + red(_h))
-                     print("\t " + cyan(_v))
-
-             if not as_table:
-                 print()
-             else:
-                 metrics = {
-                     m: round(v, 3)
-                     for m, v in metrics.items()
-                     if m in ["gain", "cagr", "sharpe", "qr", "max_dd_pct", "mdd_usd", "fees", "execs"]
-                 }
-                 _t_rep.append(
-                     {"Index": info.get("idx", ""), "Strategy": name}
-                     | metrics
-                     | {
-                         "start": start,
-                         "stop": stop,
-                         "Created": created,
-                         "Author": author,
-                         "Description": _one_line_dscr,
-                     },
-                 )
+                     print(_s)
+
+                 if with_metrics:
+                     _m_repr = (
+                         pd.DataFrame.from_dict(metrics, orient="index")
+                         .T[["gain", "cagr", "sharpe", "qr", "max_dd_pct", "mdd_usd", "fees", "execs"]]
+                         .astype(float)
+                     )
+                     _m_repr = _m_repr.round(3).to_string(index=False)
+                     _h, _v = _m_repr.split("\n")
+                     if not as_table:
+                         print("\t " + red(_h))
+                         print("\t " + cyan(_v))
+
+                 if not as_table:
+                     print()
+                 else:
+                     metrics = {
+                         m: round(v, 3)
+                         for m, v in metrics.items()
+                         if m in ["gain", "cagr", "sharpe", "qr", "max_dd_pct", "mdd_usd", "fees", "execs"]
+                     }
+                     _t_rep.append(
+                         {"Index": info.get("idx", ""), "Strategy": name}
+                         | metrics
+                         | {
+                             "start": start,
+                             "stop": stop,
+                             "Created": created,
+                             "Author": author,
+                             "Description": _one_line_dscr,
+                         },
+                     )

          # - variations (only if not as_table for the time being)
          if not as_table and show_variations:
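
The new list_variations() helper is a thin wrapper over list() that flips the new show_simulations / show_variations / show_each_variation_run flags, so parameter sweeps can be inspected without the plain simulation runs. A minimal usage sketch (the results folder and constructor arguments below are assumptions for illustration, not taken from this diff):

# Hedged sketch: the constructor signature of BacktestsResultsManager is assumed here.
from qubx.backtester.management import BacktestsResultsManager

mgr = BacktestsResultsManager("results/")                  # hypothetical results folder
mgr.list_variations(regex="momentum", detailed=False)      # variation summaries only
mgr.list(show_simulations=True, show_variations=False)     # plain simulation runs only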
@@ -358,11 +386,12 @@ class BacktestsResultsManager:
              _m_repr = _m_repr.to_string(index=True)

              print(_s)
-             for _i, _l in enumerate(_m_repr.split("\n")):
-                 if _i == 0:
-                     print("\t " + red(_l))
-                 else:
-                     print("\t " + blue(_l))
+             if show_each_variation_run:
+                 for _i, _l in enumerate(_m_repr.split("\n")):
+                     if _i == 0:
+                         print("\t " + red(_l))
+                     else:
+                         print("\t " + blue(_l))

          if as_table:
              _df = pd.DataFrame.from_records(_t_rep, index="Index")
@@ -376,3 +405,105 @@ class BacktestsResultsManager:
                      .replace("<td>", '<td align="left" valign="top">')
                  )
              return _df
+
+     def variation_plot(self, variation_idx: int, criterion: str = "sharpe", ascending: bool = False, n=3, h=600):
+         """
+         Plot a variation of a backtest result.
+
+         Args:
+         - variation_idx (int): The index of the variation to plot.
+         - criterion (str): The criterion to plot (e.g. "sharpe", "mdd_usd", "max_dd_pct", etc.).
+         - ascending (bool): Whether to sort the results in ascending order.
+         - n (int): The number of decimal places to display.
+         - h (int): The height of the plot.
+
+         Returns:
+             plotly.graph_objects.Figure: The plot of the variation.
+         """
+         import plotly.express as px
+         from itertools import cycle
+         from qubx.utils.misc import string_shortener
+
+         _vars = self.variations.get(variation_idx)
+         if not _vars:
+             raise ValueError(f"No variations found for index {variation_idx} !")
+
+         variations = _vars.get("variations", [])
+         name = _vars.get("name", "") or ""
+
+         _r, _p = {}, {}
+         for i, v in enumerate(variations):
+             _p[i] = v["parameters"]
+         _pp = pd.DataFrame.from_records(_p).T
+         # - changed parameters
+         _cp = []
+         for c in _pp.columns:
+             if len(_pp[c].astype(str).unique()) > 1:
+                 _cp.append(c)
+
+         # - if nothing was actually changed in parameters, raise an error
+         if not _cp:
+             raise ValueError(f"No variable parameters found for simulation {name} !")
+
+         _ms = max([len(string_shortener(x)) for x in _cp]) + 3
+         _h = "".join([string_shortener(x).center(_ms) for x in _cp])
+
+         _sel = lambda ds, _cp: "".join(
+             [
+                 f"<span style='color:{c}'> {str(ds[k]).center(_ms)}</span>"
+                 for k, c in zip(_cp, cycle(px.colors.qualitative.Plotly))
+                 if k in k in ds
+             ]
+         )
+         for i, v in enumerate(variations):
+             _r[i] = {"name": v["name"], **v["performance"], "parameters": _sel(v["parameters"], _cp)}
+
+         t1 = pd.DataFrame.from_records(_r).T
+         if criterion not in t1.columns:
+             raise ValueError(f"Criterion {criterion} not found in results: possible values are {t1.columns}")
+         t2 = t1.sort_values(criterion, ascending=ascending)
+
+         data = pd.Series([np.nan, *t2[criterion].to_list()], index=[_h, *t2["parameters"].to_list()])
+
+         figure = (
+             px.bar(data, orientation="h")
+             .update_layout(
+                 title=dict(
+                     text=f"{name} | <span style='color:orange'>{criterion.capitalize()}</span>",
+                 ),
+                 xaxis=dict(tickfont=dict(family="monospace", size=10, color="#ff4000")),
+                 yaxis=dict(
+                     tickfont=dict(family="monospace", size=10, color="#40a000"),
+                     dtick=1,
+                 ),
+             )
+             .update_layout(
+                 height=h,
+                 hovermode="x unified",
+                 showlegend=False,
+                 hoverdistance=1,
+                 yaxis={"hoverformat": f".{n}f"},
+                 dragmode="zoom",
+                 newshape=dict(line_color="red", line_width=1.0),
+                 modebar_add=["drawline", "drawopenpath", "drawrect", "eraseshape"],
+                 hoverlabel=dict(align="auto", bgcolor="rgba(10, 10, 10, 0.5)"),
+             )
+             .update_xaxes(
+                 showspikes=True,
+                 spikemode="across",
+                 spikesnap="cursor",
+                 spikecolor="#306020",
+                 spikethickness=1,
+                 spikedash="dot",
+                 title=criterion,
+             )
+             .update_yaxes(
+                 spikesnap="cursor",
+                 spikecolor="#306020",
+                 tickformat=f".{n}f",
+                 spikethickness=1,
+                 title="Parameters",
+                 autorange="reversed",
+             )
+         )
+         return figure
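
variation_plot() renders one performance metric across all runs of a parameter variation as a horizontal Plotly bar chart, with the varied parameter values spelled out on the y axis. A usage sketch under the same assumptions as above (the index value is illustrative and plotly must be installed):

# Sketch only: 12 is an arbitrary variation index taken from a list_variations() listing.
fig = mgr.variation_plot(variation_idx=12, criterion="sharpe", ascending=False, n=3, h=800)
fig.show()  # standard plotly Figure API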
qubx/core/mixins/processing.py CHANGED

@@ -16,6 +16,7 @@ from qubx.core.basics import (
      Timestamped,
      TriggerEvent,
      dt_64,
+     td_64,
  )
  from qubx.core.errors import BaseErrorEvent
  from qubx.core.exceptions import StrategyExceededMaxNumberOfRuntimeFailuresError
@@ -40,7 +41,7 @@ from qubx.core.series import Bar, OrderBook, Quote, Trade

  class ProcessingManager(IProcessingManager):
      MAX_NUMBER_OF_STRATEGY_FAILURES: int = 10
-     DATA_READY_TIMEOUT_SECONDS: int = 60
+     DATA_READY_TIMEOUT: td_64 = td_64(60, "s")

      _context: IStrategyContext
      _strategy: IStrategy
@@ -69,6 +70,8 @@ class ProcessingManager(IProcessingManager):
      _cur_sim_step: int | None = None
      _updated_instruments: set[Instrument] = set()
      _data_ready_start_time: dt_64 | None = None
+     _last_data_ready_log_time: dt_64 | None = None
+     _all_instruments_ready_logged: bool = False

      def __init__(
          self,
@@ -114,6 +117,8 @@ class ProcessingManager(IProcessingManager):
          self._trig_bar_freq_nsec = None
          self._updated_instruments = set()
          self._data_ready_start_time = None
+         self._last_data_ready_log_time = None
+         self._all_instruments_ready_logged = False

      def set_fit_schedule(self, schedule: str) -> None:
          rule = process_schedule_spec(schedule)
@@ -350,7 +355,7 @@ class ProcessingManager(IProcessingManager):
          Check if strategy can start based on data availability with timeout logic.

          Two-phase approach:
-         - Phase 1 (0-DATA_READY_TIMEOUT_SECONDS): Wait for ALL instruments to have data
+         - Phase 1 (0-DATA_READY_TIMEOUT): Wait for ALL instruments to have data
          - Phase 2 (after timeout): Wait for at least 1 instrument to have data

          Returns:
@@ -363,39 +368,73 @@ class ProcessingManager(IProcessingManager):
              return True

          ready_instruments = len(self._updated_instruments)
+         current_time = self._time_provider.time()

          # Record start time on first call
          if self._data_ready_start_time is None:
-             self._data_ready_start_time = self._time_provider.time()
+             self._data_ready_start_time = current_time

          # Phase 1: Try to get all instruments ready within timeout
-         elapsed_time_seconds = (self._time_provider.time() - self._data_ready_start_time) / 1e9
+         elapsed_td = current_time - self._data_ready_start_time

-         if elapsed_time_seconds <= self.DATA_READY_TIMEOUT_SECONDS:
+         if elapsed_td <= self.DATA_READY_TIMEOUT:
              # Within timeout period - wait for ALL instruments
              if ready_instruments == total_instruments:
-                 logger.info(f"All {total_instruments} instruments have data - strategy ready to start")
+                 if not self._all_instruments_ready_logged:
+                     logger.info(f"All {total_instruments} instruments have data - strategy ready to start")
+                     self._all_instruments_ready_logged = True
                  return True
              else:
-                 # Log periodic status during Phase 1
-                 if int(elapsed_time_seconds) % 10 == 0 and elapsed_time_seconds > 0:  # Log every 10 seconds
+                 # Log periodic status during Phase 1 - throttled to once per 10 seconds
+                 elapsed_seconds = elapsed_td / td_64(1, "s")
+                 should_log = self._last_data_ready_log_time is None or (
+                     current_time - self._last_data_ready_log_time
+                 ) >= td_64(10, "s")
+
+                 if should_log and elapsed_seconds > 0:
                      missing_instruments = set(self._context.instruments) - self._updated_instruments
                      missing_symbols = [inst.symbol for inst in missing_instruments]
+                     remaining_timeout = (self.DATA_READY_TIMEOUT - elapsed_td) / td_64(1, "s")
                      logger.info(
-                         f"Phase 1: Waiting for all instruments ({ready_instruments}/{total_instruments} ready). "
-                         f"Missing: {missing_symbols}. Timeout in {self.DATA_READY_TIMEOUT_SECONDS - elapsed_time_seconds}s"
+                         f"Waiting for all instruments ({ready_instruments}/{total_instruments} ready). "
+                         f"Missing: {missing_symbols}. Will start with partial data in {remaining_timeout:.0f}s"
                      )
+                     self._last_data_ready_log_time = current_time
                  return False
          else:
              # Phase 2: After timeout - need at least 1 instrument
-             if ready_instruments >= 1:
+             if ready_instruments == total_instruments:
+                 if not self._all_instruments_ready_logged:
+                     logger.info(f"All {total_instruments} instruments have data - strategy ready to start")
+                     self._all_instruments_ready_logged = True
+                 return True
+
+             elif ready_instruments >= 1:
                  missing_instruments = set(self._context.instruments) - self._updated_instruments
                  missing_symbols = [inst.symbol for inst in missing_instruments]
-                 logger.info(
-                     f"Starting strategy with {ready_instruments}/{total_instruments} instruments ready. Missing: {missing_symbols}"
-                 )
+
+                 # Log once when entering Phase 2
+                 should_log = self._last_data_ready_log_time is None or (
+                     current_time - self._last_data_ready_log_time
+                 ) >= td_64(10, "s")
+                 if should_log:
+                     logger.info(
+                         f"Timeout reached - starting with {ready_instruments}/{total_instruments} instruments ready. "
+                         f"Missing: {missing_symbols}"
+                     )
+                     self._last_data_ready_log_time = current_time
                  return True
              else:
+                 # Still no instruments ready - keep waiting and log periodically
+                 should_log = self._last_data_ready_log_time is None or (
+                     current_time - self._last_data_ready_log_time
+                 ) >= td_64(10, "s")
+                 if should_log:
+                     logger.warning(
+                         f"No instruments ready after timeout - still waiting "
+                         f"({ready_instruments}/{total_instruments} ready)"
+                     )
+                     self._last_data_ready_log_time = current_time
                  return False

      def __update_base_data(
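
The timeout handling switches from an int number of seconds to td_64, so the elapsed time, the remaining Phase 1 budget, and the 10-second log throttle are all computed directly in timestamp arithmetic instead of dividing nanoseconds by 1e9. A minimal sketch of that arithmetic with plain numpy, assuming dt_64/td_64 behave like numpy datetime64/timedelta64 (an assumption, not stated in this diff):

import numpy as np

DATA_READY_TIMEOUT = np.timedelta64(60, "s")     # analogue of td_64(60, "s")
start = np.datetime64("2024-01-01T00:00:00")
now = start + np.timedelta64(25, "s")

elapsed = now - start                            # timedelta64, no manual / 1e9
print(elapsed <= DATA_READY_TIMEOUT)             # True -> still in Phase 1
print((DATA_READY_TIMEOUT - elapsed) / np.timedelta64(1, "s"))  # 35.0 seconds left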
qubx/data/composite.py CHANGED
@@ -55,10 +55,22 @@ class IteratedDataStreamsSlicer(Iterator[SlicerOutData]):
          _keys = keys if isinstance(keys, list) else [keys]
          _rebuild = False
          for i in _keys:
+             # Check and remove from each data structure independently
+             removed_any = False
+
              if i in self._buffers:
                  self._buffers.pop(i)
+                 removed_any = True
+
+             if i in self._iterators:
                  self._iterators.pop(i)
+                 removed_any = True
+
+             if i in self._keys:
                  self._keys.remove(i)
+                 removed_any = True
+
+             if removed_any:
                  _rebuild = True

          # - rebuild strategy
@@ -78,7 +90,7 @@ class IteratedDataStreamsSlicer(Iterator[SlicerOutData]):
      def _load_next_chunk_to_buffer(self, index: str) -> list[Timestamped]:
          try:
              return list(reversed(next(self._iterators[index])))
-         except StopIteration:
+         except (StopIteration, IndexError):
              return []

      def _remove_iterator(self, key: str):
@@ -128,6 +140,12 @@ class IteratedDataStreamsSlicer(Iterator[SlicerOutData]):
                  data.extend(self._load_next_chunk_to_buffer(key))  # - get next chunk of data
              except StopIteration:
                  self._remove_iterator(key)
+                 # Return empty list if no data is available
+                 return values
+
+         # Check if data is still empty after attempting to load
+         if not data:
+             return values

          # pull most past elements
          v = data[-1]
@@ -139,6 +157,9 @@ class IteratedDataStreamsSlicer(Iterator[SlicerOutData]):
                  except StopIteration:
                      self._remove_iterator(key)
                      break
+                 # Check if data is still empty after loading attempt
+                 if not data:
+                     break
              v = data[-1]

          return values
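
The slicer changes make key removal tolerant of keys that are present in only some of the internal structures, and make the buffer-refill paths bail out cleanly when an iterator yields nothing. A standalone sketch of the independent-removal pattern, with plain dicts and a list standing in for the slicer's _buffers, _iterators and _keys (the names are reused for clarity only):

# Standalone illustration of removing a key from each structure independently.
_buffers = {"a": [1, 2]}
_iterators = {}              # "a" was never registered here
_keys = ["a"]

removed_any = False
for container in (_buffers, _iterators):
    if "a" in container:
        container.pop("a")
        removed_any = True
if "a" in _keys:
    _keys.remove("a")
    removed_any = True

rebuild = removed_any        # rebuild only when something was actually dropped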
qubx/utils/runner/runner.py CHANGED

@@ -689,7 +689,7 @@ def _run_warmup(
      # - get the instruments from the warmup runner context
      _instruments = warmup_runner.ctx.instruments
      _positions = warmup_account.get_positions()
-     _positions = {k: v for k, v in _positions.items() if k in _instruments}
+     _positions = {k: v for k, v in _positions.items() if k in _instruments and v is not None and v.quantity is not None}
      _orders = warmup_account.get_orders()
      instrument_to_orders = defaultdict(list)
      for o in _orders.values():
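
The warmup position transfer now also skips entries whose value or quantity is None, so placeholder positions restored from state no longer break the hand-off into the live context. A tiny self-contained sketch of the guard (the Position stand-in below is hypothetical):

# Hypothetical stand-in for the real position objects, just to show the None guards.
class Pos:
    def __init__(self, quantity):
        self.quantity = quantity

_instruments = ["BTCUSDT"]
_positions = {"BTCUSDT": Pos(1.5), "ETHUSDT": Pos(2.0), "XRPUSDT": None, "SOLUSDT": Pos(None)}
_positions = {k: v for k, v in _positions.items() if k in _instruments and v is not None and v.quantity is not None}
print(_positions)  # only the BTCUSDT entry survives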
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: Qubx
- Version: 0.6.52
+ Version: 0.6.54
  Summary: Qubx - Quantitative Trading Framework
  Author: Dmitry Marienko
  Author-email: dmitry.marienko@xlydian.com
@@ -4,7 +4,7 @@ qubx/backtester/__init__.py,sha256=OhXhLmj2x6sp6k16wm5IPATvv-E2qRZVIcvttxqPgcg,1
  qubx/backtester/account.py,sha256=0yvE06icSeK2ymovvaKkuftY8Ou3Z7Y2JrDa6VtkINw,3048
  qubx/backtester/broker.py,sha256=JMasxycLqCT99NxN50uyQ1uxtpHYL0wpp4sJ3hB6v2M,2688
  qubx/backtester/data.py,sha256=B1VHioLqBwA6ZnEgTn5Gere1vfOw0KvyFjGwm4vlByQ,6675
- qubx/backtester/management.py,sha256=HuyzFsBPgR7j-ei78Ngcx34CeSn65c9atmaii1aTsYg,14900
+ qubx/backtester/management.py,sha256=FQSMkdrTZrxKdLRrf4Uiw60pdBMb0xESeFrTfH9AqZk,20713
  qubx/backtester/ome.py,sha256=BC8EuJkPTiGbl8HliHehVzwdD0OSDlR04g6RVA66FQE,18614
  qubx/backtester/optimization.py,sha256=HHUIYA6Y66rcOXoePWFOuOVX9iaHGKV0bGt_4d5e6FM,7619
  qubx/backtester/runner.py,sha256=TnNM0t8PgBE_gnCOZZTIOc28a3RqtXmp2Xj4Gq5j6bo,20504
@@ -49,19 +49,19 @@ qubx/core/lookups.py,sha256=aEuyZqd_N4cQ-oHz3coEHcdX9Yb0cP5-NwDuj-DQyNk,19477
  qubx/core/metrics.py,sha256=74xIecCvlxVXl0gy0JvgjJ2X5gg-RMmVZw9hQikkHE0,60269
  qubx/core/mixins/__init__.py,sha256=AMCLvfNuIb1kkQl3bhCj9jIOEl2eKcVPJeyLgrkB-rk,329
  qubx/core/mixins/market.py,sha256=lBappEimPhIuI0vmUvwVlIztkYjlEjJBpP-AdpfudII,3948
- qubx/core/mixins/processing.py,sha256=VEaK6ZjXTa8jvavj_VpCYfGvLFTHpNoL1AKdRAeear8,27394
+ qubx/core/mixins/processing.py,sha256=oz-K-zs9T4lIp8zgMDpTnQH3eAdOvSsL74CYQeCrE3E,29319
  qubx/core/mixins/subscription.py,sha256=V_g9wCPQ8S5SHkU-qOZ84cV5nReAUrV7DoSNAGG0LPY,10372
  qubx/core/mixins/trading.py,sha256=idfRPaqrvkfMxzu9mXr9i_xfqLee-ZAOrERxkxv6Ruo,7256
  qubx/core/mixins/universe.py,sha256=tsMpBriLHwK9lAVYvIrO94EIx8_ETSXUlzxN_sDOsL8,9838
- qubx/core/series.cpython-312-x86_64-linux-gnu.so,sha256=SpozS5G2UEoeCbgVsaorjvwbKo5U4d5EqxP3pSktzyM,978280
+ qubx/core/series.cpython-312-x86_64-linux-gnu.so,sha256=uyinYx_PzV6OQXmDgEoZ1x-nmJgyvbB3MFRscSM3HtU,978280
  qubx/core/series.pxd,sha256=jBdMwgO8J4Zrue0e_xQ5RlqTXqihpzQNu6V3ckZvvpY,3978
  qubx/core/series.pyi,sha256=RaHm_oHHiWiNUMJqVfx5FXAXniGLsHxUFOUpacn7GC0,4604
  qubx/core/series.pyx,sha256=7cM3zZThW59waHiYcZmMxvYj-HYD7Ej_l7nKA4emPjE,46477
- qubx/core/utils.cpython-312-x86_64-linux-gnu.so,sha256=c3ZAL_JGRFpOPUlSITmuQ_jjxv4bA1CQAV9c13wtUfI,86568
+ qubx/core/utils.cpython-312-x86_64-linux-gnu.so,sha256=nCrEL8ALt8zAaqO4OjTDshUk4UOAJtsiIA3dtm9JmZo,86568
  qubx/core/utils.pyi,sha256=a-wS13V2p_dM1CnGq40JVulmiAhixTwVwt0ah5By0Hc,348
  qubx/core/utils.pyx,sha256=k5QHfEFvqhqWfCob89ANiJDKNG8gGbOh-O4CVoneZ8M,1696
  qubx/data/__init__.py,sha256=ELZykvpPGWc5rX7QoNyNQwMLgdKMG8MACOByA4pM5hA,549
- qubx/data/composite.py,sha256=l1FjJ2RnX7pxhah-cDBw7CWQQvwqKBCANXKiHRrZBzc,18233
+ qubx/data/composite.py,sha256=nLA3w3kMvzOc8n2rjAZ35sKa5DXAe8SOFsEov8PpES4,18881
  qubx/data/helpers.py,sha256=VcXBl1kfWzAOqrjadKrP9WemGjJIB0q3xascbesErh4,16268
  qubx/data/hft.py,sha256=be7AwzTOjqqCENn0ClrZoHDyKv3SFG66IyTp8QadHlM,33687
  qubx/data/readers.py,sha256=g3hSkyKdMAVziMCgcaZadsukidECaLwHyIEtArSVDSc,66203
@@ -124,7 +124,7 @@ qubx/restorers/signal.py,sha256=0QFoy7OzDkK6AAmJEbbmSsHwmAhjMJYYggVFuLraKjk,1089
  qubx/restorers/state.py,sha256=dLaVnUwRCNRkUqbYyi0RfZs3Q3AdglkI_qTtQ8GDD5Y,7289
  qubx/restorers/utils.py,sha256=We2gfqwQKWziUYhuUnjb-xo-5tSlbuHWpPQn0CEMTn0,1155
  qubx/ta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- qubx/ta/indicators.cpython-312-x86_64-linux-gnu.so,sha256=PwEMehN97zxTw_faYmjrDUHu_8-QlfsjOxo3h7bPQKo,654440
+ qubx/ta/indicators.cpython-312-x86_64-linux-gnu.so,sha256=RMzIKbB01Sto83iqNyQqryMqRlA1k7bMHPTbWdBQFSA,654440
  qubx/ta/indicators.pxd,sha256=Goo0_N0Xnju8XGo3Xs-3pyg2qr_0Nh5C-_26DK8U_IE,4224
  qubx/ta/indicators.pyi,sha256=19W0uERft49In5bf9jkJHkzJYEyE9gzudN7_DJ5Vdv8,1963
  qubx/ta/indicators.pyx,sha256=Xgpew46ZxSXsdfSEWYn3A0Q35MLsopB9n7iyCsXTufs,25969
@@ -158,11 +158,11 @@ qubx/utils/runner/_jupyter_runner.pyt,sha256=fDj4AUs25jsdGmY9DDeSFufH1JkVhLFwy0B
  qubx/utils/runner/accounts.py,sha256=mpiv6oxr5z97zWt7STYyARMhWQIpc_XFKungb_pX38U,3270
  qubx/utils/runner/configs.py,sha256=snVZJun6rBC09QZVaUd7BhqNlDZqmDMG7R8gHJeuSkU,3713
  qubx/utils/runner/factory.py,sha256=eM4-Etcq-FewD2AjH_srFGzP413pm8er95KIZixXRpM,15152
- qubx/utils/runner/runner.py,sha256=9s7mu84U29jCE7FtdW_yKKTzQfaXmCSvANF5cb7xd_Y,31399
+ qubx/utils/runner/runner.py,sha256=O1hC8zyvj1OInMM9eZeAbUI9sdJbJU3INSnsOagvdAE,31444
  qubx/utils/time.py,sha256=J0ZFGjzFL5T6GA8RPAel8hKG0sg2LZXeQ5YfDCfcMHA,10055
  qubx/utils/version.py,sha256=e52fIHyxzCiIuH7svCF6pkHuDlqL64rklqz-2XjWons,5309
- qubx-0.6.52.dist-info/LICENSE,sha256=qwMHOSJ2TD0nx6VUJvFhu1ynJdBfNozRMt6tnSul-Ts,35140
- qubx-0.6.52.dist-info/METADATA,sha256=Hor3x6zOv1rKp_mKkMdFc6HqE5bWIh14oHlX8ignRLk,4612
- qubx-0.6.52.dist-info/WHEEL,sha256=UckHTmFUCaLKpi4yFY8Dewu0c6XkY-KvEAGzGOnaWo8,110
- qubx-0.6.52.dist-info/entry_points.txt,sha256=VqilDTe8mVuV9SbR-yVlZJBTjbkHIL2JBgXfQw076HY,47
- qubx-0.6.52.dist-info/RECORD,,
+ qubx-0.6.54.dist-info/LICENSE,sha256=qwMHOSJ2TD0nx6VUJvFhu1ynJdBfNozRMt6tnSul-Ts,35140
+ qubx-0.6.54.dist-info/METADATA,sha256=l-EdSHBJN0LZCQg-guiQOuvc53_Hj3FHBd4sn_4mNpY,4612
+ qubx-0.6.54.dist-info/WHEEL,sha256=UckHTmFUCaLKpi4yFY8Dewu0c6XkY-KvEAGzGOnaWo8,110
+ qubx-0.6.54.dist-info/entry_points.txt,sha256=VqilDTe8mVuV9SbR-yVlZJBTjbkHIL2JBgXfQw076HY,47
+ qubx-0.6.54.dist-info/RECORD,,