Qubx 0.6.48__cp312-cp312-manylinux_2_39_x86_64.whl → 0.6.50__cp312-cp312-manylinux_2_39_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of Qubx might be problematic.

qubx/backtester/data.py CHANGED
@@ -139,26 +139,31 @@ class SimulatedDataProvider(IDataProvider):
         bars = []
 
         # - if no records, return empty list to avoid exception from infer_series_frequency
-        if not records:
+        if not records or records is None:
             return bars
 
-        _data_tf = infer_series_frequency([r.time for r in records[:50]])
-        timeframe_ns = _data_tf.item()
-
-        if records is not None:
-            for r in records:
-                # _b_ts_0 = np.datetime64(r.time, "ns").item()
-                _b_ts_0 = r.time
-                _b_ts_1 = _b_ts_0 + timeframe_ns - self._open_close_time_indent_ns
-
-                if _b_ts_0 <= cut_time_ns and cut_time_ns < _b_ts_1:
-                    break
-
-                bars.append(
-                    Bar(
-                        _b_ts_0, r.data["open"], r.data["high"], r.data["low"], r.data["close"], r.data.get("volume", 0)
-                    )
+        if len(records) > 1:
+            _data_tf = infer_series_frequency([r.time for r in records[:50]])
+            timeframe_ns = _data_tf.item()
+
+            for r in records:
+                _b_ts_0 = r.time
+                _b_ts_1 = _b_ts_0 + timeframe_ns - self._open_close_time_indent_ns
+
+                if _b_ts_0 <= cut_time_ns and cut_time_ns < _b_ts_1:
+                    break
+
+                bars.append(
+                    Bar(
+                        _b_ts_0,
+                        r.data["open"],
+                        r.data["high"],
+                        r.data["low"],
+                        r.data["close"],
+                        r.data.get("volume", 0),
+                        r.data.get("bought_volume", 0),
                     )
+                )
 
         return bars
 
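Note on the hunk above: bar reconstruction is now wrapped in a len(records) > 1 guard, because infer_series_frequency needs at least two timestamps to derive a timeframe, and the Bar(...) call now also carries an optional bought_volume field. Below is a self-contained sketch of just the frequency-guard idea; infer_timeframe_ns is an illustrative stand-in, not the package function.

import numpy as np

def infer_timeframe_ns(times_ns: list[int]) -> int | None:
    # Stand-in for infer_series_frequency: with fewer than two timestamps there
    # is no spacing to measure, which is exactly the case the new guard skips.
    if len(times_ns) < 2:
        return None
    return int(np.median(np.diff(np.asarray(times_ns[:50], dtype="int64"))))

assert infer_timeframe_ns([0]) is None
assert infer_timeframe_ns([0, 60_000_000_000, 120_000_000_000]) == 60_000_000_000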
@@ -48,6 +48,7 @@ def simulate(
     portfolio_log_freq: str = "5Min",
     parallel_backend: Literal["loky", "multiprocessing"] = "multiprocessing",
     emission: EmissionConfig | None = None,
+    run_separate_instruments: bool = False,
 ) -> list[TradingSessionResult]:
     """
     Backtest utility for trading strategies or signals using historical data.
@@ -73,6 +74,7 @@ def simulate(
     - portfolio_log_freq (str): Frequency for portfolio logging, default is "5Min".
     - parallel_backend (Literal["loky", "multiprocessing"]): Backend for parallel processing, default is "multiprocessing".
     - emission (EmissionConfig | None): Configuration for metric emitters, default is None.
+    - run_separate_instruments (bool): If True, creates separate simulation setups for each instrument, default is False.
 
     Returns:
     - list[TradingSessionResult]: A list of TradingSessionResult objects containing the results of each simulation setup.
@@ -109,6 +111,7 @@ def simulate(
         commissions=commissions,
         signal_timeframe=signal_timeframe,
         accurate_stop_orders_execution=accurate_stop_orders_execution,
+        run_separate_instruments=run_separate_instruments,
     )
     if not simulation_setups:
         logger.error(
@@ -117,6 +120,10 @@ def simulate(
         )
         raise SimulationError(_msg)
 
+    # - inform about separate instruments mode
+    if run_separate_instruments and len(simulation_setups) > 1:
+        logger.info(f"Running separate simulations for each instrument. Total simulations: {len(simulation_setups)}")
+
     # - preprocess start and stop and convert to datetime if necessary
     if stop is None:
         # - check stop time : here we try to backtest till now (may be we need to get max available time from data reader ?)
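For orientation, the new flag is passed straight through the public simulate(...) call. The sketch below is hypothetical: only run_separate_instruments and the keywords documented above are taken from this diff, while the strategy, data, and instrument arguments are illustrative placeholders rather than a verified signature.

# Hypothetical usage sketch (MyStrategy, data_reader and the symbols are placeholders).
results = simulate(
    {"MyStrategy": MyStrategy()},
    data=data_reader,
    capital=10_000,
    instruments=["BINANCE.UM:BTCUSDT", "BINANCE.UM:ETHUSDT"],
    start="2024-01-01",
    stop="2024-03-01",
    run_separate_instruments=True,  # one simulation setup (and result) per instrument
)
# With the flag on, results should contain one TradingSessionResult per instrument,
# with setups named like "MyStrategy/BTCUSDT" (see the utils.py changes further down).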
@@ -160,24 +167,50 @@ def _run_setups(
     _main_loop_silent = len(strategies_setups) == 1
     n_jobs = 1 if _main_loop_silent else n_jobs
 
-    reports = ProgressParallel(
-        n_jobs=n_jobs, total=len(strategies_setups), silent=_main_loop_silent, backend=parallel_backend
-    )(
-        delayed(_run_setup)(
-            id,
-            f"Simulated-{id}",
-            setup,
-            data_setup,
-            start,
-            stop,
-            silent,
-            show_latency_report,
-            portfolio_log_freq,
-            emission,
+    if n_jobs == 1:
+        reports = [
+            _run_setup(
+                id,
+                f"Simulated-{id}",
+                setup,
+                data_setup,
+                start,
+                stop,
+                silent,
+                show_latency_report,
+                portfolio_log_freq,
+                emission,
+            )
+            for id, setup in enumerate(strategies_setups)
+        ]
+    else:
+        reports = ProgressParallel(
+            n_jobs=n_jobs, total=len(strategies_setups), silent=_main_loop_silent, backend=parallel_backend
+        )(
+            delayed(_run_setup)(
+                id,
+                f"Simulated-{id}",
+                setup,
+                data_setup,
+                start,
+                stop,
+                silent,
+                show_latency_report,
+                portfolio_log_freq,
+                emission,
+            )
+            for id, setup in enumerate(strategies_setups)
         )
-        for id, setup in enumerate(strategies_setups)
-    )
-    return reports  # type: ignore
+
+    # Filter out None results and log warnings for failed simulations
+    successful_reports = []
+    for i, report in enumerate(reports):
+        if report is None:
+            logger.warning(f"Simulation setup {i} failed - skipping from results")
+        else:
+            successful_reports.append(report)
+
+    return successful_reports
 
 
 def _run_setup(
@@ -191,48 +224,58 @@ def _run_setup(
     show_latency_report: bool,
     portfolio_log_freq: str,
     emission: EmissionConfig | None = None,
-) -> TradingSessionResult:
-    # Create metric emitter if configured
-    emitter = None
-    if emission is not None:
-        emitter = create_metric_emitters(emission, setup.name)
-
-    runner = SimulationRunner(
-        setup=setup,
-        data_config=data_setup,
-        start=start,
-        stop=stop,
-        account_id=account_id,
-        portfolio_log_freq=portfolio_log_freq,
-        emitter=emitter,
-    )
+) -> TradingSessionResult | None:
+    try:
+        # Create metric emitter if configured
+        emitter = None
+        if emission is not None:
+            emitter = create_metric_emitters(emission, setup.name)
 
-    # - we want to see simulate time in log messages
-    QubxLogConfig.setup_logger(
-        level=QubxLogConfig.get_log_level(), custom_formatter=SimulatedLogFormatter(runner.ctx).formatter
-    )
+        runner = SimulationRunner(
+            setup=setup,
+            data_config=data_setup,
+            start=start,
+            stop=stop,
+            account_id=account_id,
+            portfolio_log_freq=portfolio_log_freq,
+            emitter=emitter,
+        )
 
-    runner.run(silent=silent)
-
-    # - service latency report
-    if show_latency_report:
-        runner.print_latency_report()
-
-    return TradingSessionResult(
-        setup_id,
-        setup.name,
-        start,
-        stop,
-        setup.exchanges,
-        setup.instruments,
-        setup.capital,
-        setup.base_currency,
-        setup.commissions,
-        runner.logs_writer.get_portfolio(as_plain_dataframe=True),
-        runner.logs_writer.get_executions(),
-        runner.logs_writer.get_signals(),
-        strategy_class=runner.strategy_class,
-        parameters=runner.strategy_params,
-        is_simulation=True,
-        author=get_current_user(),
-    )
+        # - we want to see simulate time in log messages
+        QubxLogConfig.setup_logger(
+            level=QubxLogConfig.get_log_level(), custom_formatter=SimulatedLogFormatter(runner.ctx).formatter
+        )
+
+        runner.run(silent=silent)
+
+        # - service latency report
+        if show_latency_report:
+            runner.print_latency_report()
+
+        # Convert commissions to the expected type for TradingSessionResult
+        commissions_for_result = setup.commissions
+        if isinstance(commissions_for_result, dict):
+            # Filter out None values to match TradingSessionResult expected type
+            commissions_for_result = {k: v for k, v in commissions_for_result.items() if v is not None}
+
+        return TradingSessionResult(
+            setup_id,
+            setup.name,
+            start,
+            stop,
+            setup.exchanges,
+            setup.instruments,
+            setup.capital,
+            setup.base_currency,
+            commissions_for_result,
+            runner.logs_writer.get_portfolio(as_plain_dataframe=True),
+            runner.logs_writer.get_executions(),
+            runner.logs_writer.get_signals(),
+            strategy_class=runner.strategy_class,
+            parameters=runner.strategy_params,
+            is_simulation=True,
+            author=get_current_user(),
+        )
+    except Exception as e:
+        logger.error(f"Simulation setup {setup_id} failed with error: {e}")
+        return None
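One behavioural consequence of the two hunks above: _run_setup now catches any exception, logs it, and returns None, and _run_setups filters those entries out, so the returned list can be shorter than the number of setups instead of the whole batch aborting. A generic, self-contained sketch of that failure-isolation pattern follows; the names are illustrative, not the package API.

from typing import Callable, TypeVar

T = TypeVar("T")

def run_isolated(tasks: list[Callable[[], T]]) -> list[T]:
    # Run each task independently; a failing task is reported and skipped,
    # mirroring the None-return/filter logic in _run_setup/_run_setups above.
    results: list[T] = []
    for i, task in enumerate(tasks):
        try:
            results.append(task())
        except Exception as e:  # broad catch is intentional, as in the diff
            print(f"setup {i} failed: {e}")  # the package uses logger.warning/error here
    return results

assert run_isolated([lambda: 1, lambda: 1 // 0, lambda: 3]) == [1, 3]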
qubx/backtester/utils.py CHANGED
@@ -419,6 +419,7 @@ def recognize_simulation_configuration(
     commissions: str | dict[str, str | None] | None,
     signal_timeframe: str,
     accurate_stop_orders_execution: bool,
+    run_separate_instruments: bool = False,
 ) -> list[SimulationSetup]:
     """
     Recognize and create setups based on the provided simulation configuration.
@@ -438,6 +439,7 @@ def recognize_simulation_configuration(
     - commissions (str): The commission structure to be applied.
     - signal_timeframe (str): Timeframe for generated signals.
     - accurate_stop_orders_execution (bool): If True, enables more accurate stop order execution simulation.
+    - run_separate_instruments (bool): If True, creates separate setups for each instrument.
 
     Returns:
     - list[SimulationSetup]: A list of SimulationSetup objects, each representing a
@@ -458,7 +460,7 @@ def recognize_simulation_configuration(
         r.extend(
             recognize_simulation_configuration(
                 _n + n, v, instruments, exchanges, capital, basic_currency, commissions,
-                signal_timeframe, accurate_stop_orders_execution
+                signal_timeframe, accurate_stop_orders_execution, run_separate_instruments
             )
         )
 
@@ -474,45 +476,85 @@ def recognize_simulation_configuration(
                 _t = SetupTypes.STRATEGY_AND_TRACKER
 
                 # - extract actual symbols that have signals
-                r.append(
-                    SimulationSetup(
-                        _t, name, _s, c1,  # type: ignore
-                        _sniffer._pick_instruments(instruments, _s) if _sniffer._is_signal(c0) else instruments,
-                        exchanges, capital, basic_currency, commissions,
-                        signal_timeframe, accurate_stop_orders_execution
+                setup_instruments = _sniffer._pick_instruments(instruments, _s) if _sniffer._is_signal(c0) else instruments
+
+                if run_separate_instruments:
+                    # Create separate setups for each instrument
+                    for instrument in setup_instruments:
+                        r.append(
+                            SimulationSetup(
+                                _t, f"{name}/{instrument.symbol}", _s, c1,  # type: ignore
+                                [instrument],
+                                exchanges, capital, basic_currency, commissions,
+                                signal_timeframe, accurate_stop_orders_execution
+                            )
+                        )
+                else:
+                    r.append(
+                        SimulationSetup(
+                            _t, name, _s, c1,  # type: ignore
+                            setup_instruments,
+                            exchanges, capital, basic_currency, commissions,
+                            signal_timeframe, accurate_stop_orders_execution
+                        )
                     )
-                )
             else:
                 for j, s in enumerate(configs):
                     r.extend(
                         recognize_simulation_configuration(
                            # name + "/" + str(j), s, instruments, exchange, capital, basic_currency, commissions
                            name, s, instruments, exchanges, capital, basic_currency, commissions,  # type: ignore
-                           signal_timeframe, accurate_stop_orders_execution
+                           signal_timeframe, accurate_stop_orders_execution, run_separate_instruments
                         )
                     )
 
     elif _sniffer._is_strategy(configs):
-        r.append(
-            SimulationSetup(
-                SetupTypes.STRATEGY,
-                name, configs, None, instruments,
-                exchanges, capital, basic_currency, commissions,
-                signal_timeframe, accurate_stop_orders_execution
+        if run_separate_instruments:
+            # Create separate setups for each instrument
+            for instrument in instruments:
+                r.append(
+                    SimulationSetup(
+                        SetupTypes.STRATEGY,
+                        f"{name}/{instrument.symbol}", configs, None, [instrument],
+                        exchanges, capital, basic_currency, commissions,
+                        signal_timeframe, accurate_stop_orders_execution
+                    )
+                )
+        else:
+            r.append(
+                SimulationSetup(
+                    SetupTypes.STRATEGY,
+                    name, configs, None, instruments,
+                    exchanges, capital, basic_currency, commissions,
+                    signal_timeframe, accurate_stop_orders_execution
+                )
             )
-        )
 
     elif _sniffer._is_signal(configs):
         # - check structure of signals
         c1 = _sniffer._check_signals_structure(instruments, configs)  # type: ignore
-        r.append(
-            SimulationSetup(
-                SetupTypes.SIGNAL,
-                name, c1, None, _sniffer._pick_instruments(instruments, c1),
-                exchanges, capital, basic_currency, commissions,
-                signal_timeframe, accurate_stop_orders_execution
+        setup_instruments = _sniffer._pick_instruments(instruments, c1)
+
+        if run_separate_instruments:
+            # Create separate setups for each instrument
+            for instrument in setup_instruments:
+                r.append(
+                    SimulationSetup(
+                        SetupTypes.SIGNAL,
+                        f"{name}/{instrument.symbol}", c1, None, [instrument],
+                        exchanges, capital, basic_currency, commissions,
+                        signal_timeframe, accurate_stop_orders_execution
+                    )
+                )
+        else:
+            r.append(
+                SimulationSetup(
+                    SetupTypes.SIGNAL,
+                    name, c1, None, setup_instruments,
+                    exchanges, capital, basic_currency, commissions,
+                    signal_timeframe, accurate_stop_orders_execution
+                )
            )
-        )
 
     # fmt: on
     return r
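All three branches above apply the same naming rule when run_separate_instruments is enabled: each single-instrument setup is named f"{name}/{instrument.symbol}". A tiny worked example of just that rule (the strategy name and symbols are made up):

name = "momentum"
symbols = ["BTCUSDT", "ETHUSDT"]

# run_separate_instruments=True turns one setup over two symbols into two setups:
setup_names = [f"{name}/{symbol}" for symbol in symbols]
assert setup_names == ["momentum/BTCUSDT", "momentum/ETHUSDT"]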
qubx/core/loggers.py CHANGED
@@ -10,10 +10,8 @@ from qubx.core.basics import (
     Position,
     TargetPosition,
 )
-
 from qubx.core.series import time_as_nsec
 from qubx.core.utils import recognize_timeframe
-
 from qubx.utils.misc import Stopwatch
 from qubx.utils.time import convert_tf_str_td64, floor_t64
 
@@ -21,14 +19,14 @@ _SW = Stopwatch()
 
 
 class LogsWriter:
-    account_id: str
-    strategy_id: str
-    run_id: str
-
     """
     Log writer interface with default implementation
     """
 
+    account_id: str
+    strategy_id: str
+    run_id: str
+
     def __init__(self, account_id: str, strategy_id: str, run_id: str) -> None:
         self.account_id = account_id
         self.strategy_id = strategy_id
@@ -39,7 +39,8 @@ from qubx.core.series import Bar, OrderBook, Quote, Trade
 
 
 class ProcessingManager(IProcessingManager):
-    MAX_NUMBER_OF_STRATEGY_FAILURES = 10
+    MAX_NUMBER_OF_STRATEGY_FAILURES: int = 10
+    DATA_READY_TIMEOUT_SECONDS: int = 60
 
     _context: IStrategyContext
     _strategy: IStrategy
@@ -67,6 +68,7 @@ class ProcessingManager(IProcessingManager):
     _trig_bar_freq_nsec: int | None = None
     _cur_sim_step: int | None = None
    _updated_instruments: set[Instrument] = set()
+    _data_ready_start_time: dt_64 | None = None
 
     def __init__(
         self,
@@ -111,6 +113,7 @@ class ProcessingManager(IProcessingManager):
         self._strategy_name = strategy.__class__.__name__
         self._trig_bar_freq_nsec = None
         self._updated_instruments = set()
+        self._data_ready_start_time = None
 
     def set_fit_schedule(self, schedule: str) -> None:
         rule = process_schedule_spec(schedule)
@@ -344,9 +347,56 @@ class ProcessingManager(IProcessingManager):
 
     def _is_data_ready(self) -> bool:
         """
-        Check if at least one update was received for all instruments in the context.
+        Check if strategy can start based on data availability with timeout logic.
+
+        Two-phase approach:
+        - Phase 1 (0-DATA_READY_TIMEOUT_SECONDS): Wait for ALL instruments to have data
+        - Phase 2 (after timeout): Wait for at least 1 instrument to have data
+
+        Returns:
+            bool: True if strategy can start, False if still waiting
         """
-        return all(instrument in self._updated_instruments for instrument in self._context.instruments)
+        total_instruments = len(self._context.instruments)
+
+        # Handle edge case: no instruments
+        if total_instruments == 0:
+            return True
+
+        ready_instruments = len(self._updated_instruments)
+
+        # Record start time on first call
+        if self._data_ready_start_time is None:
+            self._data_ready_start_time = self._time_provider.time()
+
+        # Phase 1: Try to get all instruments ready within timeout
+        elapsed_time_seconds = (self._time_provider.time() - self._data_ready_start_time) / 1e9
+
+        if elapsed_time_seconds <= self.DATA_READY_TIMEOUT_SECONDS:
+            # Within timeout period - wait for ALL instruments
+            if ready_instruments == total_instruments:
+                logger.info(f"All {total_instruments} instruments have data - strategy ready to start")
+                return True
+            else:
+                # Log periodic status during Phase 1
+                if int(elapsed_time_seconds) % 10 == 0 and elapsed_time_seconds > 0:  # Log every 10 seconds
+                    missing_instruments = set(self._context.instruments) - self._updated_instruments
+                    missing_symbols = [inst.symbol for inst in missing_instruments]
+                    logger.info(
+                        f"Phase 1: Waiting for all instruments ({ready_instruments}/{total_instruments} ready). "
+                        f"Missing: {missing_symbols}. Timeout in {self.DATA_READY_TIMEOUT_SECONDS - elapsed_time_seconds:.1f}s"
+                    )
+                return False
+        else:
+            # Phase 2: After timeout - need at least 1 instrument
+            if ready_instruments >= 1:
+                missing_instruments = set(self._context.instruments) - self._updated_instruments
+                missing_symbols = [inst.symbol for inst in missing_instruments]
+                logger.info(
+                    f"Starting strategy with {ready_instruments}/{total_instruments} instruments ready. Missing: {missing_symbols}"
+                )
+                return True
+            else:
+                return False
 
     def __update_base_data(
         self, instrument: Instrument, event_type: str, data: Timestamped, is_historical: bool = False
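The arithmetic above appears to assume nanosecond-resolution timestamps from the time provider (hence the division by 1e9 before comparing against DATA_READY_TIMEOUT_SECONDS). A compact sketch of the same two-phase decision, pulled out of the class for clarity; the function and argument names below are illustrative, not part of Qubx.

def can_start(ready: int, total: int, elapsed_s: float, timeout_s: float = 60.0) -> bool:
    # Phase 1 (within timeout): require data for every instrument.
    # Phase 2 (after timeout): start once at least one instrument has data.
    if total == 0:
        return True
    if elapsed_s <= timeout_s:
        return ready == total
    return ready >= 1

assert can_start(2, 3, elapsed_s=30.0) is False  # phase 1: still waiting for all
assert can_start(2, 3, elapsed_s=90.0) is True   # phase 2: one ready is enough
assert can_start(0, 3, elapsed_s=90.0) is False  # no data at all: keep waiting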
@@ -2,8 +2,6 @@ from qubx.core.basics import DataType, Instrument, TargetPosition
 from qubx.core.helpers import CachedMarketDataHolder
 from qubx.core.interfaces import (
     IAccountProcessor,
-    IBroker,
-    IDataProvider,
     IPositionGathering,
     IStrategy,
     IStrategyContext,
@@ -14,7 +12,6 @@ from qubx.core.interfaces import (
     RemovalPolicy,
 )
 from qubx.core.loggers import StrategyLogging
-from qubx.core.lookups import lookup
 
 
 class UniverseManager(IUniverseManager):
@@ -50,7 +47,7 @@ class UniverseManager(IUniverseManager):
         self._time_provider = time_provider
         self._account = account
         self._position_gathering = position_gathering
-        self._instruments = []
+        self._instruments = set()
         self._removal_queue = {}
 
     def _has_position(self, instrument: Instrument) -> bool:
@@ -72,7 +69,7 @@ class UniverseManager(IUniverseManager):
         ), "Invalid if_has_position_then policy"
 
         new_set = set(instruments)
-        prev_set = set(self._instruments)
+        prev_set = self._instruments.copy()
 
         # - determine instruments to remove depending on if_has_position_then policy
         may_be_removed = list(prev_set - new_set)
@@ -93,9 +90,7 @@ class UniverseManager(IUniverseManager):
         self._subscription_manager.commit()  # apply pending changes
 
         # set new instruments
-        self._instruments.clear()
-        self._instruments.extend(instruments)
-        self._instruments.extend(to_keep)
+        self._instruments = new_set | set(to_keep)
 
     def _get_what_can_be_removed_or_kept(
         self, may_be_removed: list[Instrument], skip_callback: bool, if_has_position_then: RemovalPolicy
@@ -105,12 +100,11 @@ class UniverseManager(IUniverseManager):
         for instr in may_be_removed:
             if immediately_close:
                 to_remove.append(instr)
+            elif self._has_position(instr):
+                self._removal_queue[instr] = (if_has_position_then, skip_callback)
+                to_keep.append(instr)
             else:
-                if self._has_position(instr):
-                    self._removal_queue[instr] = (if_has_position_then, skip_callback)
-                    to_keep.append(instr)
-                else:
-                    to_remove.append(instr)
+                to_remove.append(instr)
         return to_remove, to_keep
 
     def __cleanup_removal_queue(self, instruments: list[Instrument]):
@@ -124,7 +118,7 @@ class UniverseManager(IUniverseManager):
         self.__cleanup_removal_queue(instruments)
         self._strategy.on_universe_change(self._context, instruments, [])
         self._subscription_manager.commit()
-        self._instruments.extend(instruments)
+        self._instruments.update(instruments)
 
     def remove_instruments(
         self,
@@ -146,12 +140,11 @@ class UniverseManager(IUniverseManager):
         self._subscription_manager.commit()
 
         # - update instruments list
-        self._instruments = list(set(self._instruments) - set(to_remove))
-        self._instruments.extend(to_keep)
+        self._instruments = (self._instruments - set(to_remove)) | set(to_keep)
 
     @property
     def instruments(self) -> list[Instrument]:
-        return self._instruments
+        return list(self._instruments)
 
     def __do_remove_instruments(self, instruments: list[Instrument]):
         """
@@ -227,7 +220,7 @@ class UniverseManager(IUniverseManager):
         # if aux is not None:
         #     instrument._aux_instrument = aux
         #     instruments.append(aux)
-        #     _ = self._trading_service.get_position(aux)
+        #     _ = self._account.get_position(aux)
 
     def on_alter_position(self, instrument: Instrument) -> None:
         """
@@ -247,7 +240,7 @@ class UniverseManager(IUniverseManager):
 
         # - commit changes and remove instrument from the universe
         self._subscription_manager.commit()
-        self._instruments.remove(instrument)
+        self._instruments.discard(instrument)
 
     def is_trading_allowed(self, instrument: Instrument) -> bool:
         if instrument in self._removal_queue:
@@ -261,7 +254,7 @@ class UniverseManager(IUniverseManager):
 
             # - commit changes and remove instrument from the universe
            self._subscription_manager.commit()
-            self._instruments.remove(instrument)
+            self._instruments.discard(instrument)
            return False
 
        return True
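The common thread in the UniverseManager changes above is switching _instruments from a list to a set: updates become idempotent and removal uses discard(), which, unlike list.remove(), does not raise when the element is already gone. A minimal illustration follows; plain strings stand in for Instrument objects.

instruments = {"BTCUSDT", "ETHUSDT"}

instruments.update(["BTCUSDT", "SOLUSDT"])   # no duplicates, unlike list.extend()
instruments.discard("XRPUSDT")               # absent element: silently ignored

as_list = ["BTCUSDT", "ETHUSDT"]
try:
    as_list.remove("XRPUSDT")                # the old list-based code could hit this
except ValueError:
    pass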
qubx/data/composite.py CHANGED
@@ -71,12 +71,15 @@ class IteratedDataStreamsSlicer(Iterator[SlicerOutData]):
         return self
 
     def _build_initial_iteration_seq(self):
-        _init_seq = {k: self._time_func(self._buffers[k][-1]) for k in self._keys}
+        _init_seq = {k: self._time_func(self._buffers[k][-1]) for k in self._keys if self._buffers[k]}
         _init_seq = dict(sorted(_init_seq.items(), key=lambda item: item[1]))
         self._keys = deque(_init_seq.keys())
 
     def _load_next_chunk_to_buffer(self, index: str) -> list[Timestamped]:
-        return list(reversed(next(self._iterators[index])))
+        try:
+            return list(reversed(next(self._iterators[index])))
+        except StopIteration:
+            return []
 
     def _remove_iterator(self, key: str):
         self._buffers.pop(key)
@@ -95,6 +98,9 @@ class IteratedDataStreamsSlicer(Iterator[SlicerOutData]):
         Returns:
             Timestamped: The most recent timestamped data element from the buffer.
         """
+        if not self._buffers[k]:
+            raise StopIteration
+
         v = (data := self._buffers[k]).pop()
         if not data:
             try:
@@ -154,6 +160,9 @@ class IteratedDataStreamsSlicer(Iterator[SlicerOutData]):
         _min_t = math.inf
         _min_k = self._keys[0]
         for i in self._keys:
+            if not self._buffers[i]:
+                continue
+
             _x = self._buffers[i][-1]
             if self._time_func(_x) < _min_t:
                 _min_t = self._time_func(_x)
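All three composite.py changes share one theme: the slicer now tolerates empty or exhausted per-stream buffers instead of letting an exception escape mid-iteration. A self-contained sketch of the chunk-loading guard, with generic iterators standing in for the real data iterators:

from typing import Iterator, List, TypeVar

T = TypeVar("T")

def load_next_chunk(it: Iterator[List[T]]) -> List[T]:
    # Same idea as _load_next_chunk_to_buffer above: an exhausted iterator
    # yields an empty chunk instead of propagating StopIteration to the caller.
    try:
        return list(reversed(next(it)))
    except StopIteration:
        return []

chunks = iter([[1, 2, 3]])
assert load_next_chunk(chunks) == [3, 2, 1]
assert load_next_chunk(chunks) == []  # exhausted: empty list, no exception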