Qubx 0.6.40__tar.gz → 0.6.42__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of Qubx might be problematic.
- {qubx-0.6.40 → qubx-0.6.42}/PKG-INFO +1 -1
- {qubx-0.6.40 → qubx-0.6.42}/pyproject.toml +1 -1
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/_nb_magic.py +1 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/backtester/simulated_exchange.py +1 -3
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/helpers.py +1 -16
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/metrics.py +43 -1
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/mixins/processing.py +15 -4
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/data/tardis.py +18 -14
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/exporters/formatters/incremental.py +10 -1
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/exporters/redis_streams.py +6 -0
- qubx-0.6.42/src/qubx/notifications/__init__.py +19 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/notifications/slack.py +43 -10
- qubx-0.6.42/src/qubx/notifications/throttler.py +182 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/pandaz/utils.py +5 -2
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/runner/factory.py +64 -4
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/runner/runner.py +3 -3
- qubx-0.6.40/src/qubx/notifications/__init__.py +0 -11
- {qubx-0.6.40 → qubx-0.6.42}/LICENSE +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/README.md +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/build.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/backtester/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/backtester/account.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/backtester/broker.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/backtester/data.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/backtester/management.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/backtester/ome.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/backtester/optimization.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/backtester/runner.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/backtester/simulated_data.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/backtester/simulator.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/backtester/utils.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/cli/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/cli/commands.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/cli/deploy.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/cli/misc.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/cli/release.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/account.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/broker.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/data.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/exceptions.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/exchanges/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/exchanges/binance/broker.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/exchanges/binance/exchange.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/exchanges/bitfinex/bitfinex.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/exchanges/bitfinex/bitfinex_account.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/exchanges/kraken/kraken.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/factory.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/reader.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/ccxt/utils.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/tardis/data.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/connectors/tardis/utils.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/account.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/basics.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/context.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/deque.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/errors.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/exceptions.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/initializer.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/interfaces.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/loggers.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/lookups.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/mixins/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/mixins/market.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/mixins/subscription.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/mixins/trading.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/mixins/universe.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/series.pxd +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/series.pyi +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/series.pyx +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/utils.pyi +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/core/utils.pyx +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/data/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/data/composite.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/data/helpers.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/data/hft.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/data/readers.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/data/registry.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/emitters/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/emitters/base.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/emitters/composite.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/emitters/csv.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/emitters/prometheus.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/emitters/questdb.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/exporters/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/exporters/composite.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/exporters/formatters/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/exporters/formatters/base.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/exporters/formatters/slack.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/exporters/slack.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/features/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/features/core.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/features/orderbook.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/features/price.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/features/trades.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/features/utils.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/gathering/simplest.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/health/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/health/base.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/loggers/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/loggers/csv.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/loggers/factory.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/loggers/inmemory.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/loggers/mongo.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/math/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/math/stats.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/notifications/composite.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/pandaz/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/pandaz/ta.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/resources/_build.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/resources/instruments/symbols-binance.cm.json +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/resources/instruments/symbols-binance.json +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/resources/instruments/symbols-binance.um.json +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/resources/instruments/symbols-bitfinex.f.json +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/resources/instruments/symbols-bitfinex.json +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/resources/instruments/symbols-kraken.f.json +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/resources/instruments/symbols-kraken.json +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/restarts/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/restarts/state_resolvers.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/restarts/time_finders.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/restorers/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/restorers/balance.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/restorers/factory.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/restorers/interfaces.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/restorers/position.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/restorers/signal.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/restorers/state.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/restorers/utils.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/ta/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/ta/indicators.pxd +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/ta/indicators.pyi +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/ta/indicators.pyx +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/trackers/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/trackers/advanced.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/trackers/composite.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/trackers/rebalancers.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/trackers/riskctrl.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/trackers/sizers.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/_pyxreloader.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/charting/lookinglass.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/charting/mpl_helpers.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/collections.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/marketdata/binance.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/marketdata/ccxt.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/marketdata/dukas.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/misc.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/ntp.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/numbers_utils.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/orderbook.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/plotting/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/plotting/dashboard.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/plotting/data.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/plotting/interfaces.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/plotting/renderers/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/plotting/renderers/plotly.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/questdb.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/runner/__init__.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/runner/_jupyter_runner.pyt +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/runner/accounts.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/runner/configs.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/time.py +0 -0
- {qubx-0.6.40 → qubx-0.6.42}/src/qubx/utils/version.py +0 -0
pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "Qubx"
-version = "0.6.40"
+version = "0.6.42"
 description = "Qubx - Quantitative Trading Framework"
 authors = [ "Dmitry Marienko <dmitry.marienko@xlydian.com>", "Yuriy Arabskyy <yuriy.arabskyy@xlydian.com>",]
 readme = "README.md"
src/qubx/backtester/simulated_exchange.py

@@ -155,9 +155,7 @@ class BasicSimulatedExchange(ISimulatedExchange):
             if order.id == order_id:
                 return self._process_ome_response(o.cancel_order(order_id))

-        logger.error(
-            f"[<y>{self.__class__.__name__}</y>] :: cancel_order :: can't find order with id = '{order_id}'!"
-        )
+        logger.warning(f"[<y>{self.__class__.__name__}</y>] :: cancel_order :: can't find order '{order_id}'!")
         return None

         ome = self._ome.get(instrument)
src/qubx/core/helpers.py

@@ -20,7 +20,7 @@ from qubx.utils.time import convert_seconds_to_str, convert_tf_str_td64, interva

 class CachedMarketDataHolder:
     """
-    Collected cached data updates from
+    Collected cached data updates from market
     """

     default_timeframe: np.timedelta64

@@ -35,7 +35,6 @@ class CachedMarketDataHolder:
         self._last_bar = defaultdict(lambda: None)
         self._updates = dict()
         self._instr_to_sub_to_buffer = defaultdict(lambda: defaultdict(lambda: deque(maxlen=max_buffer_size)))
-        self._ready_instruments = set()
         if default_timeframe:
             self.update_default_timeframe(default_timeframe)

@@ -68,19 +67,8 @@ class CachedMarketDataHolder:
         self._ohlcvs = other._ohlcvs
         self._updates = other._updates
         self._instr_to_sub_to_buffer = other._instr_to_sub_to_buffer
-        self._ready_instruments = set()  # reset the ready instruments
         self._last_bar = defaultdict(lambda: None)  # reset the last bar

-    def is_data_ready(self) -> bool:
-        """
-        Check if at least one update was received for all instruments.
-        """
-        # Check if we have at least one update for each instrument
-        if not self._ohlcvs:
-            return False
-
-        return all(instrument in self._ready_instruments for instrument in self._ohlcvs)
-
     @SW.watch("CachedMarketDataHolder")
     def get_ohlcv(self, instrument: Instrument, timeframe: str | None = None, max_size: float | int = np.inf) -> OHLCV:
         tf = convert_tf_str_td64(timeframe) if timeframe else self.default_timeframe

@@ -121,9 +109,6 @@ class CachedMarketDataHolder:
         if event_type != DataType.OHLC:
             self._instr_to_sub_to_buffer[instrument][event_type].append(data)

-        if not is_historical and is_base_data:
-            self._ready_instruments.add(instrument)
-
         if not update_ohlc:
             return

src/qubx/core/metrics.py

@@ -21,7 +21,7 @@ from statsmodels.regression.linear_model import OLS
 from qubx import logger
 from qubx.core.basics import Instrument
 from qubx.core.series import OHLCV
-from qubx.pandaz.utils import ohlc_resample
+from qubx.pandaz.utils import ohlc_resample, srows
 from qubx.utils.charting.lookinglass import LookingGlass
 from qubx.utils.charting.mpl_helpers import sbp
 from qubx.utils.misc import makedirs, version

@@ -1500,6 +1500,9 @@ def get_symbol_pnls(


 def combine_sessions(sessions: list[TradingSessionResult], name: str = "Portfolio") -> TradingSessionResult:
+    """
+    DEPRECATED: use extend_trading_results instead
+    """
     session = copy(sessions[0])
     session.name = name
     session.instruments = list(set(chain.from_iterable([e.instruments for e in sessions])))

@@ -1518,6 +1521,45 @@ def combine_sessions(sessions: list[TradingSessionResult], name: str = "Portfoli
     return session


+def extend_trading_results(results: list[TradingSessionResult]) -> TradingSessionResult:
+    """
+    Combine multiple trading session results into a single result by extending the sessions.
+    """
+    import os
+
+    pfls, execs, exch, names, instrs, clss = [], [], [], [], [], []
+    cap = 0.0
+
+    for b in sorted(results, key=lambda x: x.start):
+        pfls.append(b.portfolio_log)
+        execs.append(b.executions_log)
+        exch.extend(b.exchanges)
+        names.append(b.name)
+        cap += b.capital if isinstance(b.capital, float) else 0.0  # TODO: add handling dict
+        instrs.extend(b.instruments)
+        clss.append(b.strategy_class)
+    cmn = os.path.commonprefix(names)
+    names = [x[len(cmn) :] for x in names]
+    f_pfls: pd.DataFrame = srows(*pfls, keep="last")  # type: ignore
+    f_execs: pd.DataFrame = srows(*execs, keep="last")  # type: ignore
+    r = TradingSessionResult(
+        0,
+        cmn + "-".join(names),
+        start=f_pfls.index[0],
+        stop=f_pfls.index[-1],
+        exchanges=list(set(exch)),
+        capital=cap / len(results),  # average capital ???
+        instruments=list(set(instrs)),
+        base_currency=results[0].base_currency,
+        commissions=results[0].commissions,  # what if different commissions ???
+        portfolio_log=f_pfls,
+        executions_log=f_execs,
+        signals_log=pd.DataFrame(),
+        strategy_class="-".join(set(clss)),  # what if different strategy classes ???
+    )
+    return r
+
+
 def _plt_to_base64() -> str:
     fig = plt.gcf()

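For the new `extend_trading_results` helper added above, a minimal usage sketch (the helper and the deprecation note on `combine_sessions` come from this diff; the wrapper function and names below are illustrative):

```python
# Sketch: stitching several sequential backtest runs into one result using
# the extend_trading_results() helper introduced in 0.6.42.
from qubx.core.metrics import extend_trading_results


def stitch_backtests(results):
    """`results` is a list of TradingSessionResult objects from adjacent runs."""
    # Portfolio and execution logs are concatenated in start-time order
    # (duplicate timestamps resolved with keep="last"), exchanges and
    # instruments are unioned, and capital is averaged across runs.
    return extend_trading_results(results)
```

`combine_sessions` is kept for now but its new docstring marks it as deprecated in favour of this function.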
src/qubx/core/mixins/processing.py

@@ -66,6 +66,7 @@ class ProcessingManager(IProcessingManager):
     _pool: ThreadPool | None
     _trig_bar_freq_nsec: int | None = None
     _cur_sim_step: int | None = None
+    _updated_instruments: set[Instrument] = set()

     def __init__(
         self,

@@ -109,6 +110,7 @@ class ProcessingManager(IProcessingManager):
         }
         self._strategy_name = strategy.__class__.__name__
         self._trig_bar_freq_nsec = None
+        self._updated_instruments = set()

     def set_fit_schedule(self, schedule: str) -> None:
         rule = process_schedule_spec(schedule)

@@ -340,6 +342,12 @@ class ProcessingManager(IProcessingManager):
             _d_probe,
         )

+    def _is_data_ready(self) -> bool:
+        """
+        Check if at least one update was received for all instruments in the context.
+        """
+        return all(instrument in self._updated_instruments for instrument in self._context.instruments)
+
     def __update_base_data(
         self, instrument: Instrument, event_type: str, data: Timestamped, is_historical: bool = False
     ) -> bool:

@@ -366,6 +374,9 @@ class ProcessingManager(IProcessingManager):
         # update trackers, gatherers on base data
         if not is_historical:
             if is_base_data:
+                # - mark instrument as updated
+                self._updated_instruments.add(instrument)
+
                 self._account.update_position_price(self._time_provider.time(), instrument, _update)
                 target_positions = self.__process_and_log_target_positions(
                     self._position_tracker.update(self._context, instrument, _update)

@@ -421,13 +432,13 @@ class ProcessingManager(IProcessingManager):
             pass

     def _handle_start(self) -> None:
-        if not self.
+        if not self._is_data_ready():
            return
         self._strategy.on_start(self._context)
         self._context._strategy_state.is_on_start_called = True

     def _handle_state_resolution(self) -> None:
-        if not self.
+        if not self._is_data_ready():
             return

         resolver = self._context.initializer.get_state_resolver()

@@ -448,7 +459,7 @@ class ProcessingManager(IProcessingManager):
         resolver(self._context, self._context.get_warmup_positions(), self._context.get_warmup_orders())

     def _handle_warmup_finished(self) -> None:
-        if not self.
+        if not self._is_data_ready():
             return
         self._strategy.on_warmup_finished(self._context)
         self._context._strategy_state.is_on_warmup_finished_called = True

@@ -457,7 +468,7 @@ class ProcessingManager(IProcessingManager):
         """
         When scheduled fit event is happened - we need to invoke strategy on_fit method
         """
-        if not self.
+        if not self._is_data_ready():
             return
         self._fit_is_running = True
         self._run_in_thread_pool(self.__invoke_on_fit)
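The removed `CachedMarketDataHolder.is_data_ready()` is effectively replaced by `ProcessingManager._is_data_ready()`: live base-data updates are recorded in `_updated_instruments`, and `on_start`, state resolution, warmup completion, and scheduled fits are skipped until every instrument in the context has been seen at least once. A standalone sketch of that gating pattern (the `ReadinessGate` class below is illustrative, not part of Qubx):

```python
# Standalone sketch of the readiness gate introduced above: lifecycle hooks
# run only after every instrument has produced at least one live update.
class ReadinessGate:
    def __init__(self, instruments: list[str]) -> None:
        self._instruments = instruments
        self._updated: set[str] = set()

    def mark_updated(self, instrument: str) -> None:
        # Called on every non-historical base-data event for an instrument.
        self._updated.add(instrument)

    def is_data_ready(self) -> bool:
        # True once all instruments in the universe have been seen at least once.
        return all(i in self._updated for i in self._instruments)


gate = ReadinessGate(["BTCUSDT", "ETHUSDT"])
gate.mark_updated("BTCUSDT")
assert not gate.is_data_ready()
gate.mark_updated("ETHUSDT")
assert gate.is_data_ready()
```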
src/qubx/data/tardis.py

@@ -81,21 +81,25 @@ class TardisCsvDataReader(DataReader):
         _filt_files = [file for file in _files if t_0 <= file.stem.split("_")[0] <= t_1]

         tables = []
-        fieldnames = None
+        # fieldnames = None
         for f_path in _filt_files:
-            table = csv.read_csv(
-                f_path,
-                parse_options=csv.ParseOptions(ignore_empty_lines=True),
-            )
-            if not fieldnames:
-                fieldnames = table.column_names
-            tables.append(table.to_pandas())
-
-        transform.start_transform(data_id, fieldnames or [], start=start, stop=stop)
-        raw_data = pd.concat(tables).to_numpy()
-        transform.process_data(raw_data)
-
-        return transform.collect()
+            table = pd.read_csv(f_path)
+            tables.append(table)
+            # table = csv.read_csv(
+            #     f_path,
+            #     parse_options=csv.ParseOptions(ignore_empty_lines=True),
+            # )
+            # if not fieldnames:
+            #     fieldnames = table.column_names
+            # tables.append(table.to_pandas())
+
+        return pd.concat(tables)
+
+        # transform.start_transform(data_id, fieldnames or [], start=start, stop=stop)
+        # raw_data = pd.concat(tables).to_numpy()
+        # transform.process_data(raw_data)
+
+        # return transform.collect()

     def get_exchanges(self) -> list[str]:
         return [exchange.name for exchange in self.path.iterdir() if exchange.is_dir()]
src/qubx/exporters/formatters/incremental.py

@@ -13,7 +13,12 @@ class IncrementalFormatter(DefaultFormatter):
     based on leverage changes.
     """

-    def __init__(self, alert_name: str, exchange_mapping: Optional[Dict[str, str]] = None):
+    def __init__(
+        self,
+        alert_name: str,
+        exchange_mapping: Optional[Dict[str, str]] = None,
+        account: Optional[IAccountViewer] = None
+    ):
         """
         Initialize the IncrementalFormatter.

@@ -21,12 +26,16 @@ class IncrementalFormatter(DefaultFormatter):
             alert_name: The name of the alert to include in the messages
             exchange_mapping: Optional mapping of exchange names to use in messages.
                 If an exchange is not in the mapping, the instrument's exchange is used.
+            account: The account viewer to get account information like total capital, leverage, etc.
         """
         super().__init__()
         self.alert_name = alert_name
         self.exchange_mapping = exchange_mapping or {}
         self.instrument_leverages: Dict[Instrument, float] = {}

+        if account:
+            self.instrument_leverages = dict(account.get_leverages())
+
     def format_position_change(
         self, time: dt_64, instrument: Instrument, price: float, account: IAccountViewer
     ) -> dict[str, Any]:
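A sketch of constructing the formatter with the new optional `account` argument, which seeds `instrument_leverages` from `account.get_leverages()` instead of starting empty (the wrapper function and example values are illustrative; the parameters themselves are the ones shown in this hunk):

```python
# Sketch: build an IncrementalFormatter whose initial per-instrument
# leverages come from the live account rather than defaulting to {}.
from qubx.exporters.formatters.incremental import IncrementalFormatter


def make_formatter(account):
    """`account` is assumed to implement IAccountViewer (get_leverages, ...)."""
    return IncrementalFormatter(
        alert_name="my-strategy-alerts",                      # illustrative name
        exchange_mapping={"BINANCE.UM": "binance-futures"},   # illustrative mapping
        account=account,
    )
```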
src/qubx/exporters/redis_streams.py

@@ -36,6 +36,7 @@ class RedisStreamsExporter(ITradeDataExport):
         max_stream_length: int = 1000,
         formatter: Optional[IExportFormatter] = None,
         max_workers: int = 2,
+        account: Optional[IAccountViewer] = None,
     ):
         """
         Initialize the Redis Streams Exporter.

@@ -52,6 +53,7 @@ class RedisStreamsExporter(ITradeDataExport):
             max_stream_length: Maximum length of each stream
             formatter: Formatter to use for formatting data (default: DefaultFormatter)
             max_workers: Maximum number of worker threads for Redis operations
+            account: Optional account viewer to get account information like total capital, leverage, etc.
         """
         self._redis = redis.from_url(redis_url)
         self._strategy_name = strategy_name

@@ -71,6 +73,9 @@ class RedisStreamsExporter(ITradeDataExport):

         self._executor = ThreadPoolExecutor(max_workers=max_workers, thread_name_prefix="redis_exporter")

+        if account:
+            self._instrument_to_previous_leverage = dict(account.get_leverages())
+
         logger.info(
             f"[RedisStreamsExporter] Initialized for strategy '{strategy_name}' with "
             f"signals: {export_signals}, targets: {export_targets}, position_changes: {export_position_changes}"

@@ -201,6 +206,7 @@ class RedisStreamsExporter(ITradeDataExport):

         previous_leverage = self._instrument_to_previous_leverage.get(instrument, 0.0)
         new_leverage = account.get_leverage(instrument)
+        self._instrument_to_previous_leverage[instrument] = new_leverage

         try:
             # Format the leverage change using the formatter
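The exporter accepts the same optional `account` and now writes the new leverage back into `_instrument_to_previous_leverage` after each position-change export, so only genuine leverage changes are published. A hedged construction sketch; `redis_url`, `strategy_name`, `formatter`, `max_stream_length`, `max_workers`, and `account` all appear in this diff, but passing them as keywords in this exact way is an assumption:

```python
# Sketch: wire the exporter with an account so its previous-leverage cache
# starts from real account state instead of 0.0 for every instrument.
from qubx.exporters.redis_streams import RedisStreamsExporter


def make_exporter(account, formatter):
    return RedisStreamsExporter(
        redis_url="redis://localhost:6379/0",  # illustrative URL
        strategy_name="my-strategy",
        formatter=formatter,
        max_stream_length=1000,
        max_workers=2,
        account=account,  # seeds _instrument_to_previous_leverage via get_leverages()
    )
```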
src/qubx/notifications/__init__.py (new file in 0.6.42)

@@ -0,0 +1,19 @@
+"""
+Notifications package for strategy lifecycle events.
+
+This package provides implementations of the IStrategyLifecycleNotifier interface
+for various notification channels.
+"""
+
+from .composite import CompositeLifecycleNotifier
+from .slack import SlackLifecycleNotifier
+from .throttler import CountBasedThrottler, IMessageThrottler, NoThrottling, TimeWindowThrottler
+
+__all__ = [
+    "CompositeLifecycleNotifier",
+    "SlackLifecycleNotifier",
+    "IMessageThrottler",
+    "TimeWindowThrottler",
+    "CountBasedThrottler",
+    "NoThrottling"
+]
src/qubx/notifications/slack.py

@@ -1,17 +1,18 @@
 """
-Slack
+Slack notifications for strategy lifecycle events.

-This module provides
+This module provides a Slack implementation of IStrategyLifecycleNotifier.
 """

 import datetime
 from concurrent.futures import ThreadPoolExecutor
-from typing import
+from typing import Any

 import requests

 from qubx import logger
 from qubx.core.interfaces import IStrategyLifecycleNotifier
+from qubx.notifications.throttler import IMessageThrottler, NoThrottling


 class SlackLifecycleNotifier(IStrategyLifecycleNotifier):

@@ -30,6 +31,7 @@ class SlackLifecycleNotifier(IStrategyLifecycleNotifier):
         emoji_stop: str = ":checkered_flag:",
         emoji_error: str = ":rotating_light:",
         max_workers: int = 1,
+        throttler: IMessageThrottler | None = None,
     ):
         """
         Initialize the Slack Lifecycle Notifier.

@@ -40,18 +42,28 @@ class SlackLifecycleNotifier(IStrategyLifecycleNotifier):
             emoji_start: Emoji to use for start events
             emoji_stop: Emoji to use for stop events
             emoji_error: Emoji to use for error events
+            max_workers: Number of worker threads for posting messages
+            throttler: Optional message throttler to prevent flooding
         """
         self._webhook_url = webhook_url
         self._environment = environment
         self._emoji_start = emoji_start
         self._emoji_stop = emoji_stop
         self._emoji_error = emoji_error
+        self._throttler = throttler if throttler is not None else NoThrottling()

         self._executor = ThreadPoolExecutor(max_workers=max_workers, thread_name_prefix="slack_notifier")

         logger.info(f"[SlackLifecycleNotifier] Initialized for environment '{environment}'")

-    def _post_to_slack(
+    def _post_to_slack(
+        self,
+        message: str,
+        emoji: str,
+        color: str,
+        metadata: dict[str, Any] | None = None,
+        throttle_key: str | None = None,
+    ) -> None:
         """
         Submit a notification to be posted to Slack by the worker thread.

@@ -60,15 +72,26 @@ class SlackLifecycleNotifier(IStrategyLifecycleNotifier):
             emoji: Emoji to use in the message
             color: Color for the message attachment
             metadata: Optional dictionary with additional fields to include
+            throttle_key: Optional key for throttling (if None, no throttling is applied)
         """
         try:
+            # Check if the message should be throttled
+            if throttle_key is not None and not self._throttler.should_send(throttle_key):
+                logger.debug(f"[SlackLifecycleNotifier] Throttled message with key '{throttle_key}': {message}")
+                return
+
             # Submit the task to the executor
-            self._executor.submit(self._post_to_slack_impl, message, emoji, color, metadata)
+            self._executor.submit(self._post_to_slack_impl, message, emoji, color, metadata, throttle_key)
         except Exception as e:
             logger.error(f"[SlackLifecycleNotifier] Failed to queue Slack message: {e}")

     def _post_to_slack_impl(
-        self,
+        self,
+        message: str,
+        emoji: str,
+        color: str,
+        metadata: dict[str, Any] | None = None,
+        throttle_key: str | None = None,
     ) -> bool:
         """
         Implementation that actually posts to Slack (called from worker thread).

@@ -78,6 +101,7 @@ class SlackLifecycleNotifier(IStrategyLifecycleNotifier):
             emoji: Emoji to use in the message
             color: Color for the message attachment
             metadata: Optional dictionary with additional fields to include
+            throttle_key: Optional key used for throttling

         Returns:
             bool: True if the post was successful, False otherwise

@@ -107,13 +131,18 @@ class SlackLifecycleNotifier(IStrategyLifecycleNotifier):

             response = requests.post(self._webhook_url, json=data)
             response.raise_for_status()
+
+            # Register that we sent the message (for throttling)
+            if throttle_key is not None:
+                self._throttler.register_sent(throttle_key)
+
             logger.debug(f"[SlackLifecycleNotifier] Successfully posted message: {message}")
             return True
         except requests.RequestException as e:
             logger.error(f"[SlackLifecycleNotifier] Failed to post to Slack: {e}")
             return False

-    def notify_start(self, strategy_name: str, metadata:
+    def notify_start(self, strategy_name: str, metadata: dict[str, Any] | None = None) -> None:
         """
         Notify that a strategy has started.

@@ -128,7 +157,7 @@ class SlackLifecycleNotifier(IStrategyLifecycleNotifier):
         except Exception as e:
             logger.error(f"[SlackLifecycleNotifier] Failed to notify start: {e}")

-    def notify_stop(self, strategy_name: str, metadata:
+    def notify_stop(self, strategy_name: str, metadata: dict[str, Any] | None = None) -> None:
         """
         Notify that a strategy has stopped.

@@ -143,7 +172,7 @@ class SlackLifecycleNotifier(IStrategyLifecycleNotifier):
         except Exception as e:
             logger.error(f"[SlackLifecycleNotifier] Failed to notify stop: {e}")

-    def notify_error(self, strategy_name: str, error: Exception, metadata:
+    def notify_error(self, strategy_name: str, error: Exception, metadata: dict[str, Any] | None = None) -> None:
         """
         Notify that a strategy has encountered an error.

@@ -161,7 +190,11 @@ class SlackLifecycleNotifier(IStrategyLifecycleNotifier):
                 metadata["Error Message"] = str(error)

             message = f"[{strategy_name}] ALERT: Strategy error in {self._environment}"
-            self._post_to_slack(message, self._emoji_error, "#FF0000", metadata)
+
+            # Create a throttle key for this strategy/error type combination
+            throttle_key = f"error:{strategy_name}:{type(error).__name__}"
+
+            self._post_to_slack(message, self._emoji_error, "#FF0000", metadata, throttle_key=throttle_key)
             logger.debug(f"[SlackLifecycleNotifier] Queued error notification for {strategy_name}")
         except Exception as e:
             logger.error(f"[SlackLifecycleNotifier] Failed to notify error: {e}")
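Error notifications are now keyed as `error:{strategy_name}:{ErrorType}`, so repeated failures of the same kind are collapsed by whichever throttler is configured, while start/stop notifications are unchanged. A sketch using only names introduced in this release (the webhook URL is a placeholder):

```python
# Sketch: at most one Slack message per strategy/error-type every 30 seconds.
# Repeated ValueErrors from "my-strategy" share the key "error:my-strategy:ValueError".
from qubx.notifications import SlackLifecycleNotifier, TimeWindowThrottler

notifier = SlackLifecycleNotifier(
    webhook_url="https://hooks.slack.com/services/XXX/YYY/ZZZ",  # placeholder
    environment="production",
    throttler=TimeWindowThrottler(window_seconds=30.0),
)

for _ in range(10):
    # Only the first call in the window reaches Slack; the rest are throttled.
    notifier.notify_error("my-strategy", ValueError("bad tick"), metadata={"source": "feed"})
```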
src/qubx/notifications/throttler.py (new file in 0.6.42)

@@ -0,0 +1,182 @@
+"""
+Message Throttling for Notifications.
+
+This module defines interfaces and implementations for throttling
+notification messages to prevent flooding notification channels.
+
+Usage Examples:
+    1. Basic TimeWindowThrottler with default settings (allows 1 message per key per 10 seconds):
+    ```python
+    from qubx.notifications.throttler import TimeWindowThrottler
+
+    throttler = TimeWindowThrottler()
+    if throttler.should_send("error:mystrategy:ValueError"):
+        # Send the message
+        send_message()
+        # Update the throttler
+        throttler.register_sent("error:mystrategy:ValueError")
+    ```
+
+    2. CountBasedThrottler (allows up to N messages per key within a time window):
+    ```python
+    from qubx.notifications.throttler import CountBasedThrottler
+
+    # Allow up to 5 messages per minute for each key
+    throttler = CountBasedThrottler(max_count=5, window_seconds=60.0)
+    ```
+
+    3. In a configuration file for SlackLifecycleNotifier:
+    ```yaml
+    notifiers:
+      - notifier: SlackLifecycleNotifier
+        parameters:
+          webhook_url: ${SLACK_WEBHOOK_URL}
+          environment: production
+          throttle:
+            type: TimeWindow
+            window_seconds: 30.0
+    ```
+"""
+
+import time
+from abc import ABC, abstractmethod
+
+
+class IMessageThrottler(ABC):
+    """Interface for message throttlers that can limit the frequency of notifications."""
+
+    @abstractmethod
+    def should_send(self, key: str) -> bool:
+        """
+        Determine if a message with the given key should be sent based on throttling rules.
+
+        Args:
+            key: A unique identifier for the type of message being sent
+                (e.g., "error:{strategy_name}")
+
+        Returns:
+            bool: True if the message should be sent, False if it should be throttled
+        """
+        pass
+
+    @abstractmethod
+    def register_sent(self, key: str) -> None:
+        """
+        Register that a message with the given key was sent.
+        This updates the internal state of the throttler.
+
+        Args:
+            key: A unique identifier for the type of message that was sent
+        """
+        pass
+
+
+class TimeWindowThrottler(IMessageThrottler):
+    """
+    Throttles messages based on a time window.
+
+    Only allows one message per key within a specified time window.
+    """
+
+    def __init__(self, window_seconds: float = 10.0):
+        """
+        Initialize the time window throttler.
+
+        Args:
+            window_seconds: Minimum time between messages with the same key, in seconds
+        """
+        self._window_seconds = window_seconds
+        self._last_sent_times: dict[str, float] = {}
+
+    def should_send(self, key: str) -> bool:
+        """
+        Check if a message with the given key should be sent based on the time window.
+
+        Args:
+            key: Message key to check
+
+        Returns:
+            bool: True if enough time has passed since the last message with this key
+        """
+        current_time = time.time()
+        last_sent = self._last_sent_times.get(key, 0)
+        return (current_time - last_sent) >= self._window_seconds
+
+    def register_sent(self, key: str) -> None:
+        """
+        Register that a message with the given key was sent.
+
+        Args:
+            key: Key of the message that was sent
+        """
+        self._last_sent_times[key] = time.time()
+
+
+class CountBasedThrottler(IMessageThrottler):
+    """
+    Throttles messages based on a count within a time window.
+
+    Allows a specified number of messages per key within a time window.
+    """
+
+    def __init__(self, max_count: int = 3, window_seconds: float = 60.0):
+        """
+        Initialize the count-based throttler.
+
+        Args:
+            max_count: Maximum number of messages allowed in the time window
+            window_seconds: Time window in seconds
+        """
+        self._max_count = max_count
+        self._window_seconds = window_seconds
+        self._message_history: dict[str, list[float]] = {}
+
+    def should_send(self, key: str) -> bool:
+        """
+        Check if a message with the given key should be sent based on the count limit.
+
+        Args:
+            key: Message key to check
+
+        Returns:
+            bool: True if the message count is below the limit
+        """
+        current_time = time.time()
+
+        # Initialize history for this key if it doesn't exist
+        if key not in self._message_history:
+            self._message_history[key] = []
+
+        # Remove timestamps older than the window
+        self._message_history[key] = [
+            ts for ts in self._message_history[key] if (current_time - ts) < self._window_seconds
+        ]
+
+        # Check if we're under the message count limit
+        return len(self._message_history[key]) < self._max_count
+
+    def register_sent(self, key: str) -> None:
+        """
+        Register that a message with the given key was sent.
+
+        Args:
+            key: Key of the message that was sent
+        """
+        current_time = time.time()
+
+        if key not in self._message_history:
+            self._message_history[key] = []
+
+        self._message_history[key].append(current_time)
+
+
+class NoThrottling(IMessageThrottler):
+    """A throttler implementation that doesn't actually throttle - allows all messages."""
+
+    def should_send(self, key: str) -> bool:
+        """Always returns True, allowing all messages to be sent."""
+        return True
+
+    def register_sent(self, key: str) -> None:
+        """No-op implementation."""
+        pass