siglab-py 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of siglab-py might be problematic.

@@ -0,0 +1,197 @@
+ import os
+ import sys
+ import traceback
+ from enum import Enum
+ import argparse
+ from datetime import datetime, timedelta
+ import time
+ from typing import Dict
+ import json
+ import asyncio
+ import logging
+ from ccxt import deribit
+ from redis import StrictRedis
+ from redis.exceptions import ConnectionError as RedisConnectionError
+ import pandas as pd
+
+ from util.market_data_util import timestamp_to_datetime_cols, fetch_deribit_btc_option_expiries, fetch_ohlcv_one_candle
+
+ param : Dict = {
+     'market' : 'BTC',
+
+     # Provider ID is part of mds publish topic.
+     'provider_id' : 'b0f1b878-c281-43d7-870a-0347f90e6ece',
+
+     'archive_file_name' : "deribit_options_expiry.csv",
+
+     # Publish to message bus
+     'mds' : {
+         'topics' : {
+             'deribit_options_expiry_publish_topic' : 'deribit-options-expiry'
+         },
+         'redis' : {
+             'host' : 'localhost',
+             'port' : 6379,
+             'db' : 0,
+             'ttl_ms' : 1000*60*15 # 15 min
+         }
+     }
+ }
+
+ logging.Formatter.converter = time.gmtime
+ logger = logging.getLogger()
+ log_level = logging.INFO # DEBUG --> INFO --> WARNING --> ERROR
+ logger.setLevel(log_level)
+ format_str = '%(asctime)s %(message)s'
+ formatter = logging.Formatter(format_str)
+ sh = logging.StreamHandler()
+ sh.setLevel(log_level)
+ sh.setFormatter(formatter)
+ logger.addHandler(sh)
+ # fh = logging.FileHandler(f"{param['job_name']}.log")
+ # fh.setLevel(log_level)
+ # fh.setFormatter(formatter)
+ # logger.addHandler(fh)
+
+ class LogLevel(Enum):
+     CRITICAL = 50
+     ERROR = 40
+     WARNING = 30
+     INFO = 20
+     DEBUG = 10
+     NOTSET = 0
+
+ def log(message : str, log_level : LogLevel = LogLevel.INFO):
+     if log_level.value<LogLevel.WARNING.value:
+         logger.info(f"{datetime.now()} {message}")
+
+     elif log_level.value==LogLevel.WARNING.value:
+         logger.warning(f"{datetime.now()} {message}")
+
+     elif log_level.value>=LogLevel.ERROR.value: # >= so CRITICAL is logged too
+         logger.error(f"{datetime.now()} {message}")
+
+ def parse_args():
+     parser = argparse.ArgumentParser() # type: ignore
+
+     parser.add_argument("--provider_id", help="Identifies this provider instance; used in the job name.", default=None)
+     parser.add_argument("--market", help="Default BTC", default='BTC')
+     parser.add_argument("--redis_ttl_ms", help="TTL for items published to redis. Default: 1000*60*60 (i.e. 1hr)", default=1000*60*60)
+
+     args = parser.parse_args()
+     if args.provider_id:
+         param['provider_id'] = args.provider_id
+         param['market'] = args.market
+         param['redis_ttl_ms'] = int(args.redis_ttl_ms)
+
+
+ def init_redis_client() -> StrictRedis:
+     redis_client : StrictRedis = StrictRedis(
+         host = param['mds']['redis']['host'],
+         port = param['mds']['redis']['port'],
+         db = 0,
+         ssl = False
+     )
+     try:
+         redis_client.keys() # cheap round trip to verify the connection
+     except RedisConnectionError as redis_conn_error:
+         err_msg = f"Failed to connect to redis: {param['mds']['redis']['host']}, port: {param['mds']['redis']['port']}"
+         raise ConnectionError(err_msg) from redis_conn_error
+
+     return redis_client
+
+
+ def _fetch_historical_daily_candle_height(
+     exchange,
+     normalized_symbol : str,
+     timestamp_ms : int,
+     offset_days : int,
+     candle_height : float,
+     reload_candle_height : bool = False
+ ):
+     # Returns close - open of the 1d candle offset_days away from timestamp_ms.
+     # Returns None if candle_height is already populated (unless reload_candle_height) or the day is not yet complete.
+     if not candle_height or reload_candle_height:
+         dt = datetime.fromtimestamp(int(timestamp_ms/1000)) + timedelta(days=offset_days)
+         dt = datetime(dt.year, dt.month, dt.day)
+         timestamp_ms = int(dt.timestamp()) * 1000
+         if dt < datetime(datetime.today().year, datetime.today().month, datetime.today().day):
+             historical_day_candle = fetch_ohlcv_one_candle(exchange=exchange, normalized_symbol=normalized_symbol, timestamp_ms=timestamp_ms, ref_timeframe='1d')
+             if historical_day_candle:
+                 return historical_day_candle['close'] - historical_day_candle['open']
+             else:
+                 return None
+         else:
+             return None
+     else:
+         return None
+
+ async def main():
+     parse_args()
+
+     param['job_name'] = f'candles_provider_{param["provider_id"]}'
+     redis_client : StrictRedis = init_redis_client()
+
+     exchange = deribit()
+
+     i = 0
+     while True:
+         try:
+             pd_old_expiry_data = None
+             if os.path.isfile(param['archive_file_name']):
+                 pd_old_expiry_data = pd.read_csv(param['archive_file_name'])
+                 pd_old_expiry_data.drop(pd_old_expiry_data.columns[pd_old_expiry_data.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
+                 pd_old_expiry_data['datetime'] = pd.to_datetime(pd_old_expiry_data['datetime'])
+                 pd_old_expiry_data['datetime'] = pd_old_expiry_data['datetime'].dt.tz_localize(None)
+
+             start = time.time()
+             expiry_data = fetch_deribit_btc_option_expiries(market=param['market'])
+             expiry_data = expiry_data['by_expiry']
+             elapsed_sec = int(time.time() - start)
+             log(f"#{i} Took {elapsed_sec} sec to fetch option expiry data from Deribit")
+
+             publish_key = param['mds']['topics']['deribit_options_expiry_publish_topic']
+             expiry_sec = int(int(param['mds']['redis']['ttl_ms'])/1000)
+             redis_client.set(name=publish_key, value=json.dumps(expiry_data).encode('utf-8'), ex=expiry_sec)
+
+             pd_new_expiry_data = pd.DataFrame([ { 'datetime' : x[0], 'notional_usd' : x[1] } for x in expiry_data ])
+
+             pd_new_expiry_data['symbol'] = f"{param['market']}/USDT"
+
+             pd_new_expiry_data['datetime'] = pd.to_datetime(pd_new_expiry_data['datetime'])
+             pd_new_expiry_data['datetime'] = pd_new_expiry_data['datetime'].dt.tz_localize(None)
+             pd_new_expiry_data['timestamp_sec'] = pd_new_expiry_data['datetime'].apply(lambda dt: int(dt.timestamp()))
+             pd_new_expiry_data['timestamp_ms'] = pd_new_expiry_data['timestamp_sec'] * 1000
+             timestamp_to_datetime_cols(pd_new_expiry_data)
+
+             if pd_old_expiry_data is not None:
+                 min_datetime_from_new_expiry_data = pd_new_expiry_data['datetime'].min()
+
+                 # Keep only archived rows older than the new snapshot: OI can change, so the new update wins on overlap.
+                 pd_old_expiry_data = pd_old_expiry_data[
+                     pd_old_expiry_data['datetime'] < min_datetime_from_new_expiry_data
+                 ]
+                 pd_merged_expiry_data = pd.concat([pd_old_expiry_data, pd_new_expiry_data], axis=0, ignore_index=True)
+             else:
+                 pd_merged_expiry_data = pd_new_expiry_data
+
+             if 'daily_candle_height_tm0' not in pd_merged_expiry_data.columns:
+                 pd_merged_expiry_data['daily_candle_height_tm0'] = None
+                 pd_merged_expiry_data['daily_candle_height_tm1'] = None
+                 pd_merged_expiry_data['daily_candle_height_tm2'] = None
+                 pd_merged_expiry_data['daily_candle_height_tm3'] = None
+
+             # candle_height = close - open (can be positive or negative); tm0..tm3 look back 0..3 days from expiry.
+             pd_merged_expiry_data['daily_candle_height_tm0'] = pd_merged_expiry_data.apply(lambda rw : _fetch_historical_daily_candle_height(exchange, rw['symbol'], rw['timestamp_ms'], 0, rw['daily_candle_height_tm0']), axis=1) # type: ignore
+             pd_merged_expiry_data['daily_candle_height_tm1'] = pd_merged_expiry_data.apply(lambda rw : _fetch_historical_daily_candle_height(exchange, rw['symbol'], rw['timestamp_ms'], -1, rw['daily_candle_height_tm1']), axis=1) # type: ignore
+             pd_merged_expiry_data['daily_candle_height_tm2'] = pd_merged_expiry_data.apply(lambda rw : _fetch_historical_daily_candle_height(exchange, rw['symbol'], rw['timestamp_ms'], -2, rw['daily_candle_height_tm2']), axis=1) # type: ignore
+             pd_merged_expiry_data['daily_candle_height_tm3'] = pd_merged_expiry_data.apply(lambda rw : _fetch_historical_daily_candle_height(exchange, rw['symbol'], rw['timestamp_ms'], -3, rw['daily_candle_height_tm3']), axis=1) # type: ignore
+
+             pd_merged_expiry_data.to_csv(param['archive_file_name'])
+
+         except Exception as loop_err:
+             log(f"Loop error {loop_err} {str(sys.exc_info()[0])} {str(sys.exc_info()[1])} {traceback.format_exc()}", log_level=LogLevel.ERROR)
+         finally:
+             # NOTE: no throttle here; the loop refetches as soon as the previous pass completes.
+             i += 1
+
+ async def _run_jobs():
+     await main()
+ asyncio.run(_run_jobs())
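
The provider above publishes each snapshot with a plain redis SET plus a TTL, not a pub/sub channel, so a consumer simply GETs the key. A minimal reader sketch, assuming redis on localhost:6379 as configured in param above; the key name and JSON payload come from the code, the rest is illustrative and not part of the package:

    import json
    from redis import StrictRedis

    redis_client = StrictRedis(host='localhost', port=6379, db=0)

    raw = redis_client.get('deribit-options-expiry')  # None once the 15-min TTL lapses
    if raw is not None:
        expiry_data = json.loads(raw)
        for x in expiry_data:
            # x[0] is the expiry datetime, x[1] the notional in USD, per the provider's own indexing
            print(x[0], x[1])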
@@ -0,0 +1,359 @@
+ from enum import Enum
+ import argparse
+ import time
+ from datetime import datetime
+ import operator
+ from typing import Dict, Union, Mapping
+ import logging
+ import json
+ from tabulate import tabulate
+ import asyncio
+ from threading import Thread
+ import pandas as pd
+ from redis import StrictRedis
+ from redis.client import PubSub
+ from redis.exceptions import ConnectionError as RedisConnectionError
+
+ from ccxt.base.exchange import Exchange
+ import ccxt.pro as ccxtpro
+
+ '''
+ To start from command prompt:
+     set PYTHONPATH=%PYTHONPATH%;D:\dev\siglab
+     python orderbooks_provider.py --provider_id aaa --instance_capacity 25 --ts_delta_observation_ms_threshold 150 --ts_delta_consecutive_ms_threshold 150 --redis_ttl_ms 3600000
+
+ This script is pypy compatible.
+
+ Key parameters you may want to modify:
+     provider_id: You can trigger this provider instance using test_provider.py. Of course, you'd write your own.
+     instance_capacity: max # of tickers this provider instance will handle.
+     ts_delta_observation_ms_threshold: defaults to 150ms. "Observation delta" is the clock diff between the orderbook timestamp and your local server clock.
+     ts_delta_consecutive_ms_threshold: defaults to 150ms. "Consecutive delta" is the time elapsed between consecutive orderbook updates.
+     redis_ttl_ms: how long an orderbook snapshot lasts on redis once the provider publishes it.
+
+ launch.json if you wish to debug from VSCode:
+     {
+         "version": "0.2.0",
+         "configurations": [
+             {
+                 "name": "Python Debugger: Current File",
+                 "type": "debugpy",
+                 "request": "launch",
+                 "program": "${file}",
+                 "console": "integratedTerminal",
+                 "args" : [
+                     "--provider_id", "YourProviderNameHere",
+                     "--instance_capacity", "25",
+                     "--ts_delta_observation_ms_threshold", "150",
+                     "--ts_delta_consecutive_ms_threshold", "150",
+                     "--redis_ttl_ms", "3600000"
+                 ],
+                 "env": {
+                     "PYTHONPATH": "${workspaceFolder}"
+                 }
+             }
+         ]
+     }
+ '''
+
+ class LogLevel(Enum):
+     CRITICAL = 50
+     ERROR = 40
+     WARNING = 30
+     INFO = 20
+     DEBUG = 10
+     NOTSET = 0
+
+ param : Dict = {
+     'market_type' : 'linear', # For spot markets, set to "spot". For perpetuals, check the ccxt docs: on most exchanges it's 'linear' or 'swap'. Example: https://github.com/ccxt/ccxt/blob/master/python/ccxt/okx.py?plain=1#L1110
+
+     # Provider ID is part of mds publish topic.
+     'provider_id' : 'ceaafe1d-e320-44ec-a959-da73edb9c4b1',
+
+     'instance_capacity' : 25, # int, not str: compared against ticker_count in main()
+
+     # Keep track of latency issues
+     # a) ts_delta_observation_ms: server clock vs timestamp from exchange
+     # b) ts_delta_consecutive_ms: gap between consecutive updates
+     'ts_delta_observation_ms_threshold' : 150,
+     'ts_delta_consecutive_ms_threshold' : 150,
+
+     # Publish to message bus
+     'mds' : {
+         'topics' : {
+             'partition_assign_topic' : 'mds_assign_$PROVIDER_ID$',
+             'candles_publish_topic' : 'orderbooks_$SYMBOL$_$EXCHANGE$'
+         },
+         'redis' : {
+             'host' : 'localhost',
+             'port' : 6379,
+             'db' : 0,
+             'ttl_ms' : 1000*60*15 # 15 min
+         }
+     }
+ }
+
+ logging.Formatter.converter = time.gmtime
+ logger = logging.getLogger()
+ log_level = logging.INFO # DEBUG --> INFO --> WARNING --> ERROR
+ logger.setLevel(log_level)
+ format_str = '%(asctime)s %(message)s'
+ formatter = logging.Formatter(format_str)
+ sh = logging.StreamHandler()
+ sh.setLevel(log_level)
+ sh.setFormatter(formatter)
+ logger.addHandler(sh)
+ # fh = logging.FileHandler(f"{param['job_name']}.log")
+ # fh.setLevel(log_level)
+ # fh.setFormatter(formatter)
+ # logger.addHandler(fh)
+
+ market_type : str = param['market_type']
+
+ exchange_params : Dict = {
+     'newUpdates': False,
+     'options' : {
+         'defaultType' : 'swap' # spot, swap
+     }
+ }
+
+ async def instantiate_exchange(
+     exchange_name : str,
+     old_exchange : Union[Exchange, None]
+ ) -> Exchange:
+     if old_exchange:
+         await old_exchange.close() # type: ignore Otherwise, Error: Cannot access attribute "close" for class "Exchange"
+     if exchange_name==f"binance_{market_type}":
+         exchange = ccxtpro.binance(exchange_params)
+     elif exchange_name==f"okx_{market_type}":
+         exchange = ccxtpro.okx(exchange_params)
+     elif exchange_name==f"bybit_{market_type}":
+         exchange = ccxtpro.bybit(exchange_params)
+     else:
+         exchange = ccxtpro.binance(exchange_params)
+     exchange.name = exchange_name # type: ignore Otherwise, Error: Cannot assign to attribute "name" for class "binance"
+     return exchange
+
+ def log(message : str, log_level : LogLevel = LogLevel.INFO):
+     if log_level.value<LogLevel.WARNING.value:
+         logger.info(f"{datetime.now()} {message}")
+
+     elif log_level.value==LogLevel.WARNING.value:
+         logger.warning(f"{datetime.now()} {message}")
+
+     elif log_level.value>=LogLevel.ERROR.value: # >= so CRITICAL is logged too
+         logger.error(f"{datetime.now()} {message}")
+
+ def parse_args():
+     parser = argparse.ArgumentParser() # type: ignore
+
+     parser.add_argument("--provider_id", help="The provider goes to work when a message arrives on its redis partition_assign_topic carrying this provider_id.", default=None)
+     parser.add_argument("--instance_capacity", help="Instance capacity in number of tickers it can process. -1: no limit.", default=-1)
+     parser.add_argument("--ts_delta_observation_ms_threshold", help="Max threshold in ms: server clock vs update timestamp.", default=param['ts_delta_observation_ms_threshold'])
+     parser.add_argument("--ts_delta_consecutive_ms_threshold", help="Max threshold in ms: gap between consecutive updates.", default=param['ts_delta_consecutive_ms_threshold'])
+
+     parser.add_argument("--redis_ttl_ms", help="TTL for items published to redis. Default: 1000*60*60 (i.e. 1hr)", default=1000*60*60)
+
+     args = parser.parse_args()
+     if args.provider_id:
+         param['provider_id'] = args.provider_id
+         param['instance_capacity'] = int(args.instance_capacity)
+         param['ts_delta_observation_ms_threshold'] = int(args.ts_delta_observation_ms_threshold)
+         param['ts_delta_consecutive_ms_threshold'] = int(args.ts_delta_consecutive_ms_threshold)
+         param['redis_ttl_ms'] = int(args.redis_ttl_ms)
+
+ def init_redis_client() -> StrictRedis:
+     redis_client : StrictRedis = StrictRedis(
+         host = param['mds']['redis']['host'],
+         port = param['mds']['redis']['port'],
+         db = 0,
+         ssl = False
+     )
+     try:
+         redis_client.keys() # cheap round trip to verify the connection
+     except RedisConnectionError as redis_conn_error:
+         err_msg = f"Failed to connect to redis: {param['mds']['redis']['host']}, port: {param['mds']['redis']['port']}"
+         raise ConnectionError(err_msg) from redis_conn_error
+
+     return redis_client
+
+ def init_redis_channel_subscription(redis_client : StrictRedis, partition_assign_topic : str) -> PubSub:
+     pubsub = redis_client.pubsub() # reuse the client passed in; no need for a second connection
+     pubsub.subscribe(partition_assign_topic)
+     return pubsub
+
+
+ class OrderBook:
+     def __init__(
+         self,
+         ticker : str,
+         exchange_name : str
+     ) -> None:
+         self.ticker : str = ticker
+         self.exchange_name : str = exchange_name
+         self.bids : Dict = {}
+         self.asks : Dict = {}
+
+         self.last_timestamp_ms : Union[int,None] = None
+         self.timestamp : Union[int,None] = None # order book update timestamp in sec
+         self.timestamp_ms : Union[int,None] = None # order book update timestamp in ms
+         self.ts_delta_consecutive_ms : int = 0
+         self.ts_delta_observation_ms : int = 0
+         self.is_valid : bool = True
+
+     def update_book(
+         self,
+         update : Mapping,
+         param : dict
+     ) -> None:
+         update_ts_ms = update['timestamp']
+         if len(str(update_ts_ms))==10:
+             update_ts_ms = update_ts_ms*1000 # timestamp arrived in sec: normalize to ms
+         self.last_timestamp_ms = self.timestamp_ms
+         self.timestamp_ms = int(update_ts_ms)
+         self.timestamp = int(self.timestamp_ms/1000)
+
+         '''
+         Keep track of latency issues
+             a) ts_delta_observation_ms: server clock vs timestamp from exchange
+             b) ts_delta_consecutive_ms: gap between consecutive updates
+         '''
+         self.ts_delta_observation_ms = int(datetime.now().timestamp()*1000) - self.timestamp_ms
+         self.ts_delta_consecutive_ms = self.timestamp_ms - self.last_timestamp_ms if self.last_timestamp_ms else 0
+
+         self.is_valid = True
+         if self.ts_delta_observation_ms>param['ts_delta_observation_ms_threshold']:
+             self.is_valid = False
+         if self.ts_delta_consecutive_ms>param['ts_delta_consecutive_ms_threshold']:
+             self.is_valid = False
+
+         self.bids.update((float(bid[0]), float(bid[1])) for bid in update.get('bids', []))
+         self.asks.update((float(ask[0]), float(ask[1])) for ask in update.get('asks', []))
+         self.bids = { key:val for key,val in self.bids.items() if val!=0 } # zero amount means the level is gone
+         self.asks = { key:val for key,val in self.asks.items() if val!=0 }
+         if self.bids and self.asks:
+             best_ask = float(min(self.asks.keys()))
+             best_bid = float(max(self.bids.keys()))
+             if best_ask<best_bid:
+                 raise ValueError(f"{self.exchange_name} {self.ticker} best bid > best ask!?! How?")
+         self.bids = dict(sorted(self.bids.items(), reverse=True))
+         self.asks = dict(sorted(self.asks.items()))
+
+     def to_dict(self):
+         bids = ([float(price), float(amount)] for price, amount in self.bids.items() if float(amount))
+         asks = ([float(price), float(amount)] for price, amount in self.asks.items() if float(amount))
+         data = {
+             "denormalized_ticker" : self.ticker,
+             "exchange_name" : self.exchange_name,
+             "bids" : sorted(bids, key=operator.itemgetter(0), reverse=True),
+             "asks" : sorted(asks, key=operator.itemgetter(0)),
+             "timestamp" : self.timestamp, # in sec
+             "timestamp_ms" : self.timestamp_ms, # in ms (timestamp in update from exchange)
+             'ts_delta_observation_ms' : self.ts_delta_observation_ms,
+             'ts_delta_consecutive_ms' : self.ts_delta_consecutive_ms,
+             "is_valid" : self.is_valid
+         }
+
+         data['best_ask'] = data['asks'][0] if data['asks'] else None # asks sorted ascending: lowest ask first
+         data['best_bid'] = data['bids'][0] if data['bids'] else None # bids sorted descending: highest bid first
+         return data
+
+ class ThreadTask:
+     def __init__(self) -> None:
+         self.keep_running = True
+
+ def handle_ticker(
+     exchange_name : str,
+     ticker : str,
+     candles_publish_topic : str,
+     redis_client : StrictRedis,
+     task : ThreadTask
+ ):
+     asyncio.run(_handle_ticker(
+         exchange_name=exchange_name,
+         ticker=ticker,
+         candles_publish_topic=candles_publish_topic,
+         redis_client=redis_client,
+         task=task
+         )
+     )
+
+ async def _handle_ticker(
+     exchange_name : str,
+     ticker : str,
+     candles_publish_topic : str,
+     redis_client : StrictRedis,
+     task : ThreadTask
+ ):
+     exchange = await instantiate_exchange(exchange_name=exchange_name, old_exchange=None)
+     ob = OrderBook(ticker=ticker, exchange_name=exchange_name)
+     candles_publish_topic = candles_publish_topic.replace("$SYMBOL$", ticker)
+     candles_publish_topic = candles_publish_topic.replace("$EXCHANGE$", exchange_name)
+
+     local_server_timestamp_ms = datetime.now().timestamp()*1000
+     exchange_timestamp_ms = await exchange.fetch_time() # type: ignore Otherwise, Error: Cannot access attribute "fetch_time" for class "Coroutine[Any, Any, binance | okx | bybit]"
+     timestamp_gap_ms = local_server_timestamp_ms - exchange_timestamp_ms
+     log(f"{exchange_name} {ticker} local_server_timestamp_ms: {local_server_timestamp_ms} vs exchange_timestamp_ms: {exchange_timestamp_ms}. timestamp_gap_ms: {timestamp_gap_ms}")
+
+     while task.keep_running:
+         try:
+             update = await exchange.watch_order_book(ticker) # type: ignore Otherwise, Error: Cannot access attribute "watch_order_book" for class "Coroutine[Any, Any, binance | okx | bybit]"
+             ob.update_book(update=update, param=param)
+
+             ob_dict = ob.to_dict()
+
+             redis_client.set(name=candles_publish_topic, value=json.dumps(ob_dict), ex=int(param['mds']['redis']['ttl_ms']/1000))
+
+             ob_dict.pop('bids')
+             ob_dict.pop('asks')
+             pd_ob = pd.DataFrame([ob_dict]) # wrap in a list: all remaining values are scalars
+             log(f"{tabulate(pd_ob, headers=pd_ob.columns)}") # type: ignore Otherwise, Error: Argument of type "DataFrame" cannot be assigned to parameter "tabular_data" of type "Mapping[str, Iterable[Any]]
+
+         except ValueError as update_error:
+             # A crossed book raises ValueError in update_book: reconnect and rebuild the book from scratch.
+             log(f"Update error! {update_error}")
+             exchange = await instantiate_exchange(exchange_name=exchange_name, old_exchange=exchange) # type: ignore Otherwise, Error: Argument of type "Coroutine[Any, Any, Exchange] | Exchange" cannot be assigned to parameter "old_exchange"
+             ob = OrderBook(ticker=ticker, exchange_name=exchange_name)
+
+
+ async def main():
+     parse_args()
+
+     param['job_name'] = f'candles_provider_{param["provider_id"]}'
+
+     redis_client : StrictRedis = init_redis_client()
+     partition_assign_topic : str = param['mds']['topics']['partition_assign_topic']
+     partition_assign_topic = partition_assign_topic.replace("$PROVIDER_ID$", param['provider_id'])
+     candles_publish_topic : str = param['mds']['topics']['candles_publish_topic']
+     redis_pubsub : PubSub = init_redis_channel_subscription(redis_client, partition_assign_topic)
+
+     log(f"orderbooks_provider {param['provider_id']} started, waiting for trigger. (Can use test_provider.py to trigger it)")
+
+     tasks = []
+     for message in redis_pubsub.listen():
+         if message['type'] == 'message' and message['channel'].decode()==partition_assign_topic:
+             if tasks:
+                 for task in tasks:
+                     task.keep_running = False # Every time the provider is triggered, it cancels the previous tasks
+                 tasks = []
+
+             tickers = json.loads(message['data'].decode('utf-8'))
+             tickers = [ { 'exchange' : x.split('|')[0], 'ticker' : x.split('|')[-1] } for x in tickers ]
+             logger.info(f"{partition_assign_topic} {message}")
+
+             ticker_count : int = 1
+             for entry in tickers:
+                 exchange_name : str = entry['exchange']
+                 ticker : str = entry['ticker']
+                 task : ThreadTask = ThreadTask()
+                 tasks.append(task) # track it so the next trigger can stop it
+                 t = Thread(target=handle_ticker, args=(exchange_name, ticker, candles_publish_topic, redis_client, task))
+                 t.start()
+                 log(f"Task created for {exchange_name}, {ticker}")
+
+                 if param['instance_capacity']>0 and ticker_count>=param['instance_capacity']:
+                     break # -1 means no limit
+                 ticker_count+=1
+
+ asyncio.run(main())
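
Each book is published under a key of the form orderbooks_$SYMBOL$_$EXCHANGE$ with the placeholders resolved, e.g. orderbooks_BTC/USDT:USDT_okx_linear for the first ticker test_provider.py sends below. A minimal polling consumer sketch; the key format and payload fields come from OrderBook.to_dict() above, while the loop itself is illustrative:

    import json
    import time
    from redis import StrictRedis

    redis_client = StrictRedis(host='localhost', port=6379, db=0)
    key = 'orderbooks_BTC/USDT:USDT_okx_linear'  # $SYMBOL$ and $EXCHANGE$ resolved

    while True:
        raw = redis_client.get(key)
        if raw is not None:
            snapshot = json.loads(raw)
            if snapshot['is_valid']:  # False when either latency threshold was breached
                print(snapshot['timestamp_ms'], snapshot['best_bid'], snapshot['best_ask'])
        time.sleep(1)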
@@ -0,0 +1,70 @@
+ import argparse
+ from typing import Dict, List
+ import json
+ from redis import StrictRedis
+ from redis.exceptions import ConnectionError as RedisConnectionError
+
+ '''
+ To start from command prompt:
+     set PYTHONPATH=%PYTHONPATH%;D:\dev\siglab\siglab_py
+     python test_provider.py --provider_id aaa
+ '''
+
+ param : Dict[str, str] = {
+     'provider_id' : '---'
+ }
+
+ def parse_args():
+     parser = argparse.ArgumentParser() # type: ignore
+     parser.add_argument("--provider_id", help="The provider goes to work when a message arrives on its partition_assign_topic carrying this provider_id.", default=None)
+
+     args = parser.parse_args()
+     param['provider_id'] = args.provider_id
+
+ def init_redis_client() -> StrictRedis:
+     redis_client : StrictRedis = StrictRedis(
+         host = 'localhost',
+         port = 6379,
+         db = 0,
+         ssl = False
+     )
+     try:
+         redis_client.keys() # cheap round trip to verify the connection
+     except RedisConnectionError as redis_conn_error:
+         err_msg = f"Failed to connect to redis: {'localhost'}, port: {6379}"
+         raise ConnectionError(err_msg) from redis_conn_error
+
+     return redis_client
+
+ def trigger_producers(
+     redis_client : StrictRedis,
+     exchange_tickers : List,
+     candles_partition_assign_topic : str):
+     # https://redis.io/commands/publish/
+     redis_client.publish(channel=candles_partition_assign_topic, message=json.dumps(exchange_tickers).encode('utf-8'))
+
+ if __name__ == '__main__':
+     parse_args()
+
+     provider_id : str = param['provider_id']
+     partition_assign_topic = 'mds_assign_$PROVIDER_ID$'
+     candles_partition_assign_topic = partition_assign_topic.replace("$PROVIDER_ID$", provider_id)
+     redis_client : StrictRedis = init_redis_client()
+
+     exchange_tickers : List[str] = [
+         'okx_linear|BTC/USDT:USDT',
+         'okx_linear|ETH/USDT:USDT',
+         'okx_linear|SOL/USDT:USDT',
+     ]
+     trigger_producers(
+         redis_client=redis_client,
+         exchange_tickers=exchange_tickers,
+         candles_partition_assign_topic=candles_partition_assign_topic)
+
+     print(f"Sent {exchange_tickers}")
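
End to end: start orderbooks_provider.py with a provider_id, then run test_provider.py with the same provider_id to publish the ticker assignment that wakes it up (both invocations are taken from the scripts' own docstrings above):

    set PYTHONPATH=%PYTHONPATH%;D:\dev\siglab
    python orderbooks_provider.py --provider_id aaa --instance_capacity 25 --ts_delta_observation_ms_threshold 150 --ts_delta_consecutive_ms_threshold 150 --redis_ttl_ms 3600000

    set PYTHONPATH=%PYTHONPATH%;D:\dev\siglab\siglab_py
    python test_provider.py --provider_id aaa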