siglab_py-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



+++ candles_provider.py
@@ -0,0 +1,342 @@
+ import sys
+ import traceback
+ from enum import Enum
+ import argparse
+ import time
+ from datetime import datetime, timedelta
+ from typing import Any, Dict
+ import logging
+ import json
+ import asyncio
+ from threading import Thread
+ from collections import defaultdict
+ import pandas as pd
+ from redis import StrictRedis
+ from redis.client import PubSub
+ from redis.exceptions import ConnectionError as RedisConnectionError
+
+ from ccxt.binance import binance
+ from ccxt.okx import okx
+ from ccxt.bybit import bybit
+
+ from util.market_data_util import fetch_candles
+
+
+ '''
+ To start from command prompt:
+     set PYTHONPATH=%PYTHONPATH%;D:\dev\siglab\siglab_py
+     python candles_provider.py --provider_id aaa --candle_size 1h --how_many_candles 2160 --redis_ttl_ms 3600000
+
+ This script is pypy compatible:
+     pypy candles_provider.py --provider_id aaa --candle_size 1h --how_many_candles 2160 --redis_ttl_ms 3600000
+
+ Key parameters you may want to modify:
+     provider_id: You can trigger this provider instance using test_provider.py (or a trigger script of your own).
+     candle_size: 1m, 5m, 15m, 1h, 1d, for example.
+     how_many_candles: defaults to 2160 (24 x 90).
+     redis_ttl_ms: How long published candles live on redis (TTL in ms) after the provider publishes them.
+
+ launch.json if you wish to debug from VSCode:
+ {
+     "version": "0.2.0",
+     "configurations": [
+         {
+             "name": "Python Debugger: Current File",
+             "type": "debugpy",
+             "request": "launch",
+             "program": "${file}",
+             "console": "integratedTerminal",
+             "args" : [
+                 "--provider_id", "YourProviderNameHere",
+                 "--candle_size", "1h",
+                 "--how_many_candles", "2160",
+                 "--redis_ttl_ms", "3600000"
+             ],
+             "env": {
+                 "PYTHONPATH": "${workspaceFolder}"
+             }
+         }
+     ]
+ }
+ '''
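+ # How a trigger script (test_provider.py, or your own) kicks this provider off: publish a JSON list of
+ # "exchange|ticker" strings to the partition assign topic (see main() below). A minimal sketch, assuming
+ # redis-py, --provider_id aaa, and illustrative symbols:
+ #
+ #   from redis import StrictRedis
+ #   import json
+ #   r = StrictRedis(host='localhost', port=6379, db=0)
+ #   tickers = [ 'okx_linear|BTC/USDT:USDT', 'binance_linear|ETH/USDT:USDT' ]
+ #   r.publish('mds_assign_aaa', json.dumps(tickers))
+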
+ class LogLevel(Enum):
+     CRITICAL = 50
+     ERROR = 40
+     WARNING = 30
+     INFO = 20
+     DEBUG = 10
+     NOTSET = 0
+
+ param : Dict = {
+     'candle_size' : '1h',
+     'how_many_candles' : 24*90,
+
+     # For spot markets, set to 'spot'. For perpetuals, check the ccxt docs; for most exchanges it's 'linear' or 'swap'. Example: https://github.com/ccxt/ccxt/blob/master/python/ccxt/okx.py?plain=1#L1110
+     'market_type' : 'linear',
+
+     # Provider ID is part of the mds publish topic.
+     'provider_id' : 'b0f1b878-c281-43d7-870a-0347f90e6ece',
+
+     # Publish to message bus
+     'mds' : {
+         'topics' : {
+             'partition_assign_topic' : 'mds_assign_$PROVIDER_ID$',
+             'candles_publish_topic' : 'candles-$DENORMALIZED_SYMBOL$-$EXCHANGE_NAME$-$INTERVAL$'
+         },
+         'redis' : {
+             'host' : 'localhost',
+             'port' : 6379,
+             'db' : 0,
+             'ttl_ms' : 1000*60*15 # 15 min
+         }
+     }
+ }
+
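+ # Example resolved publish key, placeholders substituted (this matches the key example given in
+ # candles_ta_provider.py's docstring):
+ #   candles-BTC-USDT-SWAP-okx_linear-1h
+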
+ logging.Formatter.converter = time.gmtime
+ logger = logging.getLogger()
+ log_level = logging.INFO # DEBUG --> INFO --> WARNING --> ERROR
+ logger.setLevel(log_level)
+ format_str = '%(asctime)s %(message)s'
+ formatter = logging.Formatter(format_str)
+ sh = logging.StreamHandler()
+ sh.setLevel(log_level)
+ sh.setFormatter(formatter)
+ logger.addHandler(sh)
+ # Optional file logging (requires param['job_name'], which is set in main()):
+ # fh = logging.FileHandler(f"{param['job_name']}.log")
+ # fh.setLevel(log_level)
+ # fh.setFormatter(formatter)
+ # logger.addHandler(fh)
+
+ market_type : str = param['market_type']
+
+ binance_exchange = binance({
+     'defaultType' : market_type
+ })
+
+ okx_exchange = okx({
+     'defaultType' : market_type
+ })
+
+ bybit_exchange = bybit({
+     'defaultType' : market_type
+ })
+
+ exchanges = {
+     f"binance_{market_type}" : binance_exchange,
+     f"okx_{market_type}" : okx_exchange,
+     f"bybit_{market_type}" : bybit_exchange
+ }
+
+ def log(message : str, log_level : LogLevel = LogLevel.INFO):
+     if log_level.value<LogLevel.WARNING.value:
+         logger.info(f"{datetime.now()} {message}")
+
+     elif log_level.value==LogLevel.WARNING.value:
+         logger.warning(f"{datetime.now()} {message}")
+
+     elif log_level.value>=LogLevel.ERROR.value: # >= so CRITICAL is not silently dropped
+         logger.error(f"{datetime.now()} {message}")
+
+ def parse_args():
+     parser = argparse.ArgumentParser() # type: ignore
+
+     parser.add_argument("--provider_id", help="candles_provider goes to work when it sees a message on the redis topic partition_assign_topic carrying this provider_id.", default=None)
+     parser.add_argument("--candle_size", help="candle interval: 1m, 1h, 1d... etc", default='1h')
+     parser.add_argument("--how_many_candles", help="how_many_candles", default=24*7)
+
+     parser.add_argument("--redis_ttl_ms", help="TTL for items published to redis. Default: 1000*60*60 (i.e. 1hr)", default=1000*60*60)
+
+     args = parser.parse_args()
+     if args.provider_id:
+         param['provider_id'] = args.provider_id
+     param['candle_size'] = args.candle_size
+     param['how_many_candles'] = int(args.how_many_candles)
+
+     param['redis_ttl_ms'] = int(args.redis_ttl_ms)
+
+ def init_redis_client() -> StrictRedis:
+     redis_client : StrictRedis = StrictRedis(
+         host = param['mds']['redis']['host'],
+         port = param['mds']['redis']['port'],
+         db = param['mds']['redis']['db'],
+         ssl = False
+     )
+     try:
+         redis_client.keys() # cheap round trip to fail fast if redis is unreachable
+     except RedisConnectionError as redis_conn_error:
+         err_msg = f"Failed to connect to redis: {param['mds']['redis']['host']}, port: {param['mds']['redis']['port']}"
+         raise ConnectionError(err_msg) from redis_conn_error
+
+     return redis_client
+
+ def init_redis_channel_subscription(redis_client : StrictRedis, partition_assign_topic : str) -> PubSub:
+     # Use the client passed in (the original re-created a client here, shadowing the argument).
+     pubsub = redis_client.pubsub()
+     pubsub.subscribe(partition_assign_topic)
+     return pubsub
+
+ def process_universe(
+     universe : pd.DataFrame,
+     task,
+     redis_client : StrictRedis
+ ):
+     # Key = (exchange_name, ticker)
+     subscribed : Dict[tuple, Dict[str, Any]] = defaultdict(lambda : {'candles': None, 'num_candles': 0} )
+
+     while task.keep_running:
+         wave_start = time.time() # separate from the per-publish timer below, which used to clobber this
+
+         num_fetches_this_wave = 0
+
+         i = 1
+         for index, row in universe.iterrows():
+             exchange_name : str = row['exchange']
+             ticker : str = row['ticker']
+
+             this_row_header = f'({i} of {universe.shape[0]}) exchange_name: {exchange_name}, ticker: {ticker}'
+
+             try:
+                 exchange = exchanges[exchange_name]
+
+                 fetch_again = False
+                 last_fetch = None
+                 last_fetch_ts = None
+                 if subscribed[(exchange_name, ticker)]:
+                     last_fetch = subscribed[(exchange_name, ticker)]['candles']
+                     if subscribed[(exchange_name, ticker)]['num_candles']>0:
+                         last_fetch_ts = last_fetch.iloc[-1]['timestamp_ms']/1000 # type: ignore Otherwise, Error: Cannot access attribute "iloc" for class "None"
+                 candle_size = param['candle_size']
+                 interval = candle_size[-1]
+                 number_intervals = param['how_many_candles']
+
+                 # Re-fetch once a tenth of the interval has elapsed since the last candle boundary.
+                 start_date : datetime = datetime.now()
+                 end_date : datetime = start_date
+                 if interval=="m":
+                     end_date = datetime.now()
+                     end_date = datetime(end_date.year, end_date.month, end_date.day, end_date.hour, end_date.minute, 0)
+                     start_date = end_date + timedelta(minutes=-number_intervals)
+
+                     num_sec_since_last_fetch = (end_date.timestamp() - last_fetch_ts) if last_fetch_ts else sys.maxsize
+                     fetch_again = num_sec_since_last_fetch >= 60 / 10
+
+                 elif interval=="h":
+                     end_date = datetime.now()
+                     end_date = datetime(end_date.year, end_date.month, end_date.day, end_date.hour, 0, 0)
+                     start_date = end_date + timedelta(hours=-number_intervals)
+
+                     num_sec_since_last_fetch = (end_date.timestamp() - last_fetch_ts) if last_fetch_ts else sys.maxsize
+                     fetch_again = num_sec_since_last_fetch >= 60*60 / 10
+
+                 elif interval=="d":
+                     end_date = datetime.now()
+                     end_date = datetime(end_date.year, end_date.month, end_date.day, 0, 0, 0)
+                     start_date = end_date + timedelta(days=-number_intervals)
+
+                     num_sec_since_last_fetch = (end_date.timestamp() - last_fetch_ts) if last_fetch_ts else sys.maxsize
+                     fetch_again = num_sec_since_last_fetch >= 24*60*60 / 10
+
+                 cutoff_ts = int(start_date.timestamp()) # in seconds
+
+                 if fetch_again:
+                     if datetime.now().minute==0:
+                         time.sleep(10) # At the top of the hour, give the exchange a moment to finalize the last candle
+
+                     candles = fetch_candles(
+                         start_ts=cutoff_ts,
+                         end_ts=int(end_date.timestamp()),
+                         exchange=exchange, normalized_symbols=[ticker],
+                         candle_size = candle_size,
+                         num_candles_limit = 100,
+                         logger = None
+                     )
+                     subscribed[(exchange_name, ticker)] = {
+                         'candles' : candles[ ticker ],
+                         'num_candles' : candles[ ticker ].shape[0] # type: ignore Otherwise, Error: "shape" is not a known attribute of "None"
+                     }
+                     num_fetches_this_wave += 1
+
+                     denormalized_ticker = next( exchange.markets[x] for x in exchange.markets if exchange.markets[x]['symbol']==ticker )['id']
+
+                     publish_key = param['mds']['topics']['candles_publish_topic']
+                     publish_key = publish_key.replace('$DENORMALIZED_SYMBOL$', denormalized_ticker)
+                     publish_key = publish_key.replace('$EXCHANGE_NAME$', exchange_name)
+                     publish_key = publish_key.replace('$INTERVAL$', param['candle_size'])
+
+                     data = candles[ticker].to_json(orient='records') # type: ignore Otherwise, Error: "to_json" is not a known attribute of "None"
+
+                     start = time.time()
+                     if redis_client:
+                         '''
+                         https://redis.io/commands/set/
+                         '''
+                         # Fresh candles live for one interval plus a 15 min buffer.
+                         expiry_sec : int = 0
+                         if interval=="m":
+                             expiry_sec = 60 + 60*15
+                         elif interval=="h":
+                             expiry_sec = 60*60 + 60*15
+                         elif interval=="d":
+                             expiry_sec = 60*60*24
+                             expiry_sec += 60*15 # additional 15min
+
+                         redis_client.set(name=publish_key, value=json.dumps(data).encode('utf-8'), ex=expiry_sec)
+
+                         redis_set_elapsed_ms = int((time.time() - start) *1000)
+
+                         log(f"published candles {this_row_header} {publish_key} {sys.getsizeof(data, -1)} bytes to mds elapsed {redis_set_elapsed_ms} ms")
+
+             except Exception as loop_error:
+                 log(f"Failed to process {this_row_header}. Error: {loop_error} {str(sys.exc_info()[0])} {str(sys.exc_info()[1])} {traceback.format_exc()}", log_level=LogLevel.ERROR)
+
+             if not task.keep_running:
+                 break
+
+             i += 1
+
+         if num_fetches_this_wave>0:
+             log(f"Fetch candles for whole universe done. elapsed: {time.time()-wave_start} sec, universe_reload_id: {task.universe_reload_id}. # tickers: {len(subscribed)}")
+         else:
+             log(f"universe_reload_id: {task.universe_reload_id}, Nothing to fetch this wave. Sleep a bit.")
+             time.sleep(3)
+
+     log(f"process_universe exit, universe_reload_id: {task.universe_reload_id}")
+
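+ # Reading published candles back from redis, a minimal consumer sketch. Note the stored value is a
+ # JSON-encoded string of the records JSON (json.dumps over DataFrame.to_json above), hence json.loads
+ # before read_json. Key name illustrative:
+ #
+ #   from io import StringIO
+ #   raw = redis_client.get('candles-BTC-USDT-SWAP-okx_linear-1h')
+ #   if raw:
+ #       pd_candles = pd.read_json(StringIO(json.loads(raw)), convert_dates=False)
+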
+ async def main():
+     parse_args()
+
+     param['job_name'] = f'candles_provider_{param["provider_id"]}'
+
+     redis_client : StrictRedis = init_redis_client()
+     partition_assign_topic : str = param['mds']['topics']['partition_assign_topic']
+     partition_assign_topic = partition_assign_topic.replace("$PROVIDER_ID$", param['provider_id'])
+     redis_pubsub : PubSub = init_redis_channel_subscription(redis_client, partition_assign_topic)
+
+     class ThreadTask:
+         def __init__(self, universe_reload_id) -> None:
+             self.keep_running = True
+             self.universe_reload_id = universe_reload_id
+     task = None
+
+     log(f"candles_provider {param['provider_id']} started, waiting for trigger. (Can use test_provider.py to trigger it)")
+
+     universe_reload_id = 1
+     for message in redis_pubsub.listen():
+         if message['type'] == 'message' and message['channel'].decode()==partition_assign_topic:
+             if task:
+                 # Signal the previous worker thread to stop before starting a new wave.
+                 task.keep_running = False
+
+             tickers = json.loads(message['data'].decode('utf-8'))
+             tickers = [ { 'exchange' : x.split('|')[0], 'ticker' : x.split('|')[-1] } for x in tickers ]
+             universe = pd.DataFrame(tickers)
+             logger.info(f"{partition_assign_topic} {message}")
+
+             task = ThreadTask(universe_reload_id=universe_reload_id)
+             t = Thread(target=process_universe, args = (universe, task, redis_client))
+             t.start()
+
+             universe_reload_id += 1
+
+ asyncio.run(main())
+++ candles_ta_provider.py
@@ -0,0 +1,263 @@
+ import sys
+ import traceback
+ from enum import Enum
+ import argparse
+ import time
+ from datetime import datetime
+ from typing import Dict
+ import hashlib
+ from collections import deque
+ import logging
+ import json
+ from io import StringIO
+ import re
+ from re import Pattern
+ import pandas as pd
+ from redis import StrictRedis
+ from redis.exceptions import ConnectionError as RedisConnectionError
+
+ from util.analytic_util import compute_candles_stats
+
+ '''
+ candles_provider.py feeds candles to redis.
+     key example: candles-BTC-USDT-SWAP-okx_linear-1h
+     key format: candles-$DENORMALIZED_SYMBOL$-$EXCHANGE_NAME$-$INTERVAL$
+
+ candles_ta_provider.py scans redis for keys matching candles-$DENORMALIZED_SYMBOL$-$EXCHANGE_NAME$-$INTERVAL$,
+ then reads the candles from redis. If the candles haven't been processed previously (tracked by a hash of the message), it performs the TA calculations.
+ After the TA calculations, it publishes the result back to redis under:
+     key example: candles_ta-BTC-USDT-SWAP-okx_linear-1h
+     key format: candles_ta-$DENORMALIZED_SYMBOL$-$EXCHANGE_NAME$-$INTERVAL$
+
+ From command prompt:
+     set PYTHONPATH=%PYTHONPATH%;D:\dev\siglab\siglab_py
+     python candles_ta_provider.py --candle_size 1h --ma_long_intervals 24 --ma_short_intervals 8 --boillenger_std_multiples 2 --redis_ttl_ms 3600000 --processed_hash_queue_max_size 999 --pypy_compatible N
+
+ This script is pypy compatible, but you need to specify --pypy_compatible Y so analytic_util skips importing scipy and statsmodels (they are not pypy compatible):
+     pypy candles_ta_provider.py --candle_size 1h --ma_long_intervals 24 --ma_short_intervals 8 --boillenger_std_multiples 2 --redis_ttl_ms 3600000 --processed_hash_queue_max_size 999 --pypy_compatible Y
+
+ launch.json if you wish to debug from VSCode:
+ {
+     "version": "0.2.0",
+     "configurations": [
+         {
+             "name": "Python Debugger: Current File",
+             "type": "debugpy",
+             "request": "launch",
+             "program": "${file}",
+             "console": "integratedTerminal",
+             "args" : [
+                 "--candle_size", "1h",
+                 "--ma_long_intervals", "24",
+                 "--ma_short_intervals", "8",
+                 "--boillenger_std_multiples", "2",
+                 "--redis_ttl_ms", "3600000",
+                 "--processed_hash_queue_max_size", "999"
+             ],
+             "env": {
+                 "PYTHONPATH": "${workspaceFolder}"
+             }
+         }
+     ]
+ }
+ '''
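+ # A minimal downstream consumer sketch for the TA output (key name illustrative; same double decode as
+ # the input side, since the stored value is json.dumps over DataFrame.to_json):
+ #
+ #   import json
+ #   import pandas as pd
+ #   from io import StringIO
+ #   from redis import StrictRedis
+ #   r = StrictRedis(host='localhost', port=6379, db=0)
+ #   raw = r.get('candles_ta-BTC-USDT-SWAP-okx_linear-1h')
+ #   if raw:
+ #       pd_candles_ta = pd.read_json(StringIO(json.loads(raw)), convert_dates=False)
+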
+ class LogLevel(Enum):
+     CRITICAL = 50
+     ERROR = 40
+     WARNING = 30
+     INFO = 20
+     DEBUG = 10
+     NOTSET = 0
+
+ param : Dict = {
+     'candle_size' : '1h', # This candles_ta_provider instance will only process candles with the interval specified by 'candle_size'
+
+     # Window sizes for the moving averages (MA) are defined below.
+     'ma_long_intervals' : 24,
+     'ma_short_intervals' : 8,
+     'boillenger_std_multiples' : 2,
+
+     # Regex corresponding to candles_publish_topic. If you want specific instances to process specific tickers only (e.g. for performance), this regex filter does the trick.
+     'candles_ta_publish_topic_regex' : r"^candles-[A-Z]+-[A-Z]+-[A-Z]+-[a-z_]+-\d+[smhdwMy]$",
+
+     # processed_hash_queue is how we avoid reprocessing already processed messages: we store hashes of the candles read in 'processed_hash_queue'.
+     # Depending on how many tickers this instance is monitoring, you may want to adjust this queue size.
+     'processed_hash_queue_max_size' : 999,
+
+     'job_name' : 'candles_ta_provider',
+
+     # Publish to message bus
+     'mds' : {
+         'topics' : {
+             'candles_publish_topic' : 'candles-$DENORMALIZED_SYMBOL$-$EXCHANGE_NAME$-$INTERVAL$', # candles_ta_provider scans redis for matching keys
+         },
+         'redis' : {
+             'host' : 'localhost',
+             'port' : 6379,
+             'db' : 0,
+             'ttl_ms' : 1000*60*15 # 15 min
+         }
+     }
+ }
+
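+ # For example, the default regex above matches 'candles-BTC-USDT-SWAP-okx_linear-1h' but not
+ # 'candles_ta-...' keys, so an instance never re-consumes its own TA output.
+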
+ logging.Formatter.converter = time.gmtime
+ logger = logging.getLogger()
+ log_level = logging.INFO # DEBUG --> INFO --> WARNING --> ERROR
+ logger.setLevel(log_level)
+ format_str = '%(asctime)s %(message)s'
+ formatter = logging.Formatter(format_str)
+ sh = logging.StreamHandler()
+ sh.setLevel(log_level)
+ sh.setFormatter(formatter)
+ logger.addHandler(sh)
+
+ def log(message : str, log_level : LogLevel = LogLevel.INFO):
+     if log_level.value<LogLevel.WARNING.value:
+         logger.info(f"{datetime.now()} {message}")
+
+     elif log_level.value==LogLevel.WARNING.value:
+         logger.warning(f"{datetime.now()} {message}")
+
+     elif log_level.value>=LogLevel.ERROR.value: # >= so CRITICAL is not silently dropped
+         logger.error(f"{datetime.now()} {message}")
+
+ def parse_args():
+     parser = argparse.ArgumentParser() # type: ignore
+
+     parser.add_argument("--candle_size", help="candle interval: 1m, 1h, 1d... etc", default='1h')
+     parser.add_argument("--ma_long_intervals", help="Window size in number of intervals for higher timeframe", default=24)
+     parser.add_argument("--ma_short_intervals", help="Window size in number of intervals for lower timeframe", default=8)
+     parser.add_argument("--boillenger_std_multiples", help="Bollinger bands: number of standard deviations", default=2)
+     parser.add_argument("--redis_ttl_ms", help="TTL for items published to redis. Default: 1000*60*60 (i.e. 1hr)", default=1000*60*60)
+     parser.add_argument("--processed_hash_queue_max_size", help="processed_hash_queue is how we avoid reprocessing already processed messages. We store hashes of candles read in 'processed_hash_queue'.", default=999)
+
+     parser.add_argument("--pypy_compatible", help="If Y, analytic_util skips importing scipy and statsmodels (slopes and divergence calc). In any case, partition_sliding_window requires scipy.stats.linregress and cannot be used with pypy. Y or N (default).", default='N')
+
+     args = parser.parse_args()
+     param['candle_size'] = args.candle_size
+     param['ma_long_intervals'] = int(args.ma_long_intervals)
+     param['ma_short_intervals'] = int(args.ma_short_intervals)
+     param['boillenger_std_multiples'] = int(args.boillenger_std_multiples)
+
+     param['redis_ttl_ms'] = int(args.redis_ttl_ms)
+     param['processed_hash_queue_max_size'] = int(args.processed_hash_queue_max_size)
+
+     param['pypy_compatible'] = (args.pypy_compatible == 'Y')
+
+ def init_redis_client() -> StrictRedis:
+     redis_client : StrictRedis = StrictRedis(
+         host = param['mds']['redis']['host'],
+         port = param['mds']['redis']['port'],
+         db = param['mds']['redis']['db'],
+         ssl = False
+     )
+     try:
+         redis_client.keys() # cheap round trip to fail fast if redis is unreachable
+     except RedisConnectionError as redis_conn_error:
+         err_msg = f"Failed to connect to redis: {param['mds']['redis']['host']}, port: {param['mds']['redis']['port']}"
+         raise ConnectionError(err_msg) from redis_conn_error
+
+     return redis_client
+
+ def work(
+     boillenger_std_multiples : float,
+     ma_long_intervals : int,
+     ma_short_intervals : int,
+     candle_size : str,
+     redis_client : StrictRedis
+ ):
+     candles_ta_publish_topic_regex : str = param['candles_ta_publish_topic_regex']
+     candles_ta_publish_topic_regex_pattern : Pattern = re.compile(candles_ta_publish_topic_regex)
+
+     # This is how we avoid processing the same message twice: we hash each message and cache the hash.
+     processed_hash_queue = deque(maxlen=param['processed_hash_queue_max_size'])
+
+     while True:
+         try:
+             keys = redis_client.keys()
+             for key in keys:
+                 try:
+                     s_key : str = key.decode("utf-8")
+                     if candles_ta_publish_topic_regex_pattern.match(s_key):
+
+                         publish_key : str = s_key.replace('candles-', 'candles_ta-')
+
+                         candles = None
+                         message = redis_client.get(key)
+                         if message:
+                             # When candles_provider.py republishes candles to the same key (i.e. overwrites it), we'd know: the hash changes.
+                             message_hash = hashlib.sha256(message).hexdigest()
+                             message = message.decode('utf-8')
+                             if message_hash not in processed_hash_queue: # Don't process what's been processed before.
+                                 processed_hash_queue.append(message_hash)
+
+                                 candles = json.loads(message)
+                                 pd_candles = pd.read_json(StringIO(candles), convert_dates=False)
+
+                                 start = time.time()
+                                 compute_candles_stats(
+                                     pd_candles=pd_candles,
+                                     boillenger_std_multiples=boillenger_std_multiples,
+                                     sliding_window_how_many_candles=ma_long_intervals,
+                                     slow_fast_interval_ratio=(ma_long_intervals/ma_short_intervals),
+                                     pypy_compat=param['pypy_compatible']
+                                 )
+                                 compute_candles_stats_elapsed_ms = int((time.time() - start) *1000)
+                                 data = pd_candles.to_json(orient='records')
+
+                                 start = time.time()
+                                 if redis_client:
+                                     '''
+                                     https://redis.io/commands/set/
+                                     '''
+                                     interval = candle_size[-1] # e.g. '1h' --> 'h' (comparing the full candle_size against 'm'/'h'/'d' would leave expiry_sec at 0)
+                                     expiry_sec : int = 0
+                                     if interval=="m":
+                                         expiry_sec = 60 + 60*15
+                                     elif interval=="h":
+                                         expiry_sec = 60*60 + 60*15
+                                     elif interval=="d":
+                                         expiry_sec = 60*60*24
+                                         expiry_sec += 60*15 # additional 15min
+
+                                     redis_client.set(name=publish_key, value=json.dumps(data).encode('utf-8'), ex=expiry_sec)
+                                     redis_set_elapsed_ms = int((time.time() - start) *1000)
+
+                                     log(f"published candles {publish_key} {sys.getsizeof(data, -1)} bytes to mds elapsed {redis_set_elapsed_ms} ms, compute_candles_stats_elapsed_ms: {compute_candles_stats_elapsed_ms}")
+                             else:
+                                 log(f"{s_key} message with hash {message_hash} has been processed previously.")
+
+                 except Exception as key_error:
+                     log(f"Failed to process {key}. Error: {key_error} {str(sys.exc_info()[0])} {str(sys.exc_info()[1])} {traceback.format_exc()}", log_level=LogLevel.ERROR)
+
+         except Exception as loop_error:
+             log(f"Error: {loop_error} {str(sys.exc_info()[0])} {str(sys.exc_info()[1])} {traceback.format_exc()}", log_level=LogLevel.ERROR)
+
+         time.sleep(1) # Don't hammer redis with back-to-back scans.
+
+ def main():
+     parse_args()
+
+     # candles_ta_provider instances go by 'candle_size'
+     param['job_name'] = "_".join([ param['job_name'], param['candle_size'] ])
+
+     # Optional file logging:
+     # fh = logging.FileHandler(f"{param['job_name']}.log")
+     # fh.setLevel(log_level)
+     # fh.setFormatter(formatter)
+     # logger.addHandler(fh)
+
+     redis_client : StrictRedis = init_redis_client()
+     work(
+         boillenger_std_multiples=param['boillenger_std_multiples'],
+         ma_long_intervals=param['ma_long_intervals'],
+         ma_short_intervals=param['ma_short_intervals'],
+         candle_size=param['candle_size'],
+         redis_client=redis_client)
+
+ main()