siglab-py 0.6.12__py3-none-any.whl → 0.6.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of siglab-py might be problematic. Click here for more details.

@@ -0,0 +1,468 @@
1
+ '''
2
+ Command line:
3
+ python macdrsi_h_tc_crypto.py --white_list_tickers BTC/USDT:USDT,ETH/USDT:USDT,BNB/USDT:USDT,SOL/USDT:USDT,XRP/USDT:USDT --reference_ticker BTC/USDT:USDT --force_reload Y --block_entries_on_impacting_ecoevents N
4
+
5
+ Debug from vscode, Launch.json:
6
+ {
7
+ "version": "0.2.0",
8
+ "configurations": [
9
+ {
10
+ "name": "Python: Current File",
11
+ "type": "python",
12
+ "request": "launch",
13
+ "program": "${file}",
14
+ "console": "integratedTerminal",
15
+ "justMyCode": true,
16
+ "args" : [
17
+ "--white_list_tickers", "BTC/USDT:USDT,ETH/USDT:USDT,BNB/USDT:USDT,SOL/USDT:USDT,XRP/USDT:USDT",
18
+ "--reference_ticker", "BTC/USDT:USDT",
19
+ "--force_reload", "Y",
20
+ "--block_entries_on_impacting_ecoevents", "N"
21
+ ]
22
+ }
23
+ ]
24
+ }
25
+ '''
26
+ import os
27
+ import sys
28
+ import argparse
29
+ import json
30
+ from datetime import datetime, timedelta, timezone
31
+ import time
32
+ from typing import Dict, List, Tuple, Any, Callable
33
+ import pandas as pd
34
+
35
+ from ccxt.base.exchange import Exchange
36
+ from ccxt.bybit import bybit
37
+
38
+ from backtest_core import parseargs, get_logger, spawn_parameters, generic_pnl_eval, generic_tp_eval, generic_sort_filter_universe, run_all_scenario, dump_trades_to_disk
39
+
40
+ PYPY_COMPAT : bool = True
41
+
42
+ sys.path.append('../gizmo')
43
+ # from market_data_gizmo import fetch_historical_price, fetch_candles, fix_column_types, compute_candles_stats, partition_sliding_window, estimate_fib_retracement
44
+ base_dir : str = f"{os.path.dirname(sys.path[0])}\\single_leg_ta"
45
+
46
+ REPORT_NAME : str = "backtest_macdrsi_crosses_strategy_15m_tc_crypto"
47
+ CACHE_CANDLES : str = f"{os.path.dirname(sys.path[0])}\\cache\\candles"
48
+
49
+ '''
50
+ white_list_tickers : List[str] = [
51
+ "BTC/USDT:USDT",
52
+ "ETH/USDT:USDT",
53
+ "BNB/USDT:USDT",
54
+ "SOL/USDT:USDT",
55
+ "XRP/USDT:USDT",
56
+ "DOGE/USDT:USDT",
57
+ "ADA/USDT:USDT",
58
+ "TRX/USDT:USDT",
59
+ "AVAX/USDT:USDT",
60
+ "LINK/USDT:USDT",
61
+ "DOT/USDT:USDT",
62
+ "TON/USDT:USDT",
63
+ "MATIC/USDT:USDT",
64
+ "SHIB/USDT:USDT",
65
+ "LTC/USDT:USDT",
66
+ "BCH/USDT:USDT",
67
+ "UNI/USDT:USDT",
68
+ "NEAR/USDT:USDT",
69
+ "ICP/USDT:USDT",
70
+ "APT/USDT:USDT"
71
+ ]
72
+ '''
73
+ white_list_tickers : List[str] = [ "SOL/USDT:USDT" ]
74
+
75
+ force_reload : bool = False
76
+
77
+ num_candles_limit = 100 # Depends on exchange but generally 100 ok!
78
+ param = {
79
+ 'apiKey' : None,
80
+ 'secret' : None,
81
+ 'password' : None, # Other exchanges dont require this! This is saved in exchange.password!
82
+ 'subaccount' : None,
83
+ 'rateLimit' : 100, # In ms
84
+ 'options' : {
85
+ 'defaultType': 'linear',
86
+ 'leg_room_bps' : 5,
87
+ 'trade_fee_bps' : 10,
88
+
89
+ 'list_ts_field' : 'listTime' # list_ts_field: Response field in exchange.markets[symbol] to indicate the timestamp of the symbol's listing date in ms. For bybit, markets['launchTime'] is the list date. For okx, it's markets['listTime'].
90
+ }
91
+ }
92
+
93
+ exchanges = [
94
+ bybit(param),
95
+ ]
96
+
97
+ exchanges[0].name='bybit_linear'
98
+
99
+ commission_bps : float = 5
100
+
101
+ '''
102
+ ******** STRATEGY_SPECIFIC parameters ********
103
+ '''
104
+ additional_trade_fields : List[str] = [
105
+ # Add fields you want to include in trade extract
106
+ ]
107
+
108
+
109
+ '''
110
+ ******** GENERIC parameters ********
111
+ '''
112
+ strategy_mode_values : List[str]= [ 'long_short'] # 'long_only', 'short_only', 'long_short'
113
+
114
+ '''
115
+ For example, Monday's are weird. Entries, SL adjustments ...etc may have STRATEGY_SPECIFIC logic around this.
116
+ '''
117
+ CAUTIOUS_DAYOFWEEK : List[int] = [ 0 ]
118
+ how_many_last_candles : int = 3
119
+ last_candles_timeframe : str = 'lo' # Either hi or lo (default)
120
+ enable_wait_entry : bool = True
121
+ enable_sliced_entry : bool = False
122
+ enable_athatl_logic : bool = False # If you have special logic in 'allow_entry_initial' or 'allow_entry_final'.
123
+
124
+ '''
125
+ Economic events comes from 'economic_calanedar.csv' in same folder.
126
+
127
+ Block entries if pending economic event in next x-intervals (applied on lo timeframe)
128
+ Set to -1 to disable this.
129
+ '''
130
+ adj_sl_on_ecoevents = False
131
+ block_entries_on_impacting_ecoevents = True
132
+ num_intervals_block_pending_ecoevents = 3
133
+ ECOEVENTS_MAPPED_REGIONS = [ 'united_states' ]
134
+
135
+ mapped_event_codes = [
136
+ 'core_inflation_rate_mom', 'core_inflation_rate_yoy',
137
+ 'inflation_rate_mom', 'inflation_rate_yoy',
138
+ 'fed_interest_rate_decision',
139
+ 'fed_chair_speech',
140
+ 'core_pce_price_index_mom',
141
+ 'core_pce_price_index_yoy',
142
+ 'unemployment_rate',
143
+ 'non_farm_payrolls',
144
+ 'gdp_growth_rate_qoq_adv',
145
+ 'gdp_growth_rate_qoq_final',
146
+ 'gdp_growth_rate_yoy'
147
+ ]
148
+
149
+ num_intervals_current_ecoevents = 8
150
+
151
+ sl_num_intervals_delay_values : List[float] = [ 15*4*8 ]
152
+ sl_hard_percent_values : List[float] = [ 2.5 ]
153
+ sl_percent_trailing_values : List[float] = [ 35 ]
154
+ use_gradual_tightened_trailing_stops : bool = True
155
+ trailing_stop_mode : str = "linear" # linear or parabolic
156
+
157
+ '''
158
+ This is for trailing stops slope calc.
159
+ Say if your trade's max profit potential is tp_max_percent=3%=300bps.
160
+ tp_min_percent = 0.3 means you will NOT TP until at least pnl > 0.3% or 30bps.
161
+ '''
162
+ tp_min_percent = 3
163
+ tp_max_percent = 5
164
+
165
+ POST_MOVE_NUM_INTERVALS : int = 24*3
166
+ POST_MOVE_PERCENT_THRESHOLD : int = 3
167
+
168
+ enable_hi_timeframe_confirm : bool = True
169
+
170
+ start_dates : List[datetime] = [
171
+ datetime(2024, 4, 1)
172
+ ]
173
+
174
+ hi_how_many_candles_values : List[Tuple[str, int, int]] = [
175
+ ('1h', 24*3, 24*572)
176
+ ]
177
+
178
+ lo_how_many_candles_values : List[Tuple[str, int, int]] = [
179
+ ('15m', 15 *10, 15*4*24 *572)
180
+ ]
181
+
182
+ hi_ma_short_vs_long_interval_values : List[Tuple[int, int]] = [ (12, 30) ]
183
+ lo_ma_short_vs_long_interval_values : List[Tuple[int, int]] = [ (5, 10) ]
184
+
185
+ rsi_sliding_window_how_many_candles : int = 14 # For RSI, 14 is standard. If you want see spikes >70 and <30, use this config.
186
+ rsi_trend_sliding_window_how_many_candles : int = 30 # This is for purpose of RSI trend identification (Locating local peaks/troughs in RSI). This should typically be multiples of 'rsi_sliding_window_how_many_candles'.
187
+ rsi_upper_threshold_values : List[float] = [ 60 ]
188
+ rsi_lower_threshold_values : List[float] = [ 40 ]
189
+ rsi_midrangeonly : bool = False
190
+
191
+ target_fib_level : float = 0.618
192
+ boillenger_std_multiples_values : List[float] = [ 2 ]
193
+ allow_entry_sit_bb : bool = True
194
+ hurst_exp_window_how_many_candles : int = 125 # For hurst, at least 125.
195
+
196
+
197
+ # 'strategy_mode' decides if strategy can long_only, short_only, long_short at get go of back test. If long_above_btc_ema_short_below==True, strategy can long at bottom only if BTC (General market) stands above say 90d EMA. Or short only if BTC below 90d EMA for the given point in time.
198
+ ref_ema_num_days_fast : int = 5
199
+ ref_ema_num_days_slow : int = 90
200
+ long_above_ref_ema_short_below : bool = True
201
+ ref_price_vs_ema_percent_threshold : float = 2
202
+ ath_atl_close_gap_threshold_percent : float = 3
203
+
204
+ ema_short_slope_threshold_values : List[float] = [ 999 ] # 999 essentially turn it off
205
+
206
+ initial_cash_values : List[float] = [ 100000 ]
207
+
208
+ entry_percent_initial_cash_values : List[float] = [ 70 ]
209
+ target_position_size_percent_total_equity_values : List[float] = [ 100 ]
210
+ min_volume_usdt_threshold_values : List[float] = [ 100000 ]
211
+ clip_order_notional_to_best_volumes : bool = False
212
+ constant_order_notional : bool = True if min(start_dates) <= datetime(2024,1,1) else False # This is to avoid a snowball effect in long-dated back tests
213
+
214
+ dayofweek_adj_map_order_notional : Dict = {
215
+ 0 : 1,
216
+ 1 : 1,
217
+ 2 : 1,
218
+ 3 : 1,
219
+ 4 : 1,
220
+ 5 : 1,
221
+ 6 : 1
222
+ }
223
+
224
+ dayofweek_sl_adj_map : Dict = {
225
+ 0 : 1,
226
+ 1 : 1,
227
+ 2 : 1,
228
+ 3 : 1,
229
+ 4 : 1,
230
+ 5 : 1,
231
+ 6 : 0.5
232
+ }
233
+
234
+ # Segmentation related parameters https://norman-lm-fung.medium.com/time-series-slicer-and-price-pattern-extractions-81f9dd1108fd
235
+ sliding_window_ratio : float = 16
236
+ smoothing_window_size_ratio : int = 3
237
+ linregress_stderr_threshold : float = 10
238
+ max_recur_depth : int = 2
239
+ min_segment_size_how_many_candles : int = 15
240
+ segment_consolidate_slope_ratio_threshold : float = 2
241
+ sideway_price_condition_threshold : float = 0.05 # i.e. If price stays within 5% between start and close, it's considered a 'Sideway' market.
242
+
243
+ ECONOMIC_CALENDARS_FILE : str = "economic_calanedar_archive.csv"
244
+
245
+ default_level_granularity : float = 0.001
246
+
247
+ args = parseargs()
248
+ force_reload = args['force_reload']
249
+ white_list_tickers : List[str] = args['white_list_tickers']
250
+ reference_ticker : str = args['reference_ticker']
251
+ block_entries_on_impacting_ecoevents = args['block_entries_on_impacting_ecoevents']
252
+ enable_sliced_entry = args['enable_sliced_entry']
253
+ asymmetric_tp_bps : int = args['asymmetric_tp_bps']
254
+
255
+ full_report_name = f"{REPORT_NAME}_{start_dates[0].strftime('%Y%m%d')}"
256
+ trade_extract_filename : str = f"{full_report_name}_{white_list_tickers[0].replace(':','').replace('/','')}_trades.csv"
257
+
258
+ logger = get_logger(full_report_name)
259
+
260
+ import inspect
261
+ import builtins
262
+ def is_external(obj):
263
+ if inspect.ismodule(obj):
264
+ return True
265
+ module = getattr(obj, '__module__', None)
266
+ return module and not module.startswith('__') # Exclude built-in/dunder modules
267
+
268
+ local_vars = {
269
+ k: v
270
+ for k, v in locals().items()
271
+ if not (k.startswith('__') and k.endswith('__')) # Exclude dunders
272
+ and not is_external(v) # Exclude anything from external modules
273
+ }
274
+
275
+ algo_params : List[Dict] = spawn_parameters(local_vars)
276
+
277
+ logger.info(f"#algo_params: {len(algo_params)}")
278
+
279
+
280
+ '''
281
+ ******** STRATEGY_SPECIFIC Logic here ********
282
+ a. order_notional_adj
283
+ Specific logic to adjust order sizes based on market condition(s) for example.
284
+ b. entry (initial + final)
285
+ 'allow_entry_initial' is first pass entry conditions determination.
286
+ If 'allow_entry_initial' allow entry, 'allow_entry_final' will perform the second pass entry condition determinations.
287
+ 'allow_entry_final' is generally for more expensive operations, keep 'allow_entry_initial' fast and nimble.
288
+ c. 'pnl_eval' (You may wish to use specific prices to mark your TPs)
289
+ d. 'tp_eval' (Logic to fire TP)
290
+ e. 'sl_adj'
291
+ Adjustment to sl_percent_hard
292
+ f. 'trailing_stop_threshold_eval'
293
+ g. 'sort_filter_universe' (optional, if 'white_list_tickers' only has one ticker for example, then you don't need bother)
294
+ h. 'additional_trade_fields' to be included in the trade extract file
295
+ '''
296
def order_notional_adj(
    algo_param : Dict,
) -> Dict[str, float]:
    """Compute the target order notional as a fixed percentage of initial cash.

    Reads 'initial_cash' and 'entry_percent_initial_cash' from *algo_param*
    and returns {'target_order_notional': initial_cash * pct / 100}.
    """
    cash : float = algo_param['initial_cash']
    pct : float = algo_param['entry_percent_initial_cash']
    return { 'target_order_notional' : cash * pct / 100 }
305
+
306
def allow_entry_initial(
    lo_row_tm1,
    hi_row_tm1
) -> Dict[str, bool]:
    """First-pass entry gate: evaluate both sides via ``_allow_entry_initial``.

    Returns {'long': <allowed>, 'short': <allowed>} for the t-1 candles.
    """
    return {
        side : _allow_entry_initial(side, lo_row_tm1, hi_row_tm1)
        for side in ('long', 'short')
    }
314
+ def _allow_entry_initial(
315
+ long_or_short : str, # long or short
316
+ lo_row_tm1,
317
+ hi_row_tm1
318
+ ) -> Dict[str, bool]:
319
+ if long_or_short == "long":
320
+ if (
321
+ lo_row_tm1['macd_cross'] == 'bullish'
322
+ and (
323
+ lo_row_tm1.name >= lo_row_tm1['macd_bullish_cross_last_id']
324
+ and
325
+ (lo_row_tm1.name - lo_row_tm1['macd_bullish_cross_last_id']) < 5
326
+ )
327
+ and lo_row_tm1['rsi_trend']=="up"
328
+ and lo_row_tm1['close']>hi_row_tm1['ema_close']
329
+ ):
330
+ return True
331
+ else:
332
+ return False
333
+ elif long_or_short == "short":
334
+ if (
335
+ lo_row_tm1['macd_cross'] == 'bearish'
336
+ and (
337
+ lo_row_tm1.name >= lo_row_tm1['macd_bearish_cross_last_id']
338
+ and
339
+ (lo_row_tm1.name - lo_row_tm1['macd_bearish_cross_last_id']) < 5
340
+ )
341
+ and lo_row_tm1['rsi_trend']=="down"
342
+ and lo_row_tm1['close']<hi_row_tm1['ema_close']
343
+ ):
344
+ return True
345
+ else:
346
+ return False
347
+
348
def allow_entry_final(
    lo_row,
    algo_param : Dict
) -> Dict[str, Any]:
    """Second-pass entry check: this strategy performs no extra filtering and
    always allows both sides, but computes entry/target prices for marking
    the entry trades.

    Entry price is the current candle's open; targets are offset by the
    trade's full profit potential (``tp_max_percent`` expressed in bps).

    Fixes vs original: return annotation said ``-> bool`` but a dict is
    returned; local named ``open`` shadowed the builtin (renamed); unused
    reads of 'timestamp_ms' / 'reference_ticker' removed.
    """
    open_price : float = lo_row['open']

    entry_price_long, entry_price_short = open_price, open_price
    allow_long, allow_short = True, True
    reference_price = None

    # Full profit potential in bps, e.g. tp_max_percent=5 -> 500 bps.
    pnl_potential_bps = algo_param['tp_max_percent'] * 100

    target_price_long = entry_price_long * (1 + pnl_potential_bps/10000)
    target_price_short = entry_price_short * (1 - pnl_potential_bps/10000)

    return {
        'long' : allow_long,
        'short' : allow_short,

        # In additional to allow or not, allow_entry_final also calculate a few things which you may need to mark the entry trades.
        'entry_price_long' : entry_price_long,
        'entry_price_short' : entry_price_short,
        'target_price_long' : target_price_long,
        'target_price_short' : target_price_short,
        'reference_price' : reference_price
    }
377
+
378
+ allow_slice_entry = allow_entry_initial
379
+
380
def sl_adj(
    max_unrealized_pnl_live : float,
    current_position_usdt : float,
    algo_param : Dict
) -> Dict[str, float]:
    """Hard stop-loss adjustment hook: this strategy applies no adjustment and
    simply echoes ``sl_hard_percent`` from the algo parameters.

    The pnl/position arguments are part of the backtest_core callback
    contract but unused here. Fix vs original: a dead computation divided
    ``max_unrealized_pnl_live`` by ``current_position_usdt`` (its result was
    never used) and would raise ZeroDivisionError on a flat book — removed.
    """
    return {
        'running_sl_percent_hard' : algo_param['sl_hard_percent']
    }
391
+
392
def trailing_stop_threshold_eval(
    algo_param : Dict
) -> Dict[str, float]:
    """Expose the TP band (min/max percent) used by the trailing-stop slope
    calculation, copied straight from the algo parameters.
    """
    return {
        key : algo_param[key]
        for key in ('tp_min_percent', 'tp_max_percent')
    }
401
+
402
def pnl_eval (
    this_candle,
    lo_row_tm1,
    running_sl_percent_hard : float,
    this_ticker_open_trades : List[Dict],
    algo_param : Dict
) -> Dict[str, float]:
    '''
    Mark open trades' pnl for the current candle.

    Pure delegation to backtest_core's generic_pnl_eval. Both tp indicator
    names are passed as None, so generic_pnl_eval evaluates TP off the
    trades' target prices rather than an indicator column (see tp_eval's
    note on keeping the two congruent).

    NOTE: lo_row_tm1 is part of the strategy callback signature but is not
    forwarded to generic_pnl_eval.
    '''
    return generic_pnl_eval(
        this_candle,
        running_sl_percent_hard,
        this_ticker_open_trades,
        algo_param,
        long_tp_indicator_name=None,
        short_tp_indicator_name=None
    )
417
+
418
def tp_eval (
    this_ticker_open_positions_side : str,
    lo_row,
    this_ticker_open_trades : List[Dict],
    algo_param : Dict
) -> bool:
    '''
    Decide whether to fire TP on the current lo-timeframe row.

    Be very careful, backtest_core 'generic_pnl_eval' may use a) some indicator (tp_indicator_name), or b) target_price to evaluate 'unrealized_pnl_tp'.
    'tp_eval' only return True or False but it needs be congruent with backtest_core 'generic_pnl_eval', otherwise incorrect rosy pnl may be reported.

    NOTE: this_ticker_open_positions_side and algo_param are part of the
    callback signature but are not forwarded to generic_tp_eval.
    '''
    return generic_tp_eval(lo_row, this_ticker_open_trades)
429
+
430
def sort_filter_universe(
    tickers : List[str],
    exchange : Exchange,

    # Use "i" (row index) to find current/last interval's market data or TAs from "all_exchange_candles"
    i,
    all_exchange_candles : Dict[str, Dict[str, Dict[str, pd.DataFrame]]],

    max_num_tickers : int = 10
) -> List[str]:
    '''
    Rank and trim the tradeable universe for the current interval.

    Pure delegation to backtest_core's generic_sort_filter_universe, passing
    every argument through unchanged (kept so the strategy can later override
    the ranking without touching run_all_scenario wiring).
    '''
    return generic_sort_filter_universe(
        tickers=tickers,
        exchange=exchange,
        i=i,
        all_exchange_candles=all_exchange_candles,
        max_num_tickers=max_num_tickers
    )
447
+
448
+ algo_results : List[Dict] = run_all_scenario(
449
+ algo_params=algo_params,
450
+ exchanges=exchanges,
451
+ order_notional_adj_func=order_notional_adj,
452
+ allow_entry_initial_func=allow_entry_initial,
453
+ allow_entry_final_func=allow_entry_final,
454
+ allow_slice_entry_func=allow_slice_entry,
455
+ sl_adj_func=sl_adj,
456
+ trailing_stop_threshold_eval_func=trailing_stop_threshold_eval,
457
+ pnl_eval_func=pnl_eval,
458
+ tp_eval_func=tp_eval,
459
+ sort_filter_universe_func=sort_filter_universe,
460
+
461
+ logger=logger
462
+ )
463
+
464
+ dump_trades_to_disk(
465
+ algo_results,
466
+ trade_extract_filename,
467
+ logger
468
+ )
@@ -1,6 +1,7 @@
1
1
  import unittest
2
2
  from datetime import datetime, timedelta
3
3
  from typing import Union
4
+ import logging
4
5
  from pathlib import Path
5
6
 
6
7
  from util.market_data_util import *
@@ -107,7 +108,8 @@ class MarketDataUtilTests(unittest.TestCase):
107
108
  end_ts=end_date.timestamp(),
108
109
  exchange=exchange,
109
110
  normalized_symbols=normalized_symbols,
110
- candle_size='1h'
111
+ candle_size='1h',
112
+ logger=logging.getLogger()
111
113
  )[normalized_symbols[0]]
112
114
 
113
115
  assert pd_candles is not None
@@ -1,7 +1,9 @@
1
1
  import unittest
2
2
  from typing import List, Dict, Union
3
3
 
4
- from util.simple_math import generate_rand_nums, round_to_level, bucket_series, bucketize_val
4
+ from numpy import equal
5
+
6
+ from util.simple_math import generate_rand_nums, round_to_level, compute_adjacent_levels, bucket_series, bucketize_val
5
7
 
6
8
  class SimpleMathTests(unittest.TestCase):
7
9
 
@@ -107,6 +109,21 @@ class SimpleMathTests(unittest.TestCase):
107
109
  print(f"{price} rounded to: {rounded_price}")
108
110
  assert(rounded_price==expected)
109
111
 
112
+ def test_compute_adjacent_levels(self):
113
+ gold_price = 4450
114
+ level_granularity = 0.025 # So levels are $100 apart
115
+ adjacent_levels = compute_adjacent_levels(num=gold_price, level_granularity=level_granularity, num_levels_per_side=3)
116
+ assert(adjacent_levels)
117
+ assert(len(adjacent_levels)==7)
118
+ equal(adjacent_levels, [4100,4200,4300,4400,4500,4600,4700])
119
+
120
+ btc_price = 95000
121
+ level_granularity = 0.01 # So levels are $1000 apart
122
+ adjacent_levels = compute_adjacent_levels(num=btc_price, level_granularity=level_granularity, num_levels_per_side=3)
123
+ assert(adjacent_levels)
124
+ assert(len(adjacent_levels)==7)
125
+ equal(adjacent_levels, [92000,93000,94000,95000,96000,97000,98000])
126
+
110
127
  def test_bucket_series(self):
111
128
 
112
129
  level_granularity : float = 0.1
@@ -0,0 +1,39 @@
1
+ from datetime import datetime, timedelta
2
+ from typing import Dict
3
+
4
# Hoisted: built once at import time instead of on every call.
# NOTE: 'Thur' (not the conventional 'Thu') is the expected spelling in specs.
_DAY_OF_WEEK_MAP : Dict[str, int] = {
    'Mon' : 0,
    'Tue' : 1,
    'Wed' : 2,
    'Thur' : 3,
    'Fri' : 4,
    'Sat' : 5,
    'Sun' : 6
}

def _resolve_window_endpoint(today : datetime, spec : str) -> datetime:
    """Turn a '<Day>_<HH>:<MM>' spec (e.g. 'Mon_09:30') into a datetime in
    *today*'s week, preserving today's seconds/microseconds."""
    dayofweek : int = _DAY_OF_WEEK_MAP[spec.split('_')[0]]
    hr : int = int(spec.split('_')[-1].split(':')[0])
    minute : int = int(spec.split('_')[-1].split(':')[1])
    dt = today + timedelta(days=(dayofweek - today.weekday()))
    return dt.replace(hour=hr, minute=minute)

def parse_trading_window(
    today : datetime,
    window : Dict[str, str]
) -> Dict:
    """Resolve a weekly trading window spec against *today*.

    *window* holds 'start'/'end' specs of the form '<Day>_<HH>:<MM>'
    (e.g. {'start': 'Mon_09:30', 'end': 'Fri_17:00'}).

    Returns {'today', 'start', 'end', 'in_window'} where start/end are the
    resolved datetimes in the week containing *today* and 'in_window' is
    start <= today <= end. Note: both endpoints are resolved relative to
    *today*'s week, so a window wrapping a week boundary (e.g. Sat -> Mon)
    yields end < start and in_window False — same as the original.
    """
    dt_window_start = _resolve_window_endpoint(today, window['start'])
    dt_window_end = _resolve_window_endpoint(today, window['end'])

    return {
        'today' : today,
        'start' : dt_window_start,
        'end' : dt_window_end,
        'in_window' : (today<=dt_window_end) and (today>=dt_window_start)
    }
@@ -591,7 +591,8 @@ def fetch_candles(
591
591
  exchange=exchange,
592
592
  normalized_symbols=normalized_symbols,
593
593
  candle_size=candle_size,
594
- num_candles_limit=num_candles_limit
594
+ num_candles_limit=num_candles_limit,
595
+ logger=logger
595
596
  )
596
597
  if num_intervals!=1:
597
598
  for symbol in exchange_candles:
@@ -656,10 +657,9 @@ def _fetch_candles_ccxt(
656
657
  exchange,
657
658
  normalized_symbols : List[str],
658
659
  candle_size : str,
659
- num_candles_limit : int = 100
660
+ num_candles_limit : int = 100,
661
+ logger = None
660
662
  ) -> Dict[str, Union[pd.DataFrame, None]]:
661
- logger = logging.getLogger()
662
-
663
663
  rsp = {}
664
664
 
665
665
  exchange.load_markets()
@@ -667,7 +667,7 @@ def _fetch_candles_ccxt(
667
667
  num_tickers = len(normalized_symbols)
668
668
  i = 0
669
669
  for ticker in normalized_symbols:
670
- @retry(num_attempts=3, pause_between_retries_ms=1000)
670
+ @retry(num_attempts=3, pause_between_retries_ms=1000, logger=logger)
671
671
  def _fetch_ohlcv(exchange, symbol, timeframe, since, limit, params) -> Union[List, NoReturn]:
672
672
  one_timeframe = f"1{timeframe[-1]}"
673
673
  candles = exchange.fetch_ohlcv(symbol=symbol, timeframe=one_timeframe, since=since, limit=limit, params=params)
@@ -690,7 +690,8 @@ def _fetch_candles_ccxt(
690
690
  raise ValueError(f"Invalid candle_size {candle_size}")
691
691
  return num_intervals * increment
692
692
 
693
- logger.info(f"{i}/{num_tickers} Fetching {candle_size} candles for {ticker}.")
693
+ if logger:
694
+ logger.info(f"{i}/{num_tickers} Fetching {candle_size} candles for {ticker}.")
694
695
 
695
696
  '''
696
697
  It uses a while loop to implement a sliding window to download candles between start_ts and end_ts.
@@ -2,7 +2,8 @@ import time
2
2
 
3
3
  def retry(
4
4
  num_attempts : int = 1,
5
- pause_between_retries_ms : int = 1000
5
+ pause_between_retries_ms : int = 1000,
6
+ logger = None
6
7
  ):
7
8
  def decorator(method):
8
9
  def wrapper(*args, **kw):
@@ -15,6 +16,10 @@ def retry(
15
16
  except Exception as retry_error:
16
17
  if i==(num_attempts-1):
17
18
  err_msg = f"retry_util.retry gave up {method.__name__} after {num_attempts} calls. {args} {kw}. {retry_error}"
19
+ if logger:
20
+ logger.error(err_msg)
21
+ else:
22
+ print(err_msg)
18
23
  raise Exception(err_msg) from retry_error
19
24
  finally:
20
25
  time.sleep(int(pause_between_retries_ms/1000))
@@ -32,6 +32,18 @@ def generate_rand_nums(
32
32
 
33
33
  return result
34
34
 
35
def compute_level_increment(
    num : float,
    level_granularity : float = 0.01
) -> float:
    """Round ``num * level_granularity`` to one significant digit, giving the
    spacing between adjacent psychological levels.

    e.g. num=4450, level_granularity=0.025 -> raw 111.25 -> 100.
    NaN passes through unchanged; a zero raw increment raises ValueError
    from ``math.log10`` (as in the original).
    """
    if math.isnan(num):
        return num
    raw_increment = num * level_granularity
    # Order of magnitude of the raw increment (111.25 -> 2 -> base 100).
    order = math.floor(math.log10(abs(raw_increment)))
    power_of_ten = 10 ** order
    return round(raw_increment / power_of_ten) * power_of_ten
46
+
35
47
  # https://norman-lm-fung.medium.com/levels-are-psychological-7176cdefb5f2
36
48
  def round_to_level(
37
49
  num : float,
@@ -39,13 +51,23 @@ def round_to_level(
39
51
  ) -> float:
40
52
  if math.isnan(num):
41
53
  return num
42
- level_size = num * level_granularity
43
- magnitude = math.floor(math.log10(abs(level_size)))
44
- base_increment = 10 ** magnitude
45
- rounded_level_size = round(level_size / base_increment) * base_increment
54
+ rounded_level_size = compute_level_increment(num, level_granularity)
46
55
  rounded_num = round(num / rounded_level_size) * rounded_level_size
47
56
  return rounded_num
48
57
 
58
def compute_adjacent_levels(
    num : float,
    level_granularity : float = 0.01,
    num_levels_per_side : int = 1
) -> Union[None, List[float]]:
    """Return ``2*num_levels_per_side + 1`` evenly spaced psychological levels
    centred on ``num`` rounded to its level grid, in ascending order.

    e.g. num=95000, level_granularity=0.01, num_levels_per_side=3 ->
    [92000, 93000, 94000, 95000, 96000, 97000, 98000].
    Returns None for NaN input.

    Idiom cleanup vs original: the reversed-descending + centre + ascending
    three-way concatenation (with redundant ``list(range(...))`` wrappers) is
    replaced by one comprehension over offsets -n..n; results are identical.
    """
    if math.isnan(num):
        return None
    rounded_level_size = compute_level_increment(num, level_granularity)
    rounded_num = round(num / rounded_level_size) * rounded_level_size
    return [
        rounded_num + offset * rounded_level_size
        for offset in range(-num_levels_per_side, num_levels_per_side + 1)
    ]
70
+
49
71
  def bucket_series(
50
72
  values : List[float],
51
73
  outlier_threshold_percent : float = 0,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: siglab_py
3
- Version: 0.6.12
3
+ Version: 0.6.16
4
4
  Summary: Market data fetches, TA calculations and generic order gateway.
5
5
  Author: r0bbarh00d
6
6
  Author-email: r0bbarh00d <r0bbarh00d@gmail.com>