siglab-py 0.5.30__py3-none-any.whl → 0.6.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of siglab-py might be problematic. Click here for more details.

Files changed (34) hide show
  1. siglab_py/backtests/__init__.py +0 -0
  2. siglab_py/backtests/backtest_core.py +2371 -0
  3. siglab_py/backtests/coinflip_15m_crypto.py +432 -0
  4. siglab_py/backtests/fibonacci_d_mv_crypto.py +541 -0
  5. siglab_py/backtests/macdrsi_crosses_15m_tc_crypto.py +468 -0
  6. siglab_py/constants.py +5 -0
  7. siglab_py/exchanges/binance.py +38 -0
  8. siglab_py/exchanges/deribit.py +83 -0
  9. siglab_py/exchanges/futubull.py +11 -2
  10. siglab_py/market_data_providers/candles_provider.py +2 -2
  11. siglab_py/market_data_providers/candles_ta_provider.py +3 -3
  12. siglab_py/market_data_providers/futu_candles_ta_to_csv.py +6 -4
  13. siglab_py/market_data_providers/google_monitor.py +320 -0
  14. siglab_py/market_data_providers/orderbooks_provider.py +15 -12
  15. siglab_py/market_data_providers/tg_monitor.py +6 -2
  16. siglab_py/market_data_providers/{test_provider.py → trigger_provider.py} +9 -8
  17. siglab_py/ordergateway/encrypt_keys_util.py +1 -1
  18. siglab_py/ordergateway/gateway.py +97 -35
  19. siglab_py/tests/integration/market_data_util_tests.py +37 -1
  20. siglab_py/tests/unit/analytic_util_tests.py +37 -10
  21. siglab_py/tests/unit/simple_math_tests.py +252 -0
  22. siglab_py/tests/unit/trading_util_tests.py +0 -21
  23. siglab_py/util/analytic_util.py +195 -33
  24. siglab_py/util/datetime_util.py +39 -0
  25. siglab_py/util/market_data_util.py +184 -65
  26. siglab_py/util/notification_util.py +1 -1
  27. siglab_py/util/retry_util.py +6 -1
  28. siglab_py/util/simple_math.py +262 -0
  29. siglab_py/util/trading_util.py +0 -12
  30. {siglab_py-0.5.30.dist-info → siglab_py-0.6.16.dist-info}/METADATA +1 -1
  31. siglab_py-0.6.16.dist-info/RECORD +50 -0
  32. {siglab_py-0.5.30.dist-info → siglab_py-0.6.16.dist-info}/WHEEL +1 -1
  33. siglab_py-0.5.30.dist-info/RECORD +0 -39
  34. {siglab_py-0.5.30.dist-info → siglab_py-0.6.16.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,252 @@
1
+ import unittest
2
+ from typing import List, Dict, Union
3
+
4
+ from numpy import equal
5
+
6
+ from util.simple_math import generate_rand_nums, round_to_level, compute_adjacent_levels, bucket_series, bucketize_val
7
+
8
class SimpleMathTests(unittest.TestCase):
    '''
    Unit tests for siglab_py.util.simple_math: random number generation,
    price-level rounding, adjacent-level computation and series bucketing.
    '''

    def _check_rand_nums(
        self,
        range_min : float,
        range_max : float,
        size : int,
        percentage_in_range : float,
        abs_min : float,
        abs_max : float
    ) -> List[float]:
        '''
        Generate `size` random numbers and assert the generator's contract:
        a. exactly `size` values are produced,
        b. exactly `percentage_in_range` percent of them fall inside
           [range_min, range_max],
        c. no value falls outside the hard caps [abs_min, abs_max].

        Returns the generated list so callers can reuse it.
        '''
        rand_nums : List[float] = generate_rand_nums(
            range_min = range_min,
            range_max = range_max,
            size = size,
            percent_in_range = percentage_in_range,
            abs_min = abs_min,
            abs_max = abs_max
        )
        assert(len(rand_nums)==size)
        assert(len([x for x in rand_nums if x>=range_min and x<=range_max]) == (percentage_in_range/100) * size)
        assert(len([x for x in rand_nums if x<abs_min or x>abs_max]) == 0)
        return rand_nums

    def test_generate_rand_nums(self):
        # Same in-range percentage throughout; vary ranges and hard caps.
        self._check_rand_nums(0, 1, 100, 91, -0.5, 1.1)
        self._check_rand_nums(-1, 1, 100, 91, -1.5, 1.5)
        self._check_rand_nums(0, 100, 100, 91, -150, 150)
        self._check_rand_nums(-100, 100, 100, 91, -150, 150)

    def test_round_to_level(self):
        # Each entry: raw price and its expected rounding at 1% granularity.
        prices = [
            { 'price' : 15080, 'rounded' : 15000},
            { 'price' : 15180, 'rounded' : 15200},
            { 'price' : 25080, 'rounded' : 25200},
            { 'price' : 25180, 'rounded' : 25200},
            { 'price' : 25380, 'rounded' : 25500},
            { 'price' : 95332, 'rounded' : 95000},
            { 'price' : 95878, 'rounded' : 96000},
            { 'price' : 103499, 'rounded' : 103000},
            { 'price' : 103500, 'rounded' : 104000},
            { 'price' : 150800, 'rounded' : 150000},
            { 'price' : 151800, 'rounded' : 152000}
        ]
        for entry in prices:
            price = entry['price']
            expected = entry['rounded']
            rounded_price = round_to_level(price, level_granularity=0.01)
            print(f"{price} rounded to: {rounded_price}")
            assert(rounded_price==expected)

    def test_compute_adjacent_levels(self):
        gold_price = 4450
        level_granularity = 0.025 # So levels are $100 apart
        adjacent_levels = compute_adjacent_levels(num=gold_price, level_granularity=level_granularity, num_levels_per_side=3)
        assert(adjacent_levels)
        assert(len(adjacent_levels)==7)
        # BUGFIX: np.equal(...) returned an (unused) elementwise array, so the
        # expected level values were never actually asserted. Assert for real.
        assert(list(adjacent_levels) == [4100,4200,4300,4400,4500,4600,4700])

        btc_price = 95000
        level_granularity = 0.01 # So levels are $1000 apart
        adjacent_levels = compute_adjacent_levels(num=btc_price, level_granularity=level_granularity, num_levels_per_side=3)
        assert(adjacent_levels)
        assert(len(adjacent_levels)==7)
        assert(list(adjacent_levels) == [92000,93000,94000,95000,96000,97000,98000])

    def test_bucket_series(self):
        level_granularity : float = 0.1
        size : int = 100
        percentage_in_range : float = 91

        # (range_min, range_max, abs_min, abs_max) scenarios: narrow unit
        # ranges, symmetric ranges, wide positive-only, and BTC-sized prices.
        scenarios = [
            (0, 1, -0.5, 1.1),
            (-1, 1, -1.5, 1.5),
            (0, 100, -0.5, 150),
            (-100, 100, -150, 150),
            (20_000, 120_000, 15_000, 130_000),
        ]
        for range_min, range_max, abs_min, abs_max in scenarios:
            rand_nums : List[float] = generate_rand_nums(
                range_min = range_min,
                range_max = range_max,
                size = size,
                percent_in_range = percentage_in_range,
                abs_min = abs_min,
                abs_max = abs_max
            )

            buckets : Dict[
                str,
                Dict[str,Union[float, List[float]]]
            ] = bucket_series(
                values = rand_nums,
                outlier_threshold_percent = 10,
                level_granularity=level_granularity
            )

            # Every generated value (outliers included) must be mappable back
            # into the computed buckets without error.
            bucketized = [bucketize_val(num, buckets=buckets) for num in rand_nums]
            assert(len(bucketized)==len(rand_nums))
@@ -63,24 +63,3 @@ class TradingUtilTests(unittest.TestCase):
63
63
  default_effective_tp_trailing_percent = default_effective_tp_trailing_percent
64
64
  )
65
65
  assert(effective_tp_trailing_percent==0) # Most tight trailing SL
66
-
67
- def test_round_to_level(self):
68
- prices = [
69
- { 'price' : 15080, 'rounded' : 15000},
70
- { 'price' : 15180, 'rounded' : 15200},
71
- { 'price' : 25080, 'rounded' : 25200},
72
- { 'price' : 25180, 'rounded' : 25200},
73
- { 'price' : 25380, 'rounded' : 25500},
74
- { 'price' : 95332, 'rounded' : 95000},
75
- { 'price' : 95878, 'rounded' : 96000},
76
- { 'price' : 103499, 'rounded' : 103000},
77
- { 'price' : 103500, 'rounded' : 104000},
78
- { 'price' : 150800, 'rounded' : 150000},
79
- { 'price' : 151800, 'rounded' : 152000}
80
- ]
81
- for entry in prices:
82
- price = entry['price']
83
- expected = entry['rounded']
84
- rounded_price = round_to_level(price, level_granularity=0.01)
85
- print(f"{price} rounded to: {rounded_price}")
86
- assert(rounded_price==expected)
@@ -11,9 +11,48 @@ from hurst import compute_Hc # compatible with pypy
11
11
  from ccxt.base.exchange import Exchange as CcxtExchange
12
12
  from ccxt import deribit
13
13
 
14
+ from siglab_py.util.simple_math import bucket_series, bucketize_val
14
15
  from siglab_py.util.market_data_util import fix_column_types
15
16
  from siglab_py.constants import TrendDirection
16
17
 
18
def classify_candle(
    candle : pd.Series,
    min_candle_height_ratio : float = 5,
    distance_from_mid_doji_threshold_bps : float = 10
) -> Union[str, None]:
    '''
    Classify a single OHLC candle into a basic candlestick pattern.

    A candle qualifies only when its full height (high - low) is at least
    `min_candle_height_ratio` times its body height (|close - open|), i.e.
    the wicks dominate the body.

    Classes returned:
        'doji'          : body midpoint within
                          `distance_from_mid_doji_threshold_bps` basis points
                          of the full-range midpoint.
        'hammer'        : body sits in the upper half of the range
                          (long lower wick).
        'shooting_star' : body sits in the lower half of the range
                          (long upper wick).
        None            : no pattern matched.

    Parameters:
        candle : pd.Series (or mapping) with 'open', 'high', 'low', 'close'.
                 Assumes strictly positive prices -- the bps distance is a
                 ratio of midpoints; TODO confirm for instruments that can
                 print <= 0.
        min_candle_height_ratio : minimum full-height / body-height ratio.
        distance_from_mid_doji_threshold_bps : doji tolerance in bps.

    BUGFIX vs previous version: the doji branch was evaluated last, but the
    earlier hammer test (close > low) matched almost every candle, leaving
    'doji' unreachable and 'shooting_star' nearly so. The doji test now runs
    first, and hammer vs shooting star is decided by where the body sits
    relative to the full-range midpoint.
    '''
    candle_class : Union[str, None] = None
    open_px = candle['open'] # renamed to avoid shadowing builtin open()
    high = candle['high']
    low = candle['low']
    close = candle['close']
    candle_full_height = high - low # always positive
    candle_body_height = close - open_px # can be negative (red candle)
    candle_full_mid = (high + low)/2
    candle_body_mid = (open_px + close)/2
    # Distance between body midpoint and full-range midpoint, in basis points
    # of the smaller midpoint (always >= 0).
    distance_from_mid_bps = (candle_full_mid/candle_body_mid -1)*10000 if candle_full_mid>candle_body_mid else (candle_body_mid/candle_full_mid -1)*10000

    # inf when the body is flat (open == close): wick-only candle.
    candle_height_ratio = candle_full_height / abs(candle_body_height) if candle_body_height!=0 else float('inf')

    if candle_height_ratio>=min_candle_height_ratio:
        if distance_from_mid_bps<=distance_from_mid_doji_threshold_bps:
            candle_class = 'doji'
        elif candle_body_mid>candle_full_mid:
            candle_class = 'hammer'
        elif candle_body_mid<candle_full_mid:
            candle_class = 'shooting_star'

    # Keep adding more ...

    return candle_class
55
+
17
56
  # Fibonacci
18
57
  MAGIC_FIB_LEVELS = [0, 0.236, 0.382, 0.5, 0.618, 0.786, 1.00, 1.618, 2.618, 3.618, 4.236]
19
58
 
@@ -83,14 +122,15 @@ def trend_from_lows(series: np.ndarray) -> float:
83
122
  '''
84
123
  compute_candles_stats will calculate typical/basic technical indicators using in many trading strategies:
85
124
  a. Basic SMA/EMAs (And slopes)
86
- b. ATR
87
- c. Boillenger bands (Yes incorrect spelling sorry)
88
- d. FVG
89
- e. Hurst Exponent
90
- f. RSI, MFI
91
- g. MACD
92
- h. Fibonacci
93
- i. Inflections points: where 'close' crosses EMA from above or below.
125
+ b. EMA crosses
126
+ c. ATR
127
+ d. Boillenger bands (Yes incorrect spelling sorry)
128
+ e. FVG
129
+ f. Hurst Exponent
130
+ g. RSI, MFI
131
+ h. MACD
132
+ i. Fibonacci
133
+ j. Inflections points: where 'close' crosses EMA from above or below.
94
134
 
95
135
  Parameters:
96
136
  a. boillenger_std_multiples: For boillenger upper and lower calc
@@ -119,7 +159,14 @@ def compute_candles_stats(
119
159
  target_fib_level : float = 0.618,
120
160
  pypy_compat : bool = True
121
161
  ):
162
+ BUCKETS_m0_100 = bucket_series(
163
+ values=list([i for i in range(0,100)]),
164
+ outlier_threshold_percent=10,
165
+ level_granularity=0.1
166
+ )
167
+
122
168
  pd_candles['candle_height'] = pd_candles['high'] - pd_candles['low']
169
+ pd_candles['candle_body_height'] = pd_candles['close'] - pd_candles['open']
123
170
 
124
171
  '''
125
172
  market_data_gizmo inserted dummy lines --> Need exclude those or "TypeError: unorderable types for comparison": pd_btc_candles = pd_btc_candles[pd_btc_candles.close.notnull()]
@@ -136,12 +183,13 @@ def compute_candles_stats(
136
183
 
137
184
  pd_candles['is_green'] = pd_candles['close'] >= pd_candles['open']
138
185
 
186
+ pd_candles['candle_class'] = pd_candles.apply(lambda row: classify_candle(row), axis=1) # type: ignore
187
+
139
188
  close_short_periods_rolling = pd_candles['close'].rolling(window=int(sliding_window_how_many_candles/slow_fast_interval_ratio))
140
189
  close_long_periods_rolling = pd_candles['close'].rolling(window=sliding_window_how_many_candles)
141
190
  close_short_periods_ewm = pd_candles['close'].ewm(span=int(sliding_window_how_many_candles/slow_fast_interval_ratio), adjust=False)
142
191
  close_long_periods_ewm = pd_candles['close'].ewm(span=sliding_window_how_many_candles, adjust=False)
143
192
 
144
-
145
193
  pd_candles['pct_change_close'] = pd_candles['close'].pct_change() * 100
146
194
  pd_candles['sma_short_periods'] = close_short_periods_rolling.mean()
147
195
  pd_candles['sma_long_periods'] = close_long_periods_rolling.mean()
@@ -157,6 +205,9 @@ def compute_candles_stats(
157
205
  pd_candles['candle_height_percent'] = pd_candles['candle_height'] / pd_candles['ema_close'] * 100
158
206
  pd_candles['candle_height_percent_rounded'] = pd_candles['candle_height_percent'].round().astype('Int64')
159
207
 
208
+ pd_candles['candle_body_height_percent'] = pd_candles['candle_body_height'] / pd_candles['ema_close'] * 100
209
+ pd_candles['candle_body_height_percent_rounded'] = pd_candles['candle_body_height_percent'].round().astype('Int64')
210
+
160
211
  '''
161
212
  To annualize volatility:
162
213
  if candle_interval == '1m':
@@ -168,6 +219,8 @@ def compute_candles_stats(
168
219
  pd_candles['annualized_volatility'] = (
169
220
  pd_candles['interval_historical_volatility'] * annualization_factor
170
221
  )
222
+
223
+ Why log return? Trading Dude https://python.plainenglish.io/stop-using-percentage-returns-logarithmic-returns-explained-with-code-64a4634b883a
171
224
  '''
172
225
  pd_candles['log_return'] = np.log(pd_candles['close'] / pd_candles['close'].shift(1))
173
226
  pd_candles['interval_hist_vol'] = pd_candles['log_return'].rolling(window=sliding_window_how_many_candles).std()
@@ -178,7 +231,6 @@ def compute_candles_stats(
178
231
  annualization_factor = np.sqrt(candles_per_year)
179
232
  pd_candles['annualized_hist_vol'] = pd_candles['interval_hist_vol'] * annualization_factor
180
233
 
181
-
182
234
  pd_candles['chop_against_ema'] = (
183
235
  (~pd_candles['is_green'] & (pd_candles['close'] > pd_candles['ema_close'])) | # Case 1: Green candle and close > EMA
184
236
  (pd_candles['is_green'] & (pd_candles['close'] < pd_candles['ema_close'])) # Case 2: Red candle and close < EMA
@@ -199,22 +251,29 @@ def compute_candles_stats(
199
251
  bearish_ema_crosses = (ema_short_periods_prev >= ema_long_periods_prev) & (ema_short_periods_curr < ema_long_periods_curr)
200
252
  pd_candles.loc[bullish_ema_crosses, 'ema_cross'] = 1
201
253
  pd_candles.loc[bearish_ema_crosses, 'ema_cross'] = -1
202
- pd_candles['ema_bullish_cross_last_id'] = pd_candles['ema_cross'].rolling(window=sliding_window_how_many_candles).apply(lambda x : x.idxmax())
203
- pd_candles['ema_bearish_cross_last_id'] = pd_candles['ema_cross'].rolling(window=sliding_window_how_many_candles).apply(lambda x : x.idxmin())
204
- pd_candles['ema_cross_last'] = np.where(
205
- pd_candles['ema_bullish_cross_last_id'] > pd_candles['ema_bearish_cross_last_id'],
206
- 'bullish',
207
- np.where(
208
- pd_candles['ema_bearish_cross_last_id'] > pd_candles['ema_bullish_cross_last_id'],
209
- 'bearish',
210
- None # type: ignore
211
- )
212
- )
213
-
214
- pd_candles['ema_cross_last'] = pd_candles['ema_cross_last'].where(
215
- pd_candles['ema_bullish_cross_last_id'].isnull() & pd_candles['ema_bearish_cross_last_id'].isnull(),
216
- None
217
- )
254
+ bullish_indices = pd.Series(pd_candles.index.where(pd_candles['ema_cross'] == 1), index=pd_candles.index).astype('Int64')
255
+ bearish_indices = pd.Series(pd_candles.index.where(pd_candles['ema_cross'] == -1), index=pd_candles.index).astype('Int64')
256
+ pd_candles['ema_bullish_cross_last_id'] = bullish_indices.rolling(window=pd_candles.shape[0], min_periods=1).max().astype('Int64')
257
+ pd_candles['ema_bearish_cross_last_id'] = bearish_indices.rolling(window=pd_candles.shape[0], min_periods=1).max().astype('Int64')
258
+ conditions = [
259
+ (pd_candles['ema_bullish_cross_last_id'].notna() &
260
+ pd_candles['ema_bearish_cross_last_id'].notna() &
261
+ (pd_candles['ema_bullish_cross_last_id'] > pd_candles['ema_bearish_cross_last_id'])),
262
+
263
+ (pd_candles['ema_bullish_cross_last_id'].notna() &
264
+ pd_candles['ema_bearish_cross_last_id'].notna() &
265
+ (pd_candles['ema_bearish_cross_last_id'] > pd_candles['ema_bullish_cross_last_id'])),
266
+
267
+ (pd_candles['ema_bullish_cross_last_id'].notna() &
268
+ pd_candles['ema_bearish_cross_last_id'].isna()),
269
+
270
+ (pd_candles['ema_bearish_cross_last_id'].notna() &
271
+ pd_candles['ema_bullish_cross_last_id'].isna())
272
+ ]
273
+ choices = ['bullish', 'bearish', 'bullish', 'bearish']
274
+ pd_candles['ema_cross_last'] = np.select(conditions, choices, default=None) # type: ignore
275
+ pd_candles.loc[bullish_ema_crosses, 'ema_cross'] = 'bullish'
276
+ pd_candles.loc[bearish_ema_crosses, 'ema_cross'] = 'bearish'
218
277
 
219
278
  pd_candles['max_short_periods'] = close_short_periods_rolling.max()
220
279
  pd_candles['max_long_periods'] = close_long_periods_rolling.max()
@@ -226,6 +285,11 @@ def compute_candles_stats(
226
285
  pd_candles['idmin_short_periods'] = close_short_periods_rolling.apply(lambda x : x.idxmin())
227
286
  pd_candles['idmin_long_periods'] = close_long_periods_rolling.apply(lambda x : x.idxmin())
228
287
 
288
+ pd_candles['max_candle_body_height_percent_long_periods'] = pd_candles['candle_body_height_percent'].rolling(window=sliding_window_how_many_candles).max()
289
+ pd_candles['idmax_candle_body_height_percent_long_periods'] = pd_candles['candle_body_height_percent'].rolling(window=sliding_window_how_many_candles).apply(lambda x : x.idxmax())
290
+ pd_candles['min_candle_body_height_percent_long_periods'] = pd_candles['candle_body_height_percent'].rolling(window=sliding_window_how_many_candles).min()
291
+ pd_candles['idmin_candle_body_height_percent_long_periods'] = pd_candles['candle_body_height_percent'].rolling(window=sliding_window_how_many_candles).apply(lambda x : x.idxmin())
292
+
229
293
  pd_candles['price_swing_short_periods'] = np.where(
230
294
  pd_candles['idmax_short_periods'] > pd_candles['idmin_short_periods'],
231
295
  pd_candles['max_short_periods'] - pd_candles['min_short_periods'], # Up swing
@@ -311,14 +375,14 @@ def compute_candles_stats(
311
375
  first_breach_index = aggressive_mask.idxmax()
312
376
  candle_high = pd_candles.at[first_breach_index, 'high']
313
377
  candle_low = pd_candles.at[first_breach_index, 'low']
314
- candle_height = candle_high - candle_low
378
+ candle_height = candle_high - candle_low # type: ignore
315
379
  else:
316
380
  aggressive_mask = window['close'] <= window['boillenger_lower_agg']
317
381
  if aggressive_mask.any():
318
382
  first_breach_index = aggressive_mask.idxmax()
319
383
  candle_high = pd_candles.at[first_breach_index, 'high']
320
384
  candle_low = pd_candles.at[first_breach_index, 'low']
321
- candle_height = candle_high - candle_low
385
+ candle_height = candle_high - candle_low # type: ignore
322
386
 
323
387
  return {
324
388
  'aggressive_move': aggressive_mask.any(),
@@ -423,10 +487,13 @@ def compute_candles_stats(
423
487
  mitigated = pd_candles.iloc[idx + 1:row.name]['close'].lt(row['fvg_high']).any()
424
488
  return mitigated
425
489
 
426
- pd_candles['fvg_mitigated'] = pd_candles.apply(lambda row: compute_fvg_mitigated(row, pd_candles), axis=1)
490
+ pd_candles['fvg_mitigated'] = pd_candles.apply(lambda row: compute_fvg_mitigated(row, pd_candles), axis=1) # type: ignore
427
491
 
428
-
429
- # RSI - https://www.youtube.com/watch?v=G9oUTi-PI18&t=809s
492
+ '''
493
+ RSI
494
+ Divergences from Bybit Learn https://www.youtube.com/watch?v=G9oUTi-PI18&t=809s
495
+ RSI Reversals from BK Traders https://www.youtube.com/watch?v=MvkbrHjiQlI
496
+ '''
430
497
  pd_candles.loc[:,'close_delta'] = pd_candles['close'].diff()
431
498
  pd_candles.loc[:,'close_delta_percent'] = pd_candles['close'].pct_change()
432
499
  lo_up = pd_candles['close_delta'].clip(lower=0)
@@ -452,6 +519,7 @@ def compute_candles_stats(
452
519
 
453
520
  lo_rs = lo_ma_up / lo_ma_down
454
521
  pd_candles.loc[:,'rsi'] = 100 - (100/(1 + lo_rs))
522
+ pd_candles['rsi_bucket'] = pd_candles['rsi'].apply(lambda x: bucketize_val(x, buckets=BUCKETS_m0_100))
455
523
  pd_candles['ema_rsi'] = pd_candles['rsi'].ewm(
456
524
  span=rsi_sliding_window_how_many_candles,
457
525
  adjust=False).mean()
@@ -513,13 +581,43 @@ def compute_candles_stats(
513
581
  rsi_sliding_window_how_many_candles if rsi_sliding_window_how_many_candles else sliding_window_how_many_candles).sum()
514
582
  pd_candles['money_flow_ratio'] = pd_candles['positive_flow_sum'] / pd_candles['negative_flow_sum']
515
583
  pd_candles['mfi'] = 100 - (100 / (1 + pd_candles['money_flow_ratio']))
584
+ pd_candles['mfi_bucket'] = pd_candles['mfi'].apply(lambda x: bucketize_val(x, buckets=BUCKETS_m0_100))
516
585
 
517
586
 
518
587
  # MACD https://www.investopedia.com/terms/m/macd.asp
519
588
  # https://www.youtube.com/watch?v=jmPCL3l08ss
520
589
  pd_candles['macd'] = pd_candles['ema_short_periods'] - pd_candles['ema_long_periods']
521
590
  pd_candles['signal'] = pd_candles['macd'].ewm(span=int(sliding_window_how_many_candles/slow_fast_interval_ratio), adjust=False).mean()
522
- pd_candles['macd_minus_signal'] = pd_candles['macd'] - pd_candles['signal']
591
+ pd_candles['macd_minus_signal'] = pd_candles['macd'] - pd_candles['signal'] # MACD histogram
592
+ macd_cur = pd_candles['macd_minus_signal']
593
+ macd_prev = pd_candles['macd_minus_signal'].shift(1)
594
+ bullish_macd_crosses = (macd_prev < 0) & (macd_cur > 0)
595
+ bearish_macd_crosses = (macd_prev > 0) & (macd_cur < 0)
596
+ pd_candles.loc[bullish_macd_crosses, 'macd_cross'] = 1
597
+ pd_candles.loc[bearish_macd_crosses, 'macd_cross'] = -1
598
+ bullish_indices = pd.Series(pd_candles.index.where(pd_candles['macd_cross'] == 1), index=pd_candles.index).astype('Int64')
599
+ bearish_indices = pd.Series(pd_candles.index.where(pd_candles['macd_cross'] == -1), index=pd_candles.index).astype('Int64')
600
+ pd_candles['macd_bullish_cross_last_id'] = bullish_indices.rolling(window=pd_candles.shape[0], min_periods=1).max().astype('Int64')
601
+ pd_candles['macd_bearish_cross_last_id'] = bearish_indices.rolling(window=pd_candles.shape[0], min_periods=1).max().astype('Int64')
602
+ conditions = [
603
+ (pd_candles['macd_bullish_cross_last_id'].notna() &
604
+ pd_candles['macd_bearish_cross_last_id'].notna() &
605
+ (pd_candles['macd_bullish_cross_last_id'] > pd_candles['macd_bearish_cross_last_id'])),
606
+
607
+ (pd_candles['macd_bullish_cross_last_id'].notna() &
608
+ pd_candles['macd_bearish_cross_last_id'].notna() &
609
+ (pd_candles['macd_bearish_cross_last_id'] > pd_candles['macd_bullish_cross_last_id'])),
610
+
611
+ (pd_candles['macd_bullish_cross_last_id'].notna() &
612
+ pd_candles['macd_bearish_cross_last_id'].isna()),
613
+
614
+ (pd_candles['macd_bearish_cross_last_id'].notna() &
615
+ pd_candles['macd_bullish_cross_last_id'].isna())
616
+ ]
617
+ choices = ['bullish', 'bearish', 'bullish', 'bearish']
618
+ pd_candles['macd_cross_last'] = np.select(conditions, choices, default=None) # type: ignore
619
+ pd_candles.loc[bullish_macd_crosses, 'macd_cross'] = 'bullish'
620
+ pd_candles.loc[bearish_macd_crosses, 'macd_cross'] = 'bearish'
523
621
 
524
622
  if not pypy_compat:
525
623
  calculate_slope(
@@ -568,7 +666,7 @@ def compute_candles_stats(
568
666
  pd_data=pd_candles,
569
667
  src_col_name='ema_rsi',
570
668
  slope_col_name='ema_rsi_slope',
571
- sliding_window_how_many_candles=int(sliding_window_how_many_candles)
669
+ sliding_window_how_many_candles=int(rsi_trend_sliding_window_how_many_candles)
572
670
  )
573
671
 
574
672
  pd_candles['regular_divergence'] = (
@@ -591,6 +689,8 @@ def compute_candles_stats(
591
689
 
592
690
  # Inflection points
593
691
  pd_candles['gap_close_vs_ema'] = pd_candles['close'] - pd_candles['ema_long_periods']
692
+ pd_candles['gap_close_vs_ema_percent'] = pd_candles['gap_close_vs_ema']/pd_candles['close'] *100
693
+
594
694
  pd_candles['close_above_or_below_ema'] = None
595
695
  pd_candles.loc[pd_candles['gap_close_vs_ema'] > 0, 'close_above_or_below_ema'] = 'above'
596
696
  pd_candles.loc[pd_candles['gap_close_vs_ema'] < 0, 'close_above_or_below_ema'] = 'below'
@@ -600,6 +700,68 @@ def compute_candles_stats(
600
700
  'close_vs_ema_inflection'
601
701
  ] = np.sign(pd_candles['close'] - pd_candles['ema_long_periods'])
602
702
 
703
    def lookup_fib_target(
        row,
        pd_candles,
        target_fib_level : float = 0.618
    ) -> Union[Dict, None]:
        '''
        Resolve the active Fibonacci retracement target for one candle row.

        For both the short and long rolling windows, this picks the row of
        the most recent rolling extreme (peak when idmax > idmin, i.e. price
        is falling from a peak; bottom otherwise, i.e. price is bouncing)
        and reads that row's precomputed `fib_<level>_<short|long>_periods`
        column as the target.

        Parameters:
            row: a row of pd_candles carrying the rolling extreme columns
                 (max/min and idmax/idmin for short and long periods).
            pd_candles: the full candles DataFrame. Lookups use .iloc with
                 the idmax/idmin values, so this assumes a 0..n-1 positional
                 index -- TODO confirm index is a plain RangeIndex.
            target_fib_level: which precomputed Fibonacci level column to
                 read (default 0.618).

        Returns:
            None when row is None; otherwise a dict with 'short_periods'
            and 'long_periods' sub-dicts, each holding idmin/idmax/min/max
            and the resolved 'fib_target' (itself None when unavailable).
        '''
        if row is None:
            return None

        fib_target_short_periods = None
        fib_target_long_periods = None

        # idmax/idmin columns hold float positions (NaN when the rolling
        # window has not filled yet); normalize to int-or-None.
        max_short_periods = row['max_short_periods']
        idmax_short_periods = int(row['idmax_short_periods']) if not math.isnan(row['idmax_short_periods']) else None
        max_long_periods = row['max_long_periods']
        idmax_long_periods = int(row['idmax_long_periods']) if not math.isnan(row['idmax_long_periods']) else None

        min_short_periods = row['min_short_periods']
        idmin_short_periods = int(row['idmin_short_periods']) if not math.isnan(row['idmin_short_periods']) else None
        min_long_periods = row['min_long_periods']
        idmin_long_periods = int(row['idmin_long_periods']) if not math.isnan(row['idmin_long_periods']) else None

        # NOTE(review): the bare truthiness test also treats position 0 as
        # missing (0 is falsy), which together with the explicit ">0" guards
        # skips extremes at the very first row -- confirm this is intended.
        if idmax_short_periods and idmin_short_periods and idmax_short_periods>0 and idmin_short_periods>0:
            if idmax_short_periods>idmin_short_periods and idmax_short_periods < len(pd_candles):
                # Falling from prev peak
                last_peak = pd_candles.iloc[idmax_short_periods]
                fib_target_short_periods = last_peak[f'fib_{target_fib_level}_short_periods'] if not math.isnan(last_peak[f'fib_{target_fib_level}_short_periods']) else None

            else:
                # Bouncing from prev bottom
                if idmin_short_periods < len(pd_candles):
                    last_bottom = pd_candles.iloc[idmin_short_periods]
                    fib_target_short_periods = last_bottom[f'fib_{target_fib_level}_short_periods'] if not math.isnan(last_bottom[f'fib_{target_fib_level}_short_periods']) else None

        # Same logic over the long rolling window.
        if idmax_long_periods and idmin_long_periods and idmax_long_periods>0 and idmin_long_periods>0:
            if idmax_long_periods>idmin_long_periods and idmax_long_periods < len(pd_candles):
                # Falling from prev peak
                last_peak = pd_candles.iloc[idmax_long_periods]
                fib_target_long_periods = last_peak[f'fib_{target_fib_level}_long_periods'] if not math.isnan(last_peak[f'fib_{target_fib_level}_long_periods']) else None

            else:
                # Bouncing from prev bottom
                if idmin_long_periods < len(pd_candles):
                    last_bottom = pd_candles.iloc[idmin_long_periods]
                    fib_target_long_periods = last_bottom[f'fib_{target_fib_level}_long_periods'] if not math.isnan(last_bottom[f'fib_{target_fib_level}_long_periods']) else None

        return {
            'short_periods' : {
                'idmin' : idmin_short_periods,
                'idmax' : idmax_short_periods,
                'min' : min_short_periods,
                'max' : max_short_periods,
                'fib_target' : fib_target_short_periods,
            },
            'long_periods' : {
                'idmin' : idmin_long_periods,
                'idmax' : idmax_long_periods,
                'min' : min_long_periods,
                'max' : max_long_periods,
                'fib_target' : fib_target_long_periods
            }
        }
+
603
765
  '''
604
766
  The implementation from Geeksforgeeks https://www.geeksforgeeks.org/find-indices-of-all-local-maxima-and-local-minima-in-an-array/ is wrong.
605
767
If you have consecutive-duplicates, things will fall apart!