siglab-py 0.1.19__py3-none-any.whl → 0.6.33__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. siglab_py/algo/__init__.py +0 -0
  2. siglab_py/algo/macdrsi_crosses_15m_tc_strategy.py +107 -0
  3. siglab_py/algo/strategy_base.py +122 -0
  4. siglab_py/algo/strategy_executor.py +1308 -0
  5. siglab_py/algo/tp_algo.py +529 -0
  6. siglab_py/backtests/__init__.py +0 -0
  7. siglab_py/backtests/backtest_core.py +2405 -0
  8. siglab_py/backtests/coinflip_15m_crypto.py +432 -0
  9. siglab_py/backtests/fibonacci_d_mv_crypto.py +541 -0
  10. siglab_py/backtests/macdrsi_crosses_15m_tc_crypto.py +473 -0
  11. siglab_py/constants.py +26 -1
  12. siglab_py/exchanges/binance.py +38 -0
  13. siglab_py/exchanges/deribit.py +83 -0
  14. siglab_py/exchanges/futubull.py +33 -3
  15. siglab_py/market_data_providers/candles_provider.py +11 -10
  16. siglab_py/market_data_providers/candles_ta_provider.py +5 -5
  17. siglab_py/market_data_providers/ccxt_candles_ta_to_csv.py +238 -0
  18. siglab_py/market_data_providers/futu_candles_ta_to_csv.py +224 -0
  19. siglab_py/market_data_providers/google_monitor.py +320 -0
  20. siglab_py/market_data_providers/orderbooks_provider.py +15 -12
  21. siglab_py/market_data_providers/tg_monitor.py +428 -0
  22. siglab_py/market_data_providers/{test_provider.py → trigger_provider.py} +9 -8
  23. siglab_py/ordergateway/client.py +172 -41
  24. siglab_py/ordergateway/encrypt_keys_util.py +1 -1
  25. siglab_py/ordergateway/gateway.py +456 -344
  26. siglab_py/ordergateway/test_ordergateway.py +8 -7
  27. siglab_py/tests/integration/market_data_util_tests.py +80 -6
  28. siglab_py/tests/unit/analytic_util_tests.py +67 -4
  29. siglab_py/tests/unit/market_data_util_tests.py +96 -0
  30. siglab_py/tests/unit/simple_math_tests.py +252 -0
  31. siglab_py/tests/unit/trading_util_tests.py +65 -0
  32. siglab_py/util/analytic_util.py +484 -66
  33. siglab_py/util/datetime_util.py +39 -0
  34. siglab_py/util/market_data_util.py +564 -74
  35. siglab_py/util/module_util.py +40 -0
  36. siglab_py/util/notification_util.py +78 -0
  37. siglab_py/util/retry_util.py +16 -3
  38. siglab_py/util/simple_math.py +262 -0
  39. siglab_py/util/slack_notification_util.py +59 -0
  40. siglab_py/util/trading_util.py +118 -0
  41. {siglab_py-0.1.19.dist-info → siglab_py-0.6.33.dist-info}/METADATA +5 -13
  42. siglab_py-0.6.33.dist-info/RECORD +56 -0
  43. {siglab_py-0.1.19.dist-info → siglab_py-0.6.33.dist-info}/WHEEL +1 -1
  44. siglab_py-0.1.19.dist-info/RECORD +0 -31
  45. {siglab_py-0.1.19.dist-info → siglab_py-0.6.33.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,40 @@
1
+ import os
2
+ import glob
3
+ import importlib.util
4
+ import inspect
5
+
6
def load_module_class(strategy_name: str):
    """
    Search every *.py file in the calling script's directory for a class named
    `strategy_name` and return that class object, or None when not found.

    Files starting with '_' and 'strategy_executor.py' itself are skipped so
    the executor doesn't re-import itself or package internals.

    Args:
        strategy_name: class name to look for; falsy values return None.

    Returns:
        The class object, or None.
    """
    if not strategy_name:
        return None

    caller_frame = inspect.stack()[1]  # [0] = here, [1] = direct caller
    caller_file = caller_frame.filename
    folder = os.path.dirname(caller_file)
    print(f"folder searched: {folder}")

    pattern = os.path.join(folder, "*.py")

    for filepath in glob.glob(pattern):
        filename = os.path.basename(filepath)
        # Fix: the debug messages below lost their variable interpolation;
        # restore {filename} so the trace is actually useful.
        print(f"filename: {filename}")

        if filename.startswith('_') or filename == 'strategy_executor.py':
            continue

        module_name = filename[:-3]

        try:
            spec = importlib.util.spec_from_file_location(module_name, filepath)
            if spec is None or spec.loader is None:
                # Fix: spec_from_file_location can return None (or a spec with
                # no loader); the original crashed with AttributeError here.
                continue
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)

            if hasattr(module, strategy_name):
                cls = getattr(module, strategy_name)
                if isinstance(cls, type):
                    print(f"Loaded strategy: {strategy_name} from {filename}")
                    return cls
        except Exception as e:
            # Best-effort scan: a broken module shouldn't abort the search.
            print(f"Skipping {filename} – error: {e}")

    print(f"{strategy_name} not found.")
    return None
@@ -0,0 +1,78 @@
1
+ import json
2
+ from typing import Any, Dict, Union
3
+ from datetime import datetime, timezone
4
+ import pandas as pd
5
+ import numpy as np
6
+ from tabulate import tabulate
7
+
8
+ from siglab_py.util.slack_notification_util import slack_dispatch_notification
9
+
10
+ from siglab_py.constants import LogLevel
11
+
12
def dispatch_notification(
    title : str,
    message : Union[str, Dict, pd.DataFrame],
    footer : str,
    params : Dict[str, Any],
    log_level : LogLevel = LogLevel.INFO,
    logger = None
):
    """
    Format `message` and forward it to Slack via slack_dispatch_notification.

    Args:
        title: headline shown in the Slack block.
        message: dicts are rendered as indented JSON, DataFrames as an
            org-mode table (tabulate), anything else is passed through as-is.
        footer: trailing text; prefixed with the current UTC timestamp.
        params: notification config; must contain the 'slack' webhook section.
        log_level: routes the message to the info/critical/alert webhook.
        logger: optional; dispatch failures are logged here.

    Never raises: any failure is swallowed (and logged when `logger` is given)
    so a notification problem cannot take down the caller.
    """
    try:
        if isinstance(message, dict):
            # Fix: the original passed separators=(' ', ':'), which uses a
            # space as the item separator and emits malformed JSON. The
            # default separators with indent=2 produce valid, readable JSON.
            _message = json.dumps(message, indent=2)
        elif isinstance(message, pd.DataFrame):
            _message = tabulate(message, headers='keys', tablefmt='orgtbl') # type: ignore
        else:
            _message = message

        utc_time = datetime.now(timezone.utc)
        footer = f"UTC {utc_time} {footer}"

        slack_dispatch_notification(title, _message, footer, params, log_level)
    except Exception as any_notification_error:
        if logger:
            # Fix: a dispatch failure is an error, not an info event.
            logger.error(f"Failed to dispatch notification for {str(title)}: {any_notification_error}")
35
+
36
if __name__ == '__main__':
    # Smoke test: push one notification per supported payload type
    # (plain string, nested dict, DataFrame) through the same webhook config.
    _hook = "https://hooks.slack.com/services/xxx"
    params : Dict[str, Any] = {
        "slack" : {
            level : { "webhook_url" : _hook }
            for level in ("info", "critical", "alert")
        },
    }

    log_level : LogLevel = LogLevel.CRITICAL
    title : str = "Test message"
    footer : str = "... some footer .."

    # 1) plain string payload
    message1 : str = "Testing 1"
    dispatch_notification(title=title, message=message1, footer=footer, params=params, log_level=log_level)

    # 2) nested dict payload (rendered as JSON)
    message2 : Dict[str, Any] = {
        'aaa' : 123,
        'bbb' : 456,
        'ccc' : {
            'ddd' : 789
        }
    }
    dispatch_notification(title=title, message=message2, footer=footer, params=params, log_level=log_level)

    # 3) DataFrame payload: 20 hourly synthetic close prices
    start_date = pd.to_datetime('2024-01-01 00:00:00')
    datetimes = pd.date_range(start=start_date, periods=20, freq='H')
    np.random.seed(42)
    close_prices = np.random.uniform(80000, 90000, size=20).round(2)
    data : pd.DataFrame = pd.DataFrame({
        'datetime': datetimes,
        'close': close_prices
    })
    data['timestamp_ms'] = data['datetime'].astype('int64')
    message3 = data
    dispatch_notification(title=title, message=message3, footer=footer, params=params, log_level=log_level)
@@ -1,15 +1,28 @@
1
- def retry(num_attempts : int = 1):
1
+ import time
2
+
3
def retry(
    num_attempts : int = 1,
    pause_between_retries_ms : int = 1000,
    logger = None
):
    """
    Decorator: call the wrapped method up to `num_attempts` times, pausing
    `pause_between_retries_ms` between failed attempts.

    Args:
        num_attempts: total attempts (not extra retries).
        pause_between_retries_ms: sleep between a failure and the next attempt.
        logger: optional; the final give-up message goes here (else print).

    Raises:
        Exception: chained from the last error once all attempts fail.
    """
    def decorator(method):
        def wrapper(*args, **kw):
            for i in range(num_attempts):
                try:
                    result = method(*args, **kw)
                    if i>0:
                        print(f"retry_util.retry done {method.__name__} on #{i+1} call. {args} {kw}")
                    return result
                except Exception as retry_error:
                    if i==(num_attempts-1):
                        err_msg = f"retry_util.retry gave up {method.__name__} after {num_attempts} calls. {args} {kw}. {retry_error}"
                        if logger:
                            logger.error(err_msg)
                        else:
                            print(err_msg)
                        raise Exception(err_msg) from retry_error
                    # Fix: the original slept in a `finally`, i.e. on EVERY
                    # call (successes included) and once more after the final
                    # raise. Sleep only between failed attempts — and don't
                    # truncate sub-second pauses to zero with int().
                    time.sleep(pause_between_retries_ms / 1000)
        return wrapper
    return decorator
@@ -0,0 +1,262 @@
1
+ import math
2
+ import random
3
+ from typing import List, Dict, Union
4
+
5
+ from pandas import isna
6
+
7
def generate_rand_nums(
    range_min : float = 0,
    range_max : float = 1,
    size=100, # list size
    percent_in_range : float = 100,
    abs_min : float = 0,
    abs_max : float = 1
) -> List[float]:
    """
    Produce `size` random floats: roughly `percent_in_range` percent are drawn
    from [range_min, range_max), the remainder are outliers drawn from either
    [abs_min, range_min) or [range_max, abs_max) with equal probability.
    The final list is shuffled.
    """
    assert(range_min<range_max)

    # Widen the absolute bounds so they always contain the core range.
    if abs_min>range_min:
        abs_min = range_min
    if abs_max<range_max:
        abs_max = range_max

    in_range_count = int(size * percent_in_range/100)
    samples : List[float] = [random.uniform(range_min, range_max) for _ in range(in_range_count)]

    # Fill the rest with outliers, coin-flipping between the low and high tail.
    while len(samples) < size:
        if random.uniform(0, 1)>0.5:
            samples.append(random.uniform(abs_min, range_min))
        else:
            samples.append(random.uniform(range_max, abs_max))

    random.shuffle(samples)

    return samples
34
+
35
def compute_level_increment(
    num : float,
    level_granularity : float = 0.01
) -> float:
    """
    Return `num * level_granularity` rounded to one significant digit — the
    step between adjacent psychological price levels around `num`.
    NaN passes through unchanged.
    """
    if math.isnan(num):
        return num
    raw_increment = num * level_granularity
    # Order of magnitude of the raw increment, e.g. 0.87 -> -1.
    exponent = math.floor(math.log10(abs(raw_increment)))
    power_of_ten = 10 ** exponent
    return round(raw_increment / power_of_ten) * power_of_ten
46
+
47
+ # https://norman-lm-fung.medium.com/levels-are-psychological-7176cdefb5f2
48
def round_to_level(
    num : float,
    level_granularity : float = 0.01
) -> float:
    """
    Snap `num` onto the nearest psychological 'level' whose spacing is
    determined by compute_level_increment. NaN passes through unchanged.
    """
    if math.isnan(num):
        return num
    increment = compute_level_increment(num, level_granularity)
    return round(num / increment) * increment
57
+
58
def compute_adjacent_levels(
    num : float,
    level_granularity : float = 0.01,
    num_levels_per_side : int = 1
) -> Union[None, List[float]]:
    """
    Return `num` snapped to its nearest level, together with
    `num_levels_per_side` levels below and above it, in ascending order.
    Returns None when `num` is NaN.
    """
    if math.isnan(num):
        return None
    increment = compute_level_increment(num, level_granularity)
    center = round(num / increment) * increment
    # Build the ladder: n steps below, the center, n steps above.
    below = [center - step * increment for step in range(num_levels_per_side, 0, -1)]
    above = [center + step * increment for step in range(1, num_levels_per_side + 1)]
    return below + [center] + above
70
+
71
def bucket_series(
    values : List[float],
    outlier_threshold_percent : float = 0,
    level_granularity : float = 0.1 # 0.1 = 10%
) -> Dict[
    str,
    Dict[str,Union[float, List[float]]]
]:
    """
    Partition `values` into labelled buckets.

    The bucket grid is chosen by sniffing the data's natural range — [0, 1],
    [-1, 1], [0, 100], [-100, 100] are tried in that order, a candidate being
    accepted when no more than `outlier_threshold_percent` percent of values
    fall outside it; otherwise the grid is derived from min/max of the data
    snapped to levels via round_to_level. Two open-ended overflow buckets
    catch values below/above the grid.

    Returns:
        {label: {'min': float, 'max': float, 'values': [members]}}

    NOTE(review): raises ZeroDivisionError when `values` is empty — callers
    must guard. The hard-coded sequence starts (0.1, -0.9, 10, -90) assume the
    default level_granularity of 0.1 — TODO confirm intent for other values.
    """
    buckets : Dict[
        str,
        Dict[str,Union[float, List[float]]]
    ] = {}
    # Range sniffing: a candidate range holds when the share of values outside
    # it stays within the outlier threshold.
    list_0_to_1 : bool = True if len([x for x in values if x<0 or x>1])/len(values)*100 <= outlier_threshold_percent else False
    list_m1_to_1 : bool = True if len([x for x in values if x<-1 or x>1])/len(values)*100 <= outlier_threshold_percent else False

    list_0_to_100 : bool = True if len([x for x in values if x<0 or x>100])/len(values)*100 <= outlier_threshold_percent else False
    # Reject [0, 100] unless the data actually spans both ends of that range.
    if (
        list_0_to_100
        and (
            not min(values)<100*(outlier_threshold_percent/100) or not max(values)>100*(1-outlier_threshold_percent/100)
        )
    ):
        list_0_to_100 = False
    list_m100_to_100 : bool = True if len([x for x in values if x<-100 or x>100])/len(values)*100 <= outlier_threshold_percent else False
    # Same span check for [-100, 100].
    if (
        list_m100_to_100
        and (
            not min(values)<-100*(1-outlier_threshold_percent/100) or not max(values)>100*(1-outlier_threshold_percent/100)
        )
    ):
        list_m100_to_100 = False

    # Inclusive arithmetic sequence [start, start+step, ...], rounded to 10 dp
    # to keep float drift out of the bucket labels.
    def _generate_sequence(start, stop, step):
        result = []
        current = start
        num_steps = int((stop - start) / step) + 1
        for i in range(num_steps):
            result.append(round(start + i * step, 10))
        return result

    if list_0_to_1:
        # Grid over [0, 1] plus overflow buckets "< 0" and ">1".
        step = round_to_level(
            1 * level_granularity,
            level_granularity=level_granularity
        )
        intervals = _generate_sequence(0.1, 1, step)
        last_interval = 0
        buckets[f"< 0"] = {
            'min' : float("-inf"),
            'max' : 0,
            'values' : [ x for x in values if x<0 ]
        }
        for interval in intervals:
            # Half-open bucket [last_interval, interval).
            buckets[f"{last_interval} - {interval}"] = {
                'min' : last_interval,
                'max' : interval,
                'values' : [ x for x in values if x>=last_interval and x<interval ]
            }
            last_interval = interval
        buckets[f">1"] = {
            'min' : last_interval,
            'max' : float("inf"),
            'values' : [ x for x in values if x>=1 ]
        }

    elif not list_0_to_1 and list_m1_to_1:
        # Grid over [-1, 1] plus overflow buckets "< -1" and ">1".
        step = round_to_level(
            1 * level_granularity,
            level_granularity=level_granularity
        )
        intervals = _generate_sequence(-0.9, 1, step)
        last_interval = -1
        buckets[f"< -1"] = {
            'min' : float("-inf"),
            'max' : -1,
            'values' : [ x for x in values if x<-1 ]
        }
        for interval in intervals:
            buckets[f"{last_interval} - {interval}"] = {
                'min' : last_interval,
                'max' : interval,
                'values' : [ x for x in values if x>=last_interval and x<interval ]
            }
            last_interval = interval
        buckets[f">1"] = {
            'min' : last_interval,
            'max' : float("inf"),
            'values' : [ x for x in values if x>=1 ]
        }

    elif not list_0_to_1 and not list_m1_to_1 and list_0_to_100:
        # Grid over [0, 100] plus overflow buckets "<0" and ">100".
        step = round_to_level(
            100 * level_granularity,
            level_granularity=level_granularity
        )
        intervals = _generate_sequence(10, 100, step)
        last_interval = 0
        buckets[f"<0"] = {
            'min' : float("-inf"),
            'max' : 0,
            'values' : [ x for x in values if x<0 ]
        }
        for interval in intervals:
            buckets[f"{last_interval} - {interval}"] = {
                'min' : last_interval,
                'max' : interval,
                'values' : [ x for x in values if x>=last_interval and x<interval ]
            }
            last_interval = interval
        buckets[f">100"] = {
            'min' : last_interval,
            'max' : float("inf"),
            'values' : [ x for x in values if x>=100 ]
        }

    elif not list_0_to_1 and not list_m1_to_1 and not list_0_to_100 and list_m100_to_100:
        # Grid over [-100, 100] plus overflow buckets "<-100" and ">100".
        step = round_to_level(
            100 * level_granularity,
            level_granularity=level_granularity
        )
        intervals = _generate_sequence(-90, 100, step)
        last_interval = -100
        buckets[f"<-100"] = {
            'min' : float("-inf"),
            'max' : -100,
            'values' : [ x for x in values if x<-100 ]
        }
        for interval in intervals:
            buckets[f"{last_interval} - {interval}"] = {
                'min' : last_interval,
                'max' : interval,
                'values' : [ x for x in values if x>=last_interval and x<interval ]
            }
            last_interval = interval
        buckets[f">100"] = {
            'min' : last_interval,
            'max' : float("inf"),
            'values' : [ x for x in values if x>=100 ]
        }

    else:
        # Fallback: derive the grid from the observed min/max snapped to levels.
        range_min = round_to_level(
            min(values),
            level_granularity=level_granularity
        )
        range_max = round_to_level(
            max(values),
            level_granularity=level_granularity
        )
        step = round_to_level(
            abs(range_max - range_min) * level_granularity,
            level_granularity=level_granularity
        )

        intervals = _generate_sequence(range_min+step, range_max, step)
        last_interval = range_min
        buckets[f"< {range_min}"] = {
            'min' : float("-inf"),
            'max' : range_min,
            'values' : [ x for x in values if x<range_min ]
        }
        for interval in intervals:
            buckets[f"{last_interval} - {interval}"] = {
                'min' : last_interval,
                'max' : interval,
                'values' : [ x for x in values if x>=last_interval and x<interval ]
            }
            last_interval = interval
        buckets[f"> {range_max}"] = {
            'min' : last_interval,
            'max' : float("inf"),
            'values' : [ x for x in values if x>=range_max ]
        }

    # Sanity check: no bucket holds a value outside its own [min, max].
    for key in buckets:
        bucket = buckets[key]
        assert(len([x for x in bucket['values'] if x<bucket['min'] or x>bucket['max']])==0) # type: ignore

    return buckets
250
+
251
def bucketize_val(
    x : float,
    buckets : Dict[
        str,
        Dict[str,Union[float, List[float]]]
    ]
) -> Union[str,None]:
    """
    Return the label of the first bucket (in insertion order) whose inclusive
    [min, max] range contains x, or None when no bucket matches.
    """
    for label, bounds in buckets.items():
        if bounds['min'] <= x <= bounds['max']: # type: ignore
            return label
    return None
@@ -0,0 +1,59 @@
1
+ '''
2
+ https://medium.com/@natalia_assad/how-send-a-table-to-slack-using-python-d1a20b08abe0
3
+ '''
4
+ import sys
5
+ from typing import Any, Dict
6
+ import json
7
+ import requests
8
+
9
+ from siglab_py.constants import LogLevel
10
+
11
def slack_dispatch_notification(
    title : str,
    message : str,
    footer : str,
    params : Dict[str, Any],
    log_level : LogLevel = LogLevel.INFO,
    max_message_len : int = 1800
):
    """
    Post `message` to a Slack incoming webhook selected by `log_level`.

    params shape: {'slack': {'info'|'critical'|'alert': {'webhook_url': str}}}.
    INFO/DEBUG -> 'info' hook, CRITICAL -> 'critical', ERROR -> 'alert';
    anything else falls back to 'info'. Silently returns when the selected
    webhook_url is empty.

    Raises:
        Exception(status_code, response_text) when Slack does not answer 200.
    """
    slack_params = params['slack']

    # Slack rejects oversized blocks ... https://stackoverflow.com/questions/60344831/slack-api-invalid-block
    message = message[:max_message_len]

    if log_level.value==LogLevel.INFO.value or log_level.value==LogLevel.DEBUG.value:
        webhook_url = slack_params['info']['webhook_url']
    elif log_level.value==LogLevel.CRITICAL.value:
        webhook_url = slack_params['critical']['webhook_url']
    elif log_level.value==LogLevel.ERROR.value:
        webhook_url = slack_params['alert']['webhook_url']
    else:
        webhook_url = slack_params['info']['webhook_url']

    if not webhook_url:
        return

    data = {
        "username": "siglab_py",
        "type": "section",
        "blocks": [
            {
                "type": "header",
                "text": { "type": "plain_text", "text": f"{title}" }
            },
            {
                "type": "section",
                "text": { "type": "mrkdwn", "text": message }
            },
            {
                "type": "section",
                "text": { "type": "plain_text", "text": footer }
            }
        ]
    }

    # Fix: Content-Length must be the byte length of the JSON body sent on the
    # wire. The original used sys.getsizeof(data, 2000) — the in-memory size of
    # the dict object (2000 being merely a fallback), not the payload size.
    payload = json.dumps(data)
    byte_size = str(len(payload.encode('utf-8')))
    req_headers = { 'Content-Length': byte_size, 'Content-Type': "application/json"}
    rsp = requests.post(webhook_url, headers=req_headers, data=payload)
    if rsp.status_code != 200:
        raise Exception(rsp.status_code, rsp.text)
@@ -0,0 +1,118 @@
1
+ import math
2
+
3
+ '''
4
+ pnl_percent_notional = Trade's current pnl in percent.
5
+
6
+ Examples,
7
+ y-axis:
8
+ max (i.e most tight) = 0%
9
+ sl_percent_trailing = 50% (Trailing stop loss in percent)
10
+
11
+ x-axis:
12
+ min TP = 1.5% <-- min TP
13
+ max TP = 2.5% <-- max TP
14
+
15
+ slope = (0-50)/(2.5-1.5) = -50/+1 = -50
16
+ effective_tp_trailing_percent = slope * (pnl_percent_notional - 1.5%) + sl_percent_trailing
17
+
18
+ Case 1. pnl_percent_notional = 1.5% (Trade starting off, only +50bps pnl. i.e. min TP)
19
+ effective_tp_trailing_percent = slope * (pnl_percent_notional - 1.5%) + sl_percent_trailing
20
+ = -50 * (1.5-1.5) + 50%
21
+ = 0 + 50
22
+ = 50% (Most loose)
23
+
24
+ Case 2. pnl_percent_notional = 2% (Deeper into profit, +200bps pnl)
25
+ effective_tp_trailing_percent = slope * (pnl_percent_notional - 1.5%) + sl_percent_trailing
26
+ = -50 * (2-1.5) +50%
27
+ = -25 + 50
28
+ = 25% (Somewhat tight)
29
+
30
+ Case 3. pnl_percent_notional = 2.5% (Very deep in profit, +250bps pnl. i.e. max TP)
31
+ effective_tp_trailing_percent = slope * (pnl_percent_notional - 1.5%) + sl_percent_trailing
32
+ = -50 * (2.5-1.5) +50%
33
+ = -50 + 50
34
+ = 0 (Most tight)
35
+
36
+ So you see, effective_tp_trailing_percent gets smaller and smaller as pnl approach max TP, finally zero.
37
+
38
+ How to use it?
39
+ if loss_trailing>=effective_tp_trailing_percent and pnl_percent_notional > tp_min_percent:
40
+ Fire trailing stops and take profit.
41
+
42
+ What's 'loss_trailing'? 'loss_trailing' is essentially pnl drop from max_unrealized_pnl_live.
43
+
44
+ Say, when trade started off:
45
+ unrealized_pnl_live = $80
46
+ max_unrealized_pnl_live = $100
47
+ loss_trailing = (1 - unrealized_pnl_live/max_unrealized_pnl_live) = (1-80/100) = 0.2 (Or 20%)
48
+
49
+ If pnl worsen:
50
+ unrealized_pnl_live = $40
51
+ max_unrealized_pnl_live = $100
52
+ loss_trailing = (1 - unrealized_pnl_live/max_unrealized_pnl_live) = (1-40/100) = 0.6 (Or 60%)
53
+
54
+ Have a look at this for a visual explaination how "Gradually tightened stops" works:
55
+ https://github.com/r0bbar/siglab/blob/master/siglab_py/tests/manual/trading_util_tests.ipynb
56
+ https://norman-lm-fung.medium.com/gradually-tightened-trailing-stops-f7854bf1e02b
57
+ '''
58
def calc_eff_trailing_sl(
    tp_min_percent : float,
    tp_max_percent : float,
    sl_percent_trailing : float,
    pnl_percent_notional : float,
    default_effective_tp_trailing_percent : float = float('inf'), # inf: essentially saying, don't fire off trailing stop.
    linear : bool = True,
    pow : float = 5 # This is for non-linear trailing stops
) -> float:
    """
    Gradually-tightened trailing stop: the effective trailing threshold starts
    at sl_percent_trailing when pnl sits at tp_min_percent and tightens to 0
    as pnl approaches tp_max_percent (linearly, or along a shifted power curve
    when linear=False). See the worked examples in the comment block above.

    Returns:
        0 when pnl already exceeds tp_max_percent (tightest stop);
        default_effective_tp_trailing_percent when pnl is below tp_min_percent
        (trailing stop effectively disabled by default).
    """
    if pnl_percent_notional>tp_max_percent:
        return 0
    if pnl_percent_notional<tp_min_percent:
        return default_effective_tp_trailing_percent

    # Fix: a degenerate band (tp_max_percent == tp_min_percent) made the
    # linear branch divide by zero. Mirror the non-linear branch's y_shift==0
    # handling: fall back to the default and let the caller decide.
    if tp_max_percent == tp_min_percent:
        return default_effective_tp_trailing_percent

    if linear:
        # Straight line from (tp_min, sl_percent_trailing) down to (tp_max, 0).
        slope = (0 - sl_percent_trailing) / (tp_max_percent - tp_min_percent)
        # (pnl >= tp_min is guaranteed by the early returns above.)
        effective_tp_trailing_percent = slope * (pnl_percent_notional - tp_min_percent) + sl_percent_trailing
    else:
        # Shifted power curve y(x) = -((x + shift)^pow), normalized over
        # [tp_min, tp_max] so the stop tightens non-linearly.
        def y(
            x : float,
            x_shift : float,
            pow : float
        ) -> float:
            return -1 * ( (x+x_shift)**pow)

        y_min = y(
            x=tp_min_percent,
            x_shift=tp_min_percent,
            pow=pow
        )
        y_max = y(
            x=tp_max_percent,
            x_shift=tp_min_percent,
            pow=pow
        )
        y_shift = abs(y_max) - abs(y_min)

        if y_shift!=0:
            y_normalized = y(
                x=pnl_percent_notional,
                x_shift=tp_min_percent,
                pow=pow
            ) / y_shift
            effective_tp_trailing_percent = y_normalized * sl_percent_trailing + sl_percent_trailing
        else:
            # y_shift can still collapse to 0 for pathological inputs; a
            # default of float('inf') effectively disables the trailing stop.
            # Client side needs handle this.
            effective_tp_trailing_percent = default_effective_tp_trailing_percent

    return effective_tp_trailing_percent
@@ -1,22 +1,19 @@
1
- Metadata-Version: 2.1
2
- Name: siglab-py
3
- Version: 0.1.19
1
+ Metadata-Version: 2.4
2
+ Name: siglab_py
3
+ Version: 0.6.33
4
4
  Summary: Market data fetches, TA calculations and generic order gateway.
5
5
  Author: r0bbarh00d
6
6
  Author-email: r0bbarh00d <r0bbarh00d@gmail.com>
7
7
  License: MIT
8
8
  Project-URL: Homepage, https://github.com/r0bbar/siglab/blob/master/siglab_py/README.md
9
- Classifier: Programming Language :: Python :: 3
10
- Classifier: License :: OSI Approved :: MIT License
11
- Classifier: Operating System :: OS Independent
12
9
  Requires-Python: >=3.9.19
13
10
  Description-Content-Type: text/markdown
14
11
  Requires-Dist: python-dotenv
15
12
  Requires-Dist: dotmap
16
- Requires-Dist: typing-extensions
13
+ Requires-Dist: typing_extensions
17
14
  Requires-Dist: arrow
18
15
  Requires-Dist: tzlocal
19
- Requires-Dist: nest-asyncio
16
+ Requires-Dist: nest_asyncio
20
17
  Requires-Dist: pandas
21
18
  Requires-Dist: numpy
22
19
  Requires-Dist: boto3
@@ -25,12 +22,7 @@ Requires-Dist: ccxt
25
22
  Requires-Dist: ccxtpro
26
23
  Requires-Dist: yfinance
27
24
  Requires-Dist: yahoofinancials
28
- Requires-Dist: scipy
29
- Requires-Dist: statsmodels
30
- Requires-Dist: scikit-learn
31
- Requires-Dist: sklearn.preprocessing
32
25
  Requires-Dist: hurst
33
26
  Requires-Dist: redis
34
27
  Requires-Dist: redis-py-cluster
35
28
  Requires-Dist: kafka-python
36
-