openfund-core 1.0.7__tar.gz → 1.0.9__tar.gz

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openfund-core
-Version: 1.0.7
+Version: 1.0.9
 Summary: Openfund-core.
 Requires-Python: >=3.9,<4.0
 Classifier: Programming Language :: Python :: 3
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "openfund-core"
-version = "1.0.7"
+version = "1.0.9"
 description = "Openfund-core."
 authors = []
 readme = "README.md"
@@ -47,12 +47,12 @@ class SMCFVG(SMCStruct):
         # Use vectorized operations instead of apply for better performance
         if side == self.BUY_SIDE:
             condition = df[self.HIGH_COL].shift(1) < df[self.LOW_COL].shift(-1)
-            side_value = "Bullish"
+            side_value = self.BULLISH_TREND
             price_top = df[self.LOW_COL].shift(-1)
             price_bot = df[self.HIGH_COL].shift(1)
         else:
             condition = df[self.LOW_COL].shift(1) > df[self.HIGH_COL].shift(-1)
-            side_value = "Bearish"
+            side_value = self.BEARISH_TREND
             price_top = df[self.LOW_COL].shift(1)
             price_bot = df[self.HIGH_COL].shift(-1)

@@ -64,9 +64,9 @@ class SMCFVG(SMCStruct):
         df.loc[:, self.FVG_MID] = (df[self.FVG_TOP] + df[self.FVG_BOT]) / 2

         fvg_df = df[
-            df[self.FVG_SIDE] == "Bullish"
+            df[self.FVG_SIDE] == self.BULLISH_TREND
             if side == self.BUY_SIDE
-            else df[self.FVG_SIDE] == "Bearish"
+            else df[self.FVG_SIDE] == self.BEARISH_TREND
         ]
         fvg_df = fvg_df.copy()
         if check_balanced:
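Both SMCFVG hunks replace the hard-coded "Bullish"/"Bearish" strings with the shared BULLISH_TREND/BEARISH_TREND constants, so the value written into FVG_SIDE and the value filtered on later can no longer drift apart. For readers unfamiliar with the shift-based FVG rule these hunks touch, here is a minimal standalone sketch of the same idea on a toy DataFrame; the column names and the BULLISH constant are illustrative stand-ins for the class attributes (HIGH_COL, LOW_COL, BULLISH_TREND), not the package's API.

```python
import pandas as pd

BULLISH = "Bullish"  # stand-in for a shared trend constant

df = pd.DataFrame(
    {
        "high": [10.0, 11.0, 13.5, 14.0, 13.0],
        "low":  [9.0, 10.5, 12.5, 13.2, 12.0],
    }
)

# A bullish fair value gap sits on candle i when the high of candle i-1 is
# below the low of candle i+1, i.e. the middle candle left an unfilled gap.
bullish_gap = df["high"].shift(1) < df["low"].shift(-1)

df["fvg_side"] = None
df.loc[bullish_gap, "fvg_side"] = BULLISH

# Filtering against the same constant that was written cannot fall out of
# sync the way a mistyped string literal could.
print(df[df["fvg_side"] == BULLISH])
```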
@@ -1,9 +1,7 @@
 import logging
-from re import S
-import pandas as pd
+from decimal import Decimal
 from core.smc.SMCStruct import SMCStruct
-from pandas.core.strings.accessor import F
-from pandas.io.parquet import catch_warnings
+

 class SMCLiquidity(SMCStruct):
     EQUAL_HIGH_COL = "equal_high"
@@ -30,20 +28,17 @@ class SMCLiquidity(SMCStruct):
         df = data.copy()

         # Identify swing highs
-        df[self.LIQU_HIGH_COL] = 0
+        df[self.LIQU_HIGH_COL] = Decimal(0.0)
         for i in range(pivot_length, len(df) - pivot_length):
             if df[self.HIGH_COL].iloc[i] == max(df[self.HIGH_COL].iloc[i-pivot_length:i+pivot_length+1]):
-                df.loc[df.index[i], self.LIQU_HIGH_COL] = df[self.HIGH_COL].iloc[i]
-
+                df.loc[df.index[i], self.LIQU_HIGH_COL] = df[self.HIGH_COL].iloc[i]
         # Identify swing lows
-        df[self.LIQU_LOW_COL] = 0
+        df[self.LIQU_LOW_COL] = Decimal(0.0)
         for i in range(pivot_length, len(df) - pivot_length):

             if df[self.LOW_COL].iloc[i] == min(df[self.LOW_COL].iloc[i-pivot_length:i+pivot_length+1]):
                 df.loc[df.index[i], self.LIQU_LOW_COL] = df[self.LOW_COL].iloc[i]

-
-
         return df

     def find_EQH_EQL(self, data, trend, end_idx=-1, atr_offset=0.1) -> dict:
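The import hunk above pulls in decimal.Decimal, and this hunk seeds the LIQU_HIGH_COL/LIQU_LOW_COL pivot columns with Decimal(0.0) instead of the integer 0 before individual Decimal prices are written into them. The sketch below illustrates the dtype difference with hypothetical column names; it is not the package's code.

```python
from decimal import Decimal

import pandas as pd

prices = [Decimal("101.5"), Decimal("103.2"), Decimal("102.1")]
df = pd.DataFrame({"high": prices})

df["pivot_int"] = 0             # int64 column
df["pivot_dec"] = Decimal(0.0)  # object column holding Decimal zeros

# Writing a Decimal price into the int64 column forces an implicit dtype
# change (newer pandas versions warn that this kind of incompatible-dtype
# assignment is deprecated); the Decimal-seeded column takes it as-is.
df.loc[df.index[1], "pivot_int"] = df["high"].iloc[1]
df.loc[df.index[1], "pivot_dec"] = df["high"].iloc[1]

print(df.dtypes)
print(df[df["pivot_dec"] > 0])  # downstream "> 0" filters still work
```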
@@ -66,7 +61,7 @@ class SMCLiquidity(SMCStruct):
        try:
            self.check_columns(df, check_columns)
        except ValueError as e:
-            self.logger.warning(f"DataFrame must contain columns {check_columns} : {str(e)}")
+            # self.logger.warning(f"DataFrame must contain columns {check_columns} : {str(e)}")
            df = self._identify_liquidity_pivots(df)

        df = df[(df[self.LIQU_HIGH_COL] > 0) | (df[self.LIQU_LOW_COL] > 0)]
@@ -34,7 +34,7 @@ class SMCOrderBlock(SMCStruct):
            symbol (_type_): _description_
            data (pd.DataFrame): _description_
            side (_type_): _description_ if None, return all OB boxes (both bullish and bearish)
-            pivot_index (int): _description_ starting position
+            start_index (int): _description_ starting position
            is_valid (bool): _description_ find valid OBs that have not been crossed
            if_combine (bool): _description_ whether to combine OBs
        Returns:
@@ -63,8 +63,8 @@ class SMCOrderBlock(SMCStruct):
            else any(df.loc[row.name + 1 :, self.HIGH_COL] >= row[self.OB_HIGH_COL]),
            axis=1,
        )
-
-        ob_df = ob_df[~ob_df[self.OB_WAS_CROSSED]]
+        if is_valid :
+            ob_df = ob_df[~ob_df[self.OB_WAS_CROSSED]]

        if if_combine:
            # Combine OBs
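In 1.0.7 find_OBs always dropped order blocks whose OB_WAS_CROSSED flag was set; in 1.0.9 that filter only runs when is_valid is truthy, so crossed (mitigated) OBs can now be returned as well. A hedged usage sketch of the new behaviour follows; the keyword arguments are taken from the diff, while the data loading is a placeholder.

```python
import pandas as pd

from core.smc.SMCOrderBlock import SMCOrderBlock

candles = pd.read_csv("ohlcv_15m.csv")  # placeholder: any prepared OHLCV DataFrame

smc_ob = SMCOrderBlock()

# As before: only order blocks that have not been crossed.
valid_obs = smc_ob.find_OBs(struct=candles, side="buy", start_index=-1, is_valid=True)

# New in 1.0.9: with is_valid=False the crossed OBs are kept, so they can be
# inspected explicitly via the OB_WAS_CROSSED column.
all_obs = smc_ob.find_OBs(struct=candles, side="buy", start_index=-1, is_valid=False)
crossed_obs = all_obs[all_obs[smc_ob.OB_WAS_CROSSED]]
```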
@@ -238,7 +238,7 @@ class SMCOrderBlock(SMCStruct):
            # df.at[i, self.OB_START_TS_COL] = df.loc[index, self.TIMESTAMP_COL]
            df.at[index, self.OB_ATR] = atr

-    def get_lastest_OB(self, data, trend, start_index=-1):
+    def get_latest_OB(self, data, trend, start_index=-1):
        """
        Get the latest Order Block

@@ -0,0 +1,126 @@
+import logging
+import pandas as pd
+
+from core.smc.SMCFVG import SMCFVG
+from core.smc.SMCOrderBlock import SMCOrderBlock
+
+class SMCPDArray(SMCFVG,SMCOrderBlock):
+    PD_HIGH_COL = "pd_high"
+    PD_LOW_COL = "pd_low"
+    PD_MID_COL = "pd_mid"
+    PD_TYPE_COL = "pd_type"
+    PD_WAS_BALANCED_COL = "pd_was_balanced"
+
+    def __init__(self):
+        super().__init__()
+        self.logger = logging.getLogger(__name__)
+
+    def find_PDArrays(
+        self, struct: pd.DataFrame, side, start_index=-1, balanced=False,
+    ) -> pd.DataFrame:
+        """_summary_
+        Find PDArrays, including Fair Value Gap (FVG) | Order Block (OB) | Breaker Block (BB) | Mitigation Block (BB)
+        Args:
+            data (pd.DataFrame): candlestick data
+            side (_type_): trade direction 'buy'|'sell'
+            start_index (int): index at which to start searching, default -1
+            balanced (bool): whether the PD is still valid, default False. A PD that has been crossed is invalid.
+
+        Returns:
+            pd.DataFrame: _description_
+
+        """
+
+        df = (
+            struct.copy()
+            if start_index == -1
+            else struct.copy().iloc[max(0, start_index - 1) :]
+        )
+
+        df_FVGs = self.find_FVGs(df, side, start_index)
+
+        if not balanced:
+            df_FVGs = df_FVGs[~df_FVGs[self.FVG_WAS_BALANCED]]
+        # self.logger.info(f"fvgs:\n{df_FVGs[['timestamp', self.FVG_SIDE, self.FVG_TOP, self.FVG_BOT, self.FVG_WAS_BALANCED]]}")
+
+        is_valid = not balanced
+        df_OBs = self.find_OBs(struct=df, side=side, start_index=start_index, is_valid=is_valid)
+        # self.logger.info("find_OBs:\n %s", df_OBs)
+
+        # Rename and merge the timestamp columns in a more concise way
+        timestamp_mapping = {self.TIMESTAMP_COL: ['ts_OBs', 'ts_FVGs']}
+        df_OBs = df_OBs.rename(columns={self.TIMESTAMP_COL: timestamp_mapping[self.TIMESTAMP_COL][0]})
+        df_FVGs = df_FVGs.rename(columns={self.TIMESTAMP_COL: timestamp_mapping[self.TIMESTAMP_COL][1]})
+
+        # Merge the DataFrames more efficiently
+        df_PDArrays = pd.concat(
+            [df_OBs, df_FVGs],
+            axis=1,
+            join='outer'
+        ).sort_index()
+
+        # Merge the timestamp columns more clearly
+        df_PDArrays[self.TIMESTAMP_COL] = df_PDArrays[timestamp_mapping[self.TIMESTAMP_COL][0]].fillna(
+            df_PDArrays[timestamp_mapping[self.TIMESTAMP_COL][1]]
+        )
+
+        df_PDArrays[self.PD_WAS_BALANCED_COL] = df_PDArrays[[self.OB_WAS_CROSSED, self.FVG_WAS_BALANCED]].apply(
+            lambda x: x.iloc[0] if pd.notna(x.iloc[0]) else x.iloc[1], axis=1)
+
+        df_PDArrays[self.PD_TYPE_COL] = df_PDArrays[[self.FVG_SIDE, self.OB_DIRECTION_COL]].apply(
+            lambda x: 'FVG-OB' if pd.notna(x.iloc[0]) and pd.notna(x.iloc[1]) else 'FVG' if pd.notna(x.iloc[0]) else 'OB', axis=1
+        )
+
+        df_PDArrays.loc[:, self.PD_HIGH_COL] = df_PDArrays[[self.FVG_TOP, self.OB_HIGH_COL]].max(axis=1)
+        df_PDArrays.loc[:, self.PD_LOW_COL] = df_PDArrays[[self.FVG_BOT, self.OB_LOW_COL]].min(axis=1)
+        df_PDArrays.loc[:, self.PD_MID_COL] = (df_PDArrays[self.PD_HIGH_COL] + df_PDArrays[self.PD_LOW_COL]) / 2
+
+
+        # Filter the PDArrays by the balanced flag and return the matching rows
+
+        return df_PDArrays[df_PDArrays[self.PD_WAS_BALANCED_COL] == balanced]
+
+
+    def get_latest_PDArray(self, df_PDArrays: pd.DataFrame, side, start_index=-1, balanced=False, mask=None) -> dict:
+        """_summary_
+        Filter the PDArrays and keep only those in the given direction
+        Args:
+            df_PDArrays (pd.DataFrame): _description_
+            mask (str): _description_
+
+        Returns:
+            pd.DataFrame: _description_
+        """
+
+        # Check that the data contains the required columns
+        df = df_PDArrays.copy()
+        check_columns = [self.STRUCT_COL]
+        try:
+            self.check_columns(df, check_columns)
+        except ValueError as e:
+            df = self.build_struct(df)
+
+
+        check_columns = [self.PD_TYPE_COL]
+        try:
+            self.check_columns(df, check_columns)
+        except ValueError as e:
+            df = self.find_PDArrays(df, side, start_index, balanced)
+
+        if mask is not None:
+            df = df[mask]
+
+        if len(df) == 0:
+            self.logger.info("No PDArray found.")
+            return None
+        else:
+            self.logger.debug(f"PDArray:\n{df[[self.TIMESTAMP_COL, self.PD_TYPE_COL, self.PD_HIGH_COL, self.PD_LOW_COL, self.PD_MID_COL,self.PD_WAS_BALANCED_COL,self.OB_WAS_CROSSED,self.FVG_WAS_BALANCED]]}")
+            last_pd = df.iloc[-1]
+            return {
+                self.TIMESTAMP_COL: last_pd[self.TIMESTAMP_COL],
+                self.PD_TYPE_COL: last_pd[self.PD_TYPE_COL],
+                self.PD_HIGH_COL: last_pd[self.PD_HIGH_COL],
+                self.PD_LOW_COL: last_pd[self.PD_LOW_COL],
+                self.PD_MID_COL: last_pd[self.PD_MID_COL],
+                self.PD_WAS_BALANCED_COL: last_pd[self.PD_WAS_BALANCED_COL],
+            }
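The SMCPDArray module shown above is effectively rewritten in 1.0.9: find_PDArrays gains a balanced parameter, forwards start_index and is_valid to find_FVGs/find_OBs, records a combined pd_was_balanced flag, and filters its result by that flag; get_latest_PDArray is new and returns the most recent array as a dict. A hedged usage sketch, with the data loading as a placeholder:

```python
import pandas as pd

from core.smc.SMCPDArray import SMCPDArray

candles = pd.read_csv("ohlcv_15m.csv")  # placeholder: any prepared OHLCV DataFrame

pda = SMCPDArray()

# Unbalanced (still tradable) premium/discount arrays on the buy side; each row
# is tagged 'FVG', 'OB', or 'FVG-OB' where a gap and an order block overlap.
arrays = pda.find_PDArrays(struct=candles, side="buy", start_index=-1, balanced=False)

# Most recent array as a dict keyed by the pd_* column constants, or None.
latest = pda.get_latest_PDArray(arrays, side="buy")
if latest is not None:
    print(latest[pda.PD_TYPE_COL], latest[pda.PD_HIGH_COL], latest[pda.PD_LOW_COL])
```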
@@ -265,7 +265,7 @@ class SMCStruct(SMCBase):
            df.at[i, self.STRUCT_HIGH_INDEX_COL] = structure[self.HIGH_START_COL]
            df.at[i, self.STRUCT_LOW_INDEX_COL] = structure[self.LOW_START_COL]

-    def get_last_struct(self, df):
+    def get_latest_struct(self, df):
        """
        Get the latest structure
        """
@@ -1,75 +0,0 @@
-import logging
-import pandas as pd
-
-from core.smc.SMCFVG import SMCFVG
-from core.smc.SMCOrderBlock import SMCOrderBlock
-
-class SMCPDArray(SMCFVG,SMCOrderBlock):
-    PD_HIGH_COL = "pd_high"
-    PD_LOW_COL = "pd_low"
-    PD_MID_COL = "pd_mid"
-    PD_TYPE_COL = "pd_type"
-
-    def __init__(self):
-        super().__init__()
-        self.logger = logging.getLogger(__name__)
-
-    def find_PDArrays(
-        self, struct: pd.DataFrame, side, start_index=-1
-    ) -> pd.DataFrame:
-        """_summary_
-        Find PDArrays, including Fair Value Gap (FVG) | Order Block (OB) | Breaker Block (BB) | Mitigation Block (BB)
-        Args:
-            data (pd.DataFrame): candlestick data
-            side (_type_): trade direction 'buy'|'sell'
-            threshold (_type_): threshold price, usually the CE of the premium/discount zone
-            check_balanced (bool): whether to check if the FVG has been balanced, default True
-            start_index (int): index at which to start searching, default -1
-
-        Returns:
-            pd.DataFrame: _description_
-
-        """
-
-        df = (
-            struct.copy()
-            if start_index == -1
-            else struct.copy().iloc[max(0, start_index - 1) :]
-        )
-
-        df_FVGs = self.find_FVGs(df, side)
-        # self.logger.info(f"fvgs:\n{df_FVGs[['timestamp', self.FVG_SIDE, self.FVG_TOP, self.FVG_BOT, self.FVG_WAS_BALANCED]]}")
-
-
-        df_OBs = self.find_OBs(df, side)
-        # self.logger.info("find_OBs:\n %s", df_OBs)
-
-        # Rename and merge the timestamp columns in a more concise way
-        timestamp_mapping = {self.TIMESTAMP_COL: ['ts_OBs', 'ts_FVGs']}
-        df_OBs = df_OBs.rename(columns={self.TIMESTAMP_COL: timestamp_mapping[self.TIMESTAMP_COL][0]})
-        df_FVGs = df_FVGs.rename(columns={self.TIMESTAMP_COL: timestamp_mapping[self.TIMESTAMP_COL][1]})
-
-        # Merge the DataFrames more efficiently
-        df_PDArrays = pd.concat(
-            [df_OBs, df_FVGs],
-            axis=1,
-            join='outer'
-        ).sort_index()
-
-        # Merge the timestamp columns more clearly
-        df_PDArrays[self.TIMESTAMP_COL] = df_PDArrays[timestamp_mapping[self.TIMESTAMP_COL][0]].fillna(
-            df_PDArrays[timestamp_mapping[self.TIMESTAMP_COL][1]]
-        )
-        df_PDArrays[self.PD_TYPE_COL] = df_PDArrays[[self.FVG_SIDE, self.OB_DIRECTION_COL]].apply(
-            lambda x: 'FVG-OB' if pd.notna(x.iloc[0]) and pd.notna(x.iloc[1]) else 'FVG' if pd.notna(x.iloc[0]) else 'OB', axis=1
-        )
-
-        df_PDArrays.loc[:, self.PD_HIGH_COL] = df_PDArrays[[self.FVG_TOP, self.OB_HIGH_COL]].max(axis=1)
-        df_PDArrays.loc[:, self.PD_LOW_COL] = df_PDArrays[[self.FVG_BOT, self.OB_LOW_COL]].min(axis=1)
-        df_PDArrays.loc[:, self.PD_MID_COL] = (df_PDArrays[self.PD_HIGH_COL] + df_PDArrays[self.PD_LOW_COL]) / 2
-
-
-
-
-        return df_PDArrays
-