kaq-quant-common 0.2.7__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,106 +1,106 @@
- import threading
- import traceback
- from abc import abstractmethod
-
- import dolphindb as ddb
- import numpy as np
- from kaq_quant_common.common.monitor_base import MonitorBase
- from kaq_quant_common.utils import logger_utils
-
- mutex = threading.Lock()
-
-
- # ddb table subscription listener
- class DdbTableMonitor(MonitorBase):
-
-     def __init__(self, table_name: str, action_name: str, batch_size=1000, filter=[]):
-         # table name
-         self._table_name = table_name
-         #
-         self._action_name = action_name
-         #
-         self._batch_size = batch_size
-         #
-         self._filter = filter
-
-         # logger
-         self._logger = logger_utils.get_logger(self)
-         #
-         super().__init__()
-
-     # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓ abstract methods
-     def _do_init(self):
-         # initialize ddb
-         self._init_ddb()
-
-     def _do_start(self):
-         # start the ddb subscription
-         self._start_subscribe()
-
-     def _do_stop(self):
-         # stop the subscription
-         self._stop_subscribe()
-
-     # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
-     # initialize ddb
-     def _init_ddb(self):
-         '''
-         Create the ddb connection && enable ddb stream table support
-         '''
-         try:
-             ddb_config = self._on_get_ddb_config()
-             host, port, user, passwd = ddb_config['host'], ddb_config['port'], ddb_config['user'], ddb_config['passwd']
-             mutex.acquire()
-             self._session = ddb.session(enableASYNC=True)
-             self._host = host
-             self._port = port
-             self._user = user
-             self._passwd = passwd
-             self._session.connect(host, port, user, passwd)
-             self._session.enableStreaming()
-         except Exception as e:
-             self._logger.error(f'DdbTableMonitor._init_ddb error: {str(e)} - {str(traceback.format_exc())}')
-         finally:
-             mutex.release()
-
-     # start the subscription
-     def _start_subscribe(self):
-         '''
-         Subscribe to the ddb table
-         '''
-         self._session.subscribe(
-             self._host,
-             self._port,
-             self._handle,
-             tableName=self._table_name,
-             actionName=self._action_name,
-             filter=np.array(self._filter),
-             offset=-1,
-             batchSize=self._batch_size,
-             throttle=5,
-             msgAsTable=True,
-         )
-         self._logger.info(f'Started subscription {self._host}:{self._port} {self._table_name} - {self._action_name}')
-
-     def _stop_subscribe(self):
-         # TODO
-         # script = f"""
-         # existsSubscriptionTopic(,`{self._table_name},`{self._action_name})
-         # """
-         # exitsTopic = self._session.run(script)
-         exitsTopic = True
-         if exitsTopic is True:
-             self._session.unsubscribe(self._host, self._port, self._table_name, self._action_name)
-             self._logger.info(f'Unsubscribed {self._table_name} - {self._action_name}')
-         if not self._session.isClosed():
-             self._session.close()
-
-     # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓ abstract methods
-     # must return the ddb config containing host, port, user, passwd; add type hints
-     @abstractmethod
-     def _on_get_ddb_config(self, data) -> dict:
-         pass
-
-     @abstractmethod
-     def _handle(self, data):
-         pass
+ import threading
+ import traceback
+ from abc import abstractmethod
+
+ import dolphindb as ddb
+ import numpy as np
+ from kaq_quant_common.common.monitor_base import MonitorBase
+ from kaq_quant_common.utils import logger_utils
+
+ mutex = threading.Lock()
+
+
+ # ddb table subscription listener
+ class DdbTableMonitor(MonitorBase):
+
+     def __init__(self, table_name: str, action_name: str, batch_size=1000, filter=[]):
+         # table name
+         self._table_name = table_name
+         #
+         self._action_name = action_name
+         #
+         self._batch_size = batch_size
+         #
+         self._filter = filter
+
+         # logger
+         self._logger = logger_utils.get_logger(self)
+         #
+         super().__init__()
+
+     # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓ abstract methods
+     def _do_init(self):
+         # initialize ddb
+         self._init_ddb()
+
+     def _do_start(self):
+         # start the ddb subscription
+         self._start_subscribe()
+
+     def _do_stop(self):
+         # stop the subscription
+         self._stop_subscribe()
+
+     # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
+     # initialize ddb
+     def _init_ddb(self):
+         '''
+         Create the ddb connection && enable ddb stream table support
+         '''
+         try:
+             ddb_config = self._on_get_ddb_config()
+             host, port, user, passwd = ddb_config['host'], ddb_config['port'], ddb_config['user'], ddb_config['passwd']
+             mutex.acquire()
+             self._session = ddb.session(enableASYNC=True)
+             self._host = host
+             self._port = port
+             self._user = user
+             self._passwd = passwd
+             self._session.connect(host, port, user, passwd)
+             self._session.enableStreaming()
+         except Exception as e:
+             self._logger.error(f'DdbTableMonitor._init_ddb error: {str(e)} - {str(traceback.format_exc())}')
+         finally:
+             mutex.release()
+
+     # start the subscription
+     def _start_subscribe(self):
+         '''
+         Subscribe to the ddb table
+         '''
+         self._session.subscribe(
+             self._host,
+             self._port,
+             self._handle,
+             tableName=self._table_name,
+             actionName=self._action_name,
+             filter=np.array(self._filter),
+             offset=-1,
+             batchSize=self._batch_size,
+             throttle=5,
+             msgAsTable=True,
+         )
+         self._logger.info(f'Started subscription {self._host}:{self._port} {self._table_name} - {self._action_name}')
+
+     def _stop_subscribe(self):
+         # TODO
+         # script = f"""
+         # existsSubscriptionTopic(,`{self._table_name},`{self._action_name})
+         # """
+         # exitsTopic = self._session.run(script)
+         exitsTopic = True
+         if exitsTopic is True:
+             self._session.unsubscribe(self._host, self._port, self._table_name, self._action_name)
+             self._logger.info(f'Unsubscribed {self._table_name} - {self._action_name}')
+         if not self._session.isClosed():
+             self._session.close()
+
+     # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓ abstract methods
+     # must return the ddb config containing host, port, user, passwd; add type hints
+     @abstractmethod
+     def _on_get_ddb_config(self, data) -> dict:
+         pass
+
+     @abstractmethod
+     def _handle(self, data):
+         pass
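
For reference, a subclass of the DdbTableMonitor shown above only needs to supply the connection config and the message handler. The sketch below is a minimal, hypothetical example: the import path is assumed (the diff does not show file names), the connection details are placeholders, and because _init_ddb calls _on_get_ddb_config() with no arguments while the abstract signature declares a data parameter, the override gives that parameter a default.

from kaq_quant_common.common.ddb_table_monitor import DdbTableMonitor  # assumed module path


class TickMonitor(DdbTableMonitor):

    def _on_get_ddb_config(self, data=None) -> dict:
        # placeholder connection details
        return {'host': '127.0.0.1', 'port': 8848, 'user': 'admin', 'passwd': '123456'}

    def _handle(self, data):
        # with msgAsTable=True the subscription delivers each batch as a table
        print(f'received batch of {len(data)} rows')

How the monitor is started depends on MonitorBase, which is not part of this diff; presumably it drives the _do_init / _do_start / _do_stop hooks shown above.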
@@ -1,69 +1,69 @@
- import threading
- import time
- from abc import abstractmethod
-
- from kaq_quant_common.common.monitor_base import MonitorBase
- from kaq_quant_common.common.ws_wrapper import WsWrapper
- from kaq_quant_common.utils import logger_utils
-
-
- # wraps a scheduled http request
- class HttpMonitor(MonitorBase):
-     def __init__(self, interval=5):
-         super().__init__()
-         # execution interval
-         self._interval = interval
-         self._logger = logger_utils.get_logger()
-
-     def _do_start(self):
-         # start a thread that performs the http request on a schedule
-         self._ticker_thread_event = threading.Event()
-
-         def http_request():
-             # time of the last request
-             last_request_time = 0
-             while True:
-                 # check whether we should exit
-                 if self._ticker_thread_event.is_set():
-                     self._logger.info("ticker thread exit")
-                     break
-
-                 # current time
-                 current_time = time.time()
-                 # wait if not enough time has passed since the last request
-                 if current_time - last_request_time < self._interval:
-                     time.sleep(0.1)
-                     continue
-
-                 #
-                 last_request_time = time.time()
-
-                 # self._logger.debug('tick start')
-                 try:
-                     self._do_request()
-                 except Exception as e:
-                     self._logger.error(f"http request error: {e}")
-                 # self._logger.debug('tick finish')
-                 #
-                 time.sleep(0.1)
-
-         # start the thread
-         self._ticker_thread = threading.Thread(target=http_request)
-         # make it a daemon thread
-         self._ticker_thread.daemon = True
-         self._ticker_thread.start()
-
-     def _do_stop(self):
-         if self._ticker_thread_event is not None:
-             self._ticker_thread_event.set()
-
-     # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
-
-     @abstractmethod
-     def _do_request(self):
-         """
-         Implemented by subclasses
-         """
-         pass
-
-     # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
+ import threading
+ import time
+ from abc import abstractmethod
+
+ from kaq_quant_common.common.monitor_base import MonitorBase
+ from kaq_quant_common.common.ws_wrapper import WsWrapper
+ from kaq_quant_common.utils import logger_utils
+
+
+ # wraps a scheduled http request
+ class HttpMonitor(MonitorBase):
+     def __init__(self, interval=5):
+         super().__init__()
+         # execution interval
+         self._interval = interval
+         self._logger = logger_utils.get_logger()
+
+     def _do_start(self):
+         # start a thread that performs the http request on a schedule
+         self._ticker_thread_event = threading.Event()
+
+         def http_request():
+             # time of the last request
+             last_request_time = 0
+             while True:
+                 # check whether we should exit
+                 if self._ticker_thread_event.is_set():
+                     self._logger.info("ticker thread exit")
+                     break
+
+                 # current time
+                 current_time = time.time()
+                 # wait if not enough time has passed since the last request
+                 if current_time - last_request_time < self._interval:
+                     time.sleep(0.1)
+                     continue
+
+                 #
+                 last_request_time = time.time()
+
+                 # self._logger.debug('tick start')
+                 try:
+                     self._do_request()
+                 except Exception as e:
+                     self._logger.error(f"http request error: {e}")
+                 # self._logger.debug('tick finish')
+                 #
+                 time.sleep(0.1)
+
+         # start the thread
+         self._ticker_thread = threading.Thread(target=http_request)
+         # make it a daemon thread
+         self._ticker_thread.daemon = True
+         self._ticker_thread.start()
+
+     def _do_stop(self):
+         if self._ticker_thread_event is not None:
+             self._ticker_thread_event.set()
+
+     # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
+
+     @abstractmethod
+     def _do_request(self):
+         """
+         Implemented by subclasses
+         """
+         pass
+
+     # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
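
HttpMonitor runs _do_request on a daemon ticker thread roughly every interval seconds and logs (rather than propagates) any exception, so a subclass only implements the request itself. A minimal, hypothetical subclass is sketched below; the import path is assumed and requests is used purely for illustration, it is not a dependency of this package.

import requests  # illustrative HTTP client

from kaq_quant_common.common.http_monitor import HttpMonitor  # assumed module path


class HealthCheckMonitor(HttpMonitor):

    def __init__(self, url: str, interval=5):
        super().__init__(interval=interval)
        self._url = url

    def _do_request(self):
        # called by the ticker thread; exceptions are caught and logged by the loop,
        # so a failed probe does not kill the thread
        resp = requests.get(self._url, timeout=3)
        resp.raise_for_status()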
@@ -26,10 +26,10 @@ class FundingRateHelper:
          self._stop_event = threading.Event()
          self._flusher_thread = threading.Thread(target=self._flush_loop, daemon=True)
          self._flusher_thread.name = "FundingRateHelperFlusherThread"
-         self._is_df = False

          #
          self._ddb = ddb
+         self._isMtwDdb = isinstance(self._ddb, KaqQuantDdbStreamMTWWriteRepository)
          self._ddb_table_name = ddb_table_name

          #
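
Both helpers in this release replace the per-flush _is_df flag with a single check made at construction time: the write path now depends on whether the injected repository is a KaqQuantDdbStreamMTWWriteRepository. A minimal sketch of that pattern, reduced to the two attributes involved (constructor arguments of the repository classes are not shown in this diff):

from kaq_quant_common.resources.kaq_ddb_stream_write_resources import (
    KaqQuantDdbStreamMTWWriteRepository,
)


class WriterAwareHelper:

    def __init__(self, ddb, ddb_table_name: str):
        self._ddb = ddb
        self._ddb_table_name = ddb_table_name
        # decide the write path once instead of re-detecting the payload type
        # on every flush cycle
        self._isMtwDdb = isinstance(self._ddb, KaqQuantDdbStreamMTWWriteRepository)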
@@ -66,39 +66,54 @@ class FundingRateHelper:
              now = int(datetime.datetime.now().timestamp() * 1000)

              for symbol, (data, arg) in to_process:
-                 sub_df = self._build_data(symbol, data, arg)
-                 if isinstance(sub_df, pd.DataFrame):
-                     self._is_df = True
-                     # log it
-                     # data_first_now = int(sub_df["create_time"].iloc[0])
-                     # if now - data_first_now > 2000:
-                     # self._logger.debug(
-                     # f"data time {data_first_now} vs current time {now}: gap of {now - data_first_now} exceeds 2000ms"
-                     # )
-
-                     if df is None:
-                         df = sub_df
+                 sub_data = self._build_data(symbol, data, arg)
+
+                 if not self._isMtwDdb:
+                     if is_df:
+                         # df payloads are written the df way
+                         # data_first_now = int(sub_data["create_time"].iloc[0])
+                         # if now - data_first_now > 2000:
+                         # self._logger.warning(f"data time {data_first_now} vs current time {now}: gap of {now - data_first_now} exceeds 2000ms")
+                         # pass
+
+                         if df is None:
+                             df = sub_data
+                         else:
+                             df = pd.concat([df, sub_data], ignore_index=True)
                      else:
-                         df = pd.concat([df, sub_df], ignore_index=True)
+                         # array payloads are written the array way
+                         # data_first_now = int(sub_data[0])
+                         # if now - data_first_now > 2000:
+                         # self._logger.warning(f"data time {data_first_now} vs current time {now}: gap of {now - data_first_now} exceeds 2000ms")
+                         # pass
+
+                         list_data.append(sub_data)
                  else:
-                     list_data = sub_df
+                     # can only be an array
+                     if len(sub_data) > 0:
+                         # write directly via save2stream_list
+                         try:
+                             self._ddb.save2stream_list(sub_data)
+                         except Exception as e:
+                             # avoid a flush exception killing the thread
+                             self._logger.error(f"Batch array write failed: {e}")

              # persist to the DB
-             if self._is_df:
+             if not self._isMtwDdb:
+                 # handles both df and array
                  if df is not None and not df.empty:
                      try:
                          self._ddb.save2stream_batch(self._ddb_table_name, df=df)
                      except Exception as e:
                          # avoid a flush exception killing the thread
                          self._logger.error(f"Batch df write failed: {e}")
-             else:
                  if len(list_data) > 0:
                      try:
-                         self._ddb.save2stream_list(list_data)
+                         self._ddb.save2stream_batch_list(self._ddb_table_name, data=list_data)
                      except Exception as e:
                          # avoid a flush exception killing the thread
-                         self._logger.error(f"Batch array write failed: {e}")
+                         self._logger.error(f"Batch list write failed: {e}")

-             # only sleep in DataFrame mode; array handling is paced by ddb itself
-             if self._is_df:
+             # MTW pacing is handled by ddb itself
+             if self._isMtwDdb:
                  time.sleep(self._flush_interval_ms / 1000.0)
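
The reworked _flush_loop now branches on the writer type rather than on the payload type of the last batch: with an MTW repository every payload is handed to save2stream_list immediately and pacing is left to ddb, while with a non-MTW writer DataFrame payloads are accumulated and flushed via save2stream_batch and array payloads via save2stream_batch_list. The sketch below isolates that dispatch; the Protocol classes are stand-ins that only mirror the method names used in this diff, not the package's real repository interfaces.

from typing import Any, List, Optional, Protocol

import pandas as pd


class MtwWriter(Protocol):
    # stand-in for KaqQuantDdbStreamMTWWriteRepository as used above
    def save2stream_list(self, data: List[Any]) -> None: ...


class BatchWriter(Protocol):
    # stand-in for KaqQuantDdbStreamWriteRepository as used above
    def save2stream_batch(self, table_name: str, df: pd.DataFrame) -> None: ...
    def save2stream_batch_list(self, table_name: str, data: List[Any]) -> None: ...


def flush(rows: List[Any], writer, table_name: str, is_mtw: bool) -> None:
    # rows: payloads produced by _build_data, either DataFrames or plain lists
    df: Optional[pd.DataFrame] = None
    list_data: List[Any] = []
    for row in rows:
        if is_mtw:
            # MTW path: hand each payload to ddb immediately, pacing is ddb's job
            writer.save2stream_list(row)
        elif isinstance(row, pd.DataFrame):
            df = row if df is None else pd.concat([df, row], ignore_index=True)
        else:
            list_data.append(row)
    if not is_mtw:
        if df is not None and not df.empty:
            writer.save2stream_batch(table_name, df=df)
        if list_data:
            writer.save2stream_batch_list(table_name, data=list_data)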
@@ -4,7 +4,6 @@ import threading
  import time

  import pandas as pd
-
  from kaq_quant_common.resources.kaq_ddb_stream_write_resources import (
      KaqQuantDdbStreamMTWWriteRepository,
      KaqQuantDdbStreamWriteRepository,
@@ -25,10 +24,10 @@ class LimitOrderHelper:
          self._stop_event = threading.Event()
          self._flusher_thread = threading.Thread(target=self._flush_loop, daemon=True)
          self._flusher_thread.name = "LimitOrderHelperFlusherThread"
-         self._is_df = False

          #
          self._ddb = ddb
+         self._isMtwDdb = isinstance(self._ddb, KaqQuantDdbStreamMTWWriteRepository)
          self._ddb_table_name = ddb_table_name

          #
@@ -51,6 +50,10 @@ class LimitOrderHelper:
          self._flusher_thread.join()

      def _flush_loop(self):
+         cum_count = 0
+         cum_convert_time = 0
+         cum_write_ddb_time = 0
+         cum_total_use_time = 0
          # periodically flush each symbol's latest snapshot to the DB in batches
          while not self._stop_event.is_set():
              to_process = None
@@ -65,42 +68,76 @@ class LimitOrderHelper:
              now = int(datetime.datetime.now().timestamp() * 1000)

              for symbol, (data, arg) in to_process:
-                 sub_df = self._build_data(symbol, data, arg)
-                 if isinstance(sub_df, pd.DataFrame):
-                     self._is_df = True
-                     # log it
-                     data_first_now = int(sub_df["create_time"].iloc[0])
-                     if now - data_first_now > 2000:
-                         self._logger.warning(f"data time {data_first_now} vs current time {now}: gap of {now - data_first_now} exceeds 2000ms")
-
-                     if df is None:
-                         df = sub_df
+                 sub_data = self._build_data(symbol, data, arg)
+
+                 if not self._isMtwDdb:
+                     # can be an array or a dataFrame
+                     is_df = type(sub_data) is pd.DataFrame
+
+                     if is_df:
+                         # df payloads are written the df way
+                         data_first_now = int(sub_data["create_time"].iloc[0])
+                         if now - data_first_now > 2000:
+                             self._logger.debug(f"data time {data_first_now} vs current time {now}: gap of {now - data_first_now} exceeds 2000ms")
+                             pass
+
+                         if df is None:
+                             df = sub_data
+                         else:
+                             df = pd.concat([df, sub_data], ignore_index=True)
                      else:
-                         df = pd.concat([df, sub_df], ignore_index=True)
+                         # array payloads are written the array way
+                         data_first_now = int(sub_data[0])
+                         if now - data_first_now > 2000:
+                             self._logger.debug(f"data time {data_first_now} vs current time {now}: gap of {now - data_first_now} exceeds 2000ms")
+                             pass
+
+                         list_data.append(sub_data)
                  else:
-                     list_data = sub_df
+                     # can only be an array
+                     if len(sub_data) > 0:
+                         # write directly via save2stream_list
+                         try:
+                             self._ddb.save2stream_list(sub_data)
+                         except Exception as e:
+                             # avoid a flush exception killing the thread
+                             self._logger.error(f"Batch array write failed: {e}")
+
+                 convert_time = int(datetime.datetime.now().timestamp() * 1000)

              # persist to the DB
-             if self._is_df:
+             if not self._isMtwDdb:
+                 # handles both df and array
                  if df is not None and not df.empty:
                      try:
                          self._ddb.save2stream_batch(self._ddb_table_name, df=df)
                      except Exception as e:
                          # avoid a flush exception killing the thread
                          self._logger.error(f"Batch df write failed: {e}")
-             else:
                  if len(list_data) > 0:
                      try:
-                         self._ddb.save2stream_list(list_data)
+                         self._ddb.save2stream_batch_list(self._ddb_table_name, data=list_data)
                      except Exception as e:
                          # avoid a flush exception killing the thread
-                         self._logger.error(f"Batch array write failed: {e}")
+                         self._logger.error(f"Batch list write failed: {e}")

              # collect some stats
              end = int(datetime.datetime.now().timestamp() * 1000)
-             if (end - now) > self._flush_interval_ms:
-                 self._logger.warning(f"Batch write of {len(to_process)} rows took {end - now}ms")
-
-             # only sleep in DataFrame mode; array handling is paced by ddb itself
-             if self._is_df:
+             total_use_time = end - now
+             convert_use = convert_time - now
+             write_ddb_use = total_use_time - convert_use
+
+             #
+             cum_count += len(to_process)
+             cum_convert_time += convert_use
+             cum_write_ddb_time += write_ddb_use
+             cum_total_use_time += total_use_time
+
+             if total_use_time > 500 and cum_count > 0:
+                 self._logger.debug(
+                     f"Batch write of {len(to_process)} rows took {total_use_time}ms (avg {cum_total_use_time / cum_count:.2f}ms), convert {convert_use}ms (avg {cum_convert_time / cum_count:.2f}ms), ddb write {write_ddb_use}ms (avg {cum_write_ddb_time / cum_count:.2f}ms)"
+                 )
+
+             # MTW pacing is handled by ddb itself
+             if self._isMtwDdb:
                  time.sleep(self._flush_interval_ms / 1000.0)
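
LimitOrderHelper's loop also gains timing instrumentation: each flush cycle is split into a convert phase (building the payloads) and a ddb-write phase, cumulative totals are kept so per-row running averages can be reported, and a debug line is emitted only when a cycle exceeds 500 ms. A standalone sketch of that bookkeeping, with class and method names of my own choosing:

from typing import Optional


class FlushTimer:
    # accumulates per-cycle timings so slow cycles can be reported with per-row averages

    def __init__(self) -> None:
        self.cum_count = 0
        self.cum_convert = 0
        self.cum_write = 0
        self.cum_total = 0

    def record(self, rows: int, start_ms: int, convert_ms: int, end_ms: int) -> Optional[str]:
        total = end_ms - start_ms
        convert = convert_ms - start_ms
        write = total - convert
        self.cum_count += rows
        self.cum_convert += convert
        self.cum_write += write
        self.cum_total += total
        if total > 500 and self.cum_count > 0:
            return (
                f"wrote {rows} rows in {total}ms (avg {self.cum_total / self.cum_count:.2f}ms), "
                f"convert {convert}ms (avg {self.cum_convert / self.cum_count:.2f}ms), "
                f"ddb write {write}ms (avg {self.cum_write / self.cum_count:.2f}ms)"
            )
        return None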