kaq-quant-common 0.1.97__py3-none-any.whl → 0.1.99__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kaq_quant_common/api/common/__init__.py +1 -1
- kaq_quant_common/api/common/api_interface.py +38 -38
- kaq_quant_common/api/rest/api_client_base.py +42 -42
- kaq_quant_common/api/rest/instruction/helper/order_helper.py +342 -324
- kaq_quant_common/api/rest/instruction/models/__init__.py +17 -17
- kaq_quant_common/api/rest/instruction/models/transfer.py +32 -32
- kaq_quant_common/api/ws/exchange/models.py +23 -23
- kaq_quant_common/api/ws/exchange/ws_exchange_server.py +440 -440
- kaq_quant_common/common/ddb_table_monitor.py +106 -106
- kaq_quant_common/common/http_monitor.py +69 -69
- kaq_quant_common/common/modules/limit_order_helper.py +81 -81
- kaq_quant_common/common/monitor_base.py +84 -84
- kaq_quant_common/common/monitor_group.py +97 -97
- kaq_quant_common/common/ws_wrapper.py +21 -21
- kaq_quant_common/resources/kaq_ddb_stream_write_resources.py +13 -0
- kaq_quant_common/utils/logger_utils.py +5 -5
- kaq_quant_common/utils/signal_utils.py +23 -23
- kaq_quant_common/utils/uuid_utils.py +5 -5
- {kaq_quant_common-0.1.97.dist-info → kaq_quant_common-0.1.99.dist-info}/METADATA +2 -2
- {kaq_quant_common-0.1.97.dist-info → kaq_quant_common-0.1.99.dist-info}/RECORD +21 -21
- {kaq_quant_common-0.1.97.dist-info → kaq_quant_common-0.1.99.dist-info}/WHEEL +1 -1
|
import threading
import traceback
from abc import abstractmethod

import dolphindb as ddb
import numpy as np
from kaq_quant_common.common.monitor_base import MonitorBase
from kaq_quant_common.utils import logger_utils

# Guards session creation/teardown across monitor instances.
mutex = threading.Lock()


# DDB stream-table subscription monitor.
class DdbTableMonitor(MonitorBase):
    """Subscribe to a DolphinDB stream table and deliver batches to `_handle`.

    Subclasses implement `_on_get_ddb_config` (connection parameters) and
    `_handle` (per-batch callback). Lifecycle is driven by MonitorBase via
    `_do_init` / `_do_start` / `_do_stop`.
    """

    def __init__(self, table_name: str, action_name: str, batch_size=1000, filter=None):
        # Stream table to subscribe to.
        self._table_name = table_name
        # Action name identifying this subscription on the server.
        self._action_name = action_name
        # Rows delivered per callback batch.
        self._batch_size = batch_size
        # BUGFIX: the original used a mutable default argument (filter=[]),
        # which is shared across all instances; normalize None to a fresh list.
        self._filter = [] if filter is None else filter

        # logger
        self._logger = logger_utils.get_logger(self)
        # MonitorBase drives _do_init/_do_start/_do_stop.
        super().__init__()

    # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓ abstract methods
    def _do_init(self):
        # Initialize the ddb connection.
        self._init_ddb()

    def _do_start(self):
        # Start the ddb subscription.
        self._start_subscribe()

    def _do_stop(self):
        # Stop the subscription and close the session.
        self._stop_subscribe()

    # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
    def _init_ddb(self):
        '''
        Create the ddb session and enable stream-table support.
        '''
        try:
            ddb_config = self._on_get_ddb_config()
            host, port, user, passwd = ddb_config['host'], ddb_config['port'], ddb_config['user'], ddb_config['passwd']
            # BUGFIX: the original called mutex.acquire() inside the `try` but
            # mutex.release() unconditionally in `finally`; an exception raised
            # before acquire() (e.g. in _on_get_ddb_config) then raised
            # "RuntimeError: release unlocked lock". `with` pairs them safely.
            with mutex:
                self._session = ddb.session(enableASYNC=True)
                self._host = host
                self._port = port
                self._user = user
                self._passwd = passwd
                self._session.connect(host, port, user, passwd)
                self._session.enableStreaming()
        except Exception as e:
            self._logger.error(f'DdbTableMonitor._init_ddb error: {str(e)} - {str(traceback.format_exc())}')

    # Start the subscription.
    def _start_subscribe(self):
        '''
        Subscribe to the configured ddb table.
        '''
        self._session.subscribe(
            self._host,
            self._port,
            self._handle,
            tableName=self._table_name,
            actionName=self._action_name,
            filter=np.array(self._filter),
            offset=-1,  # only new rows; no historical replay
            batchSize=self._batch_size,
            throttle=5,
            msgAsTable=True,
        )
        self._logger.info(f'开始订阅 {self._host}:{self._port} {self._table_name} - {self._action_name}')

    def _stop_subscribe(self):
        # TODO: query the server for the topic instead of assuming it exists:
        # script = f"""
        # existsSubscriptionTopic(,`{self._table_name},`{self._action_name})
        # """
        # exists_topic = self._session.run(script)
        exists_topic = True
        if exists_topic is True:
            self._session.unsubscribe(self._host, self._port, self._table_name, self._action_name)
            self._logger.info(f'取消订阅 {self._table_name} - {self._action_name}')
        if not self._session.isClosed():
            self._session.close()

    # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓ abstract methods
    # Must return the ddb config containing host, port, user, passwd.
    @abstractmethod
    def _on_get_ddb_config(self) -> dict:
        # BUGFIX: the original declared `(self, data)` but the only call site
        # (`self._on_get_ddb_config()` in _init_ddb) passes no argument, so a
        # subclass implementing the declared signature crashed with TypeError.
        pass

    @abstractmethod
    def _handle(self, data):
        # Per-batch subscription callback; `data` arrives as a table
        # (msgAsTable=True above).
        pass
import threading
import time
from abc import abstractmethod

from kaq_quant_common.common.monitor_base import MonitorBase
from kaq_quant_common.common.ws_wrapper import WsWrapper
from kaq_quant_common.utils import logger_utils


# Wraps a periodic http request on a background thread.
class HttpMonitor(MonitorBase):
    """Run `_do_request` roughly every `interval` seconds on a daemon thread.

    Subclasses implement `_do_request`; MonitorBase drives start/stop.
    """

    def __init__(self, interval=5):
        super().__init__()
        # Seconds between consecutive requests.
        self._interval = interval
        self._logger = logger_utils.get_logger()
        # BUGFIX: created eagerly so _do_stop() is safe even when the monitor
        # was never started. The original only assigned this in _do_start(),
        # so stopping an unstarted monitor raised AttributeError despite the
        # `is not None` guard below clearly intending to handle that case.
        self._ticker_thread_event = None

    def _do_start(self):
        # Spawn a thread that performs the http request on a timer.
        self._ticker_thread_event = threading.Event()

        def http_request():
            # Timestamp of the previous request (epoch seconds); 0 forces an
            # immediate first request.
            last_request_time = 0
            while True:
                # Exit when stop has been requested.
                if self._ticker_thread_event.is_set():
                    self._logger.info("ticker thread exit")
                    break

                current_time = time.time()
                # Not due yet: sleep briefly and re-check (keeps stop latency
                # around 100ms regardless of the interval).
                if current_time - last_request_time < self._interval:
                    time.sleep(0.1)
                    continue

                last_request_time = time.time()

                # self._logger.debug('tick start')
                try:
                    self._do_request()
                except Exception as e:
                    # A failing request must not kill the ticker thread.
                    self._logger.error(f"http request error: {e}")
                # self._logger.debug('tick finish')
                time.sleep(0.1)

        # Start the worker as a daemon thread so it never blocks interpreter exit.
        self._ticker_thread = threading.Thread(target=http_request)
        self._ticker_thread.daemon = True
        self._ticker_thread.start()

    def _do_stop(self):
        if self._ticker_thread_event is not None:
            self._ticker_thread_event.set()

    # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓

    @abstractmethod
    def _do_request(self):
        """
        Implemented by subclasses: perform one http request.
        """
        pass

    # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
# Writes go through a background flusher thread so callers never block on ddb I/O.
import datetime
import threading
import time

import pandas as pd
from kaq_quant_common.resources.kaq_ddb_stream_write_resources import (
    KaqQuantDdbStreamWriteRepository,
)
from kaq_quant_common.utils import logger_utils


class LimitOrderHelper:
    """Cache the latest snapshot per symbol and flush them to a ddb stream table.

    Producers call `push_data`; a daemon thread batches the newest snapshot of
    every symbol and writes them via `save2stream_batch` every
    `_flush_interval_ms` milliseconds. Overwritten (unflushed) snapshots are
    intentionally dropped — only the latest per symbol is persisted.
    """

    def __init__(self, ddb: KaqQuantDdbStreamWriteRepository, ddb_table_name: str):
        # Latest snapshot per symbol plus the lock guarding it.
        self._latest_snapshots: dict[str, tuple] = {}
        self._latest_lock = threading.Lock()
        # Flush cadence, default 100ms.
        self._flush_interval_ms = 100
        self._stop_event = threading.Event()
        self._flusher_thread = threading.Thread(target=self._flush_loop, daemon=True)
        self._flusher_thread.name = "LimitOrderHelperFlusherThread"

        # Target repository and stream table.
        self._ddb = ddb
        self._ddb_table_name = ddb_table_name

        self._logger = logger_utils.get_logger(self)

        # Callback (symbol, data, arg) -> DataFrame; must be set via
        # set_build_data() before start().
        self._build_data: callable = None

    def set_build_data(self, build_data: callable):
        self._build_data = build_data

    def push_data(self, symbol: str, data: dict, arg: dict = None):
        # Keep only the newest snapshot per symbol; an unflushed older one is
        # silently replaced.
        with self._latest_lock:
            self._latest_snapshots[symbol] = (data, arg)

    def start(self):
        self._flusher_thread.start()

    def stop(self):
        self._stop_event.set()
        self._flusher_thread.join()

    def _flush_loop(self):
        # Periodically batch the latest snapshot of every symbol into the table.
        while not self._stop_event.is_set():
            to_process = None
            # Swap the cache out under the lock so producers are blocked only
            # for the copy, not for the build/write below.
            with self._latest_lock:
                if self._latest_snapshots:
                    to_process = list(self._latest_snapshots.items())
                    self._latest_snapshots.clear()

            if to_process:
                now = int(datetime.datetime.now().timestamp() * 1000)
                # PERF: the original called pd.concat once per symbol inside the
                # loop (quadratic copying); collect frames and concat once.
                frames = []

                for symbol, (data, arg) in to_process:
                    try:
                        sub_df = self._build_data(symbol, data, arg)
                    except Exception as e:
                        # BUGFIX: an exception from the build callback used to
                        # propagate and silently kill the flusher thread.
                        self._logger.error(f"build data error: {symbol} - {e}")
                        continue
                    # BUGFIX: guard empty results — iloc[0] on an empty frame raised.
                    if sub_df is None or sub_df.empty:
                        continue
                    # Log when the batch is lagging noticeably behind wall time.
                    data_first_now = int(sub_df["create_time"].iloc[0])
                    if now - data_first_now > 2000:
                        self._logger.debug(f"数据时间{data_first_now} 与当前时间{now} 差值{now - data_first_now} 超过2000ms")
                    frames.append(sub_df)

                # Flush the combined batch.
                if frames:
                    df = frames[0] if len(frames) == 1 else pd.concat(frames, ignore_index=True)
                    try:
                        self._ddb.save2stream_batch(self._ddb_table_name, df=df)
                    except Exception as e:
                        # Avoid a write failure killing the flusher thread.
                        self._logger.error(f"批量写入失败: {e}")

            time.sleep(self._flush_interval_ms / 1000.0)