kaq-quant-common 0.1.83__py3-none-any.whl → 0.1.85__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kaq_quant_common/api/common/__init__.py +1 -1
- kaq_quant_common/api/common/api_interface.py +38 -38
- kaq_quant_common/api/rest/api_client_base.py +42 -42
- kaq_quant_common/api/rest/instruction/helper/order_helper.py +324 -324
- kaq_quant_common/api/rest/instruction/instruction_client.py +76 -88
- kaq_quant_common/api/rest/instruction/instruction_server_base.py +133 -132
- kaq_quant_common/api/rest/instruction/models/__init__.py +17 -17
- kaq_quant_common/api/rest/instruction/models/account.py +24 -24
- kaq_quant_common/api/rest/instruction/models/order.py +248 -223
- kaq_quant_common/api/rest/instruction/models/position.py +56 -56
- kaq_quant_common/api/rest/instruction/models/transfer.py +32 -32
- kaq_quant_common/api/ws/exchange/models.py +23 -23
- kaq_quant_common/api/ws/exchange/ws_exchange_server.py +440 -440
- kaq_quant_common/common/ddb_table_monitor.py +106 -106
- kaq_quant_common/common/http_monitor.py +69 -69
- kaq_quant_common/common/modules/limit_order_helper.py +81 -81
- kaq_quant_common/common/modules/limit_order_symbol_monitor.py +76 -76
- kaq_quant_common/common/modules/limit_order_symbol_monitor_group.py +69 -69
- kaq_quant_common/common/monitor_base.py +84 -84
- kaq_quant_common/common/monitor_group.py +97 -97
- kaq_quant_common/common/statistics/funding_rate_history_statistics.py +208 -0
- kaq_quant_common/common/statistics/kline_history_statistics.py +36 -13
- kaq_quant_common/common/ws_wrapper.py +21 -21
- kaq_quant_common/resources/kaq_ddb_stream_write_resources.py +2 -0
- kaq_quant_common/utils/logger_utils.py +5 -5
- kaq_quant_common/utils/signal_utils.py +23 -23
- kaq_quant_common/utils/uuid_utils.py +5 -5
- {kaq_quant_common-0.1.83.dist-info → kaq_quant_common-0.1.85.dist-info}/METADATA +1 -1
- {kaq_quant_common-0.1.83.dist-info → kaq_quant_common-0.1.85.dist-info}/RECORD +30 -29
- {kaq_quant_common-0.1.83.dist-info → kaq_quant_common-0.1.85.dist-info}/WHEEL +1 -1
@@ -1,106 +1,106 @@ kaq_quant_common/common/ddb_table_monitor.py
(whole-file rewrite; the removed and re-added bodies are identical)

import threading
import traceback
from abc import abstractmethod

import dolphindb as ddb
import numpy as np
from kaq_quant_common.common.monitor_base import MonitorBase
from kaq_quant_common.utils import logger_utils

mutex = threading.Lock()


# DDB table subscription monitor
class DdbTableMonitor(MonitorBase):

    def __init__(self, table_name: str, action_name: str, batch_size=1000, filter=[]):
        # table name
        self._table_name = table_name
        #
        self._action_name = action_name
        #
        self._batch_size = batch_size
        #
        self._filter = filter

        # logger
        self._logger = logger_utils.get_logger(self)
        #
        super().__init__()

    # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓ abstract methods
    def _do_init(self):
        # initialize ddb
        self._init_ddb()

    def _do_start(self):
        # start the ddb subscription
        self._start_subscribe()

    def _do_stop(self):
        # stop the subscription
        self._stop_subscribe()

    # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
    # initialize ddb
    def _init_ddb(self):
        '''
        Create the ddb connection && enable ddb stream-table support
        '''
        try:
            ddb_config = self._on_get_ddb_config()
            host, port, user, passwd = ddb_config['host'], ddb_config['port'], ddb_config['user'], ddb_config['passwd']
            mutex.acquire()
            self._session = ddb.session(enableASYNC=True)
            self._host = host
            self._port = port
            self._user = user
            self._passwd = passwd
            self._session.connect(host, port, user, passwd)
            self._session.enableStreaming()
        except Exception as e:
            self._logger.error(f'DdbTableMonitor._init_ddb error: {str(e)} - {str(traceback.format_exc())}')
        finally:
            mutex.release()

    # start the subscription
    def _start_subscribe(self):
        '''
        Subscribe to the ddb table
        '''
        self._session.subscribe(
            self._host,
            self._port,
            self._handle,
            tableName=self._table_name,
            actionName=self._action_name,
            filter=np.array(self._filter),
            offset=-1,
            batchSize=self._batch_size,
            throttle=5,
            msgAsTable=True,
        )
        self._logger.info(f'subscribed to {self._host}:{self._port} {self._table_name} - {self._action_name}')

    def _stop_subscribe(self):
        # TODO
        # script = f"""
        # existsSubscriptionTopic(,`{self._table_name},`{self._action_name})
        # """
        # exitsTopic = self._session.run(script)
        exitsTopic = True
        if exitsTopic is True:
            self._session.unsubscribe(self._host, self._port, self._table_name, self._action_name)
            self._logger.info(f'unsubscribed from {self._table_name} - {self._action_name}')
        if not self._session.isClosed():
            self._session.close()

    # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓ abstract methods
    # must return the ddb config containing host, port, user, passwd; add type hints
    @abstractmethod
    def _on_get_ddb_config(self, data) -> dict:
        pass

    @abstractmethod
    def _handle(self, data):
        pass
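For orientation, here is a minimal sketch of how this class might be subclassed, assuming the package is installed and a DolphinDB node is reachable; the `TradeStreamMonitor` class, table name, action name, and credentials are placeholders, not part of the package.

```python
from kaq_quant_common.common.ddb_table_monitor import DdbTableMonitor


class TradeStreamMonitor(DdbTableMonitor):
    """Hypothetical subscriber for a 'kaq_trade_stream' stream table."""

    def __init__(self):
        # No symbol filter, up to 500 rows per callback batch.
        super().__init__(table_name="kaq_trade_stream", action_name="trade_monitor", batch_size=500)

    # The base class declares _on_get_ddb_config(self, data) but calls it with no
    # arguments, so an override that actually works takes only self.
    def _on_get_ddb_config(self) -> dict:
        # Placeholder connection details.
        return {"host": "127.0.0.1", "port": 8848, "user": "admin", "passwd": "123456"}

    def _handle(self, data):
        # With msgAsTable=True each batch arrives as a pandas DataFrame.
        self._logger.info(f"received {len(data)} rows")
```

Starting and stopping would go through whatever lifecycle methods `MonitorBase` wraps around the `_do_init`/`_do_start`/`_do_stop` hooks; that base class is not part of this hunk.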
@@ -1,69 +1,69 @@ kaq_quant_common/common/http_monitor.py
(whole-file rewrite; the removed and re-added bodies are identical)

import threading
import time
from abc import abstractmethod

from kaq_quant_common.common.monitor_base import MonitorBase
from kaq_quant_common.common.ws_wrapper import WsWrapper
from kaq_quant_common.utils import logger_utils


# wrapper for periodic http requests
class HttpMonitor(MonitorBase):
    def __init__(self, interval=5):
        super().__init__()
        # execution interval
        self._interval = interval
        self._logger = logger_utils.get_logger()

    def _do_start(self):
        # start a thread that performs the http request periodically
        self._ticker_thread_event = threading.Event()

        def http_request():
            # time of the last request
            last_request_time = 0
            while True:
                # check whether we need to exit
                if self._ticker_thread_event.is_set():
                    self._logger.info("ticker thread exit")
                    break

                # current time
                current_time = time.time()
                # if not enough time has passed since the last request, wait
                if current_time - last_request_time < self._interval:
                    time.sleep(0.1)
                    continue

                #
                last_request_time = time.time()

                # self._logger.debug('tick start')
                try:
                    self._do_request()
                except Exception as e:
                    self._logger.error(f"http request error: {e}")
                # self._logger.debug('tick finish')
                #
                time.sleep(0.1)

        # start the thread
        self._ticker_thread = threading.Thread(target=http_request)
        # mark it as a daemon thread
        self._ticker_thread.daemon = True
        self._ticker_thread.start()

    def _do_stop(self):
        if self._ticker_thread_event is not None:
            self._ticker_thread_event.set()

    # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓

    @abstractmethod
    def _do_request(self):
        """
        Implemented by subclasses
        """
        pass

    # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
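A minimal sketch of a concrete poller built on this base class, assuming the package is installed; the `HealthCheckMonitor` class, the URL, and the use of `requests` are illustrative choices, not something this diff prescribes.

```python
import requests

from kaq_quant_common.common.http_monitor import HttpMonitor


class HealthCheckMonitor(HttpMonitor):
    """Hypothetical poller that hits a health-check endpoint every 10 seconds."""

    def __init__(self, url: str):
        super().__init__(interval=10)
        self._url = url

    def _do_request(self):
        # Called from the ticker thread; exceptions are caught and logged by the base class.
        resp = requests.get(self._url, timeout=5)
        self._logger.info(f"health check {self._url} -> {resp.status_code}")
```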
@@ -1,81 +1,81 @@ kaq_quant_common/common/modules/limit_order_helper.py
(whole-file rewrite; the removed and re-added bodies are identical)

# avoid blocking on writes
import datetime
import threading
import time

import pandas as pd
from kaq_quant_common.resources.kaq_ddb_stream_write_resources import (
    KaqQuantDdbStreamWriteRepository,
)
from kaq_quant_common.utils import logger_utils


class LimitOrderHelper:

    def __init__(self, ddb: KaqQuantDdbStreamWriteRepository, ddb_table_name: str):
        # latest-snapshot cache and flusher-thread control
        self._latest_snapshots: dict[str, tuple] = {}
        self._latest_lock = threading.Lock()
        # frequency of writes to ddb, default 100ms
        self._flush_interval_ms = 100
        self._stop_event = threading.Event()
        self._flusher_thread = threading.Thread(target=self._flush_loop, daemon=True)
        self._flusher_thread.name = "LimitOrderHelperFlusherThread"

        #
        self._ddb = ddb
        self._ddb_table_name = ddb_table_name

        #
        self._logger = logger_utils.get_logger(self)

        self._build_data: callable = None

    def set_build_data(self, build_data: callable):
        self._build_data = build_data

    def push_data(self, symbol: str, data: dict, arg: dict = None):
        with self._latest_lock:
            self._latest_snapshots[symbol] = (data, arg)

    def start(self):
        self._flusher_thread.start()

    def stop(self):
        self._stop_event.set()
        self._flusher_thread.join()

    def _flush_loop(self):
        # periodically batch-write the latest snapshot of each symbol to the database
        while not self._stop_event.is_set():
            to_process = None
            with self._latest_lock:
                if self._latest_snapshots:
                    to_process = list(self._latest_snapshots.items())
                    self._latest_snapshots.clear()

            if to_process:
                df: pd.DataFrame = None
                now = int(datetime.datetime.now().timestamp() * 1000)

                for symbol, (data, arg) in to_process:
                    sub_df = self._build_data(symbol, data, arg)
                    # log the lag
                    data_first_now = int(sub_df["create_time"].iloc[0])
                    if now - data_first_now > 2000:
                        self._logger.debug(f"data time {data_first_now} vs current time {now}: difference {now - data_first_now} exceeds 2000ms")

                    if df is None:
                        df = sub_df
                    else:
                        df = pd.concat([df, sub_df], ignore_index=True)

                # write to the database
                if df is not None and not df.empty:
                    try:
                        self._ddb.save2stream_batch(self._ddb_table_name, df=df)
                    except Exception as e:
                        # avoid the thread exiting because of a flush failure
                        self._logger.error(f"batch write failed: {e}")

            time.sleep(self._flush_interval_ms / 1000.0)
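To illustrate the write path, a sketch of wiring a `build_data` callback into `LimitOrderHelper`, assuming an already-constructed `KaqQuantDdbStreamWriteRepository`; `build_order_book_row`, the column names other than `create_time`, and the stream table name are placeholders.

```python
import datetime

import pandas as pd

from kaq_quant_common.common.modules.limit_order_helper import LimitOrderHelper


def build_order_book_row(symbol: str, data: dict, arg: dict) -> pd.DataFrame:
    # The flusher reads create_time (epoch milliseconds) from the first row to
    # measure how far the snapshot lags behind the flush time.
    return pd.DataFrame([{
        "symbol": symbol,
        "bid1": data["bid1"],
        "ask1": data["ask1"],
        "create_time": int(datetime.datetime.now().timestamp() * 1000),
    }])


def run(ddb_repo) -> None:
    # ddb_repo: a KaqQuantDdbStreamWriteRepository instance; its construction is not shown in this diff.
    helper = LimitOrderHelper(ddb_repo, ddb_table_name="kaq_limit_order_stream")
    helper.set_build_data(build_order_book_row)
    helper.start()
    # Snapshots pushed faster than the 100 ms flush interval are coalesced per symbol.
    helper.push_data("BTCUSDT", {"bid1": 64000.5, "ask1": 64000.6})
    helper.stop()
```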
@@ -1,76 +1,76 @@ kaq_quant_common/common/modules/limit_order_symbol_monitor.py
(whole-file rewrite; the removed and re-added bodies are identical)

import json

from kaq_quant_common.common.redis_table_monitor import RedisTableMonitor
from kaq_quant_common.utils import yml_utils


# order-book symbol monitor
class LimitOrderSymbolMonitor(RedisTableMonitor):
    def __init__(self, exchange: str, handler: callable = None, default_symbols=["BTCUSDT", "ETHUSDT"]):
        #
        self._exchange = exchange
        # callback
        self._handler = handler
        #
        self._support_symbols = default_symbols
        # remember the previous raw string
        self._str_value = None

        # 1-second polling interval
        super().__init__(table_name="kaq_all_futures_limit_order_symbols_config", interval=1)

        # log the supported symbols
        # self._logger.info(f"init {self._exchange} limit order support symbols: {self._support_symbols}")

    def _on_get_redis_config(self):
        # read the redis config
        host, port, passwd = yml_utils.get(f"kaq_{self._exchange}_quant", "redis", ["host", "port", "passwd"])
        return {"host": host, "port": int(port), "passwd": passwd}

    def _do_query(self) -> str:
        # fetch the raw string
        str = super()._do_query()
        try:
            # parse as json
            json_obj = json.loads(str)
            # keep only this exchange's entry
            symbols = json_obj.get(self._exchange, self._support_symbols)

            #
            tmp_symbols = []
            tmp_symbols.extend(symbols)

            if self._support_symbols is not None and len(self._support_symbols) > 0:
                # !! note: filter here and keep only the supported symbols
                tmp_symbols = [symbol for symbol in symbols if symbol in self._support_symbols]
                # df_symbols = pd.Series(symbols)
                # df_symbols = df_symbols[df_symbols.isin(self._support_symbols)]
                # symbols = df_symbols.tolist()
                if tmp_symbols != symbols and self._str_value != str:
                    # log it
                    self._logger.warning(f"{self._exchange} limit order symbols contain unsupported symbols, before: {symbols} filtered: {tmp_symbols}")

                symbols = tmp_symbols
        except Exception as e:
            symbols = self._support_symbols

        self._str_value = str

        return symbols

    def _do_compare(self, value1: list[str], value2: list[str]) -> bool:
        # check whether the two lists are equal; direct list comparison works here
        return value1 == value2

    def _on_value_change(self, value: list[str]):
        # log the changed symbols
        self._logger.info(f"{self._exchange} limit order symbols changed: {value}")
        if self._handler is not None:
            self._handler(value)

    # get, set
    def set_handler(self, handler: callable):
        self._handler = handler

    def get_symbols(self) -> list[str]:
        return self._value
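And a sketch of how the symbol monitor might be consumed, assuming the package and its YAML/Redis configuration are in place; the exchange name and the commented-out lifecycle call are placeholders, since `RedisTableMonitor` itself is not part of this diff.

```python
from kaq_quant_common.common.modules.limit_order_symbol_monitor import LimitOrderSymbolMonitor


def on_symbols_changed(symbols: list[str]):
    # Invoked whenever the filtered symbol list read from Redis changes.
    print(f"now tracking: {symbols}")


# "binance" is a placeholder; the exchange key drives both the yml lookup
# (kaq_binance_quant -> redis) and the per-exchange entry in the stored JSON.
monitor = LimitOrderSymbolMonitor(
    exchange="binance",
    handler=on_symbols_changed,
    default_symbols=["BTCUSDT", "ETHUSDT"],
)
# Start/stop come from the RedisTableMonitor/MonitorBase lifecycle (not shown here), e.g.:
# monitor.start()
# current = monitor.get_symbols()
```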