hikyuu 2.7.0__py3-none-manylinux2014_aarch64.whl → 2.7.3__py3-none-manylinux2014_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hikyuu/__init__.py +25 -7
- hikyuu/__init__.pyi +23 -12
- hikyuu/analysis/__init__.pyi +6 -1
- hikyuu/analysis/analysis.pyi +7 -2
- hikyuu/core.pyi +8 -3
- hikyuu/cpp/core310.pyi +94 -24
- hikyuu/cpp/core310.so +0 -0
- hikyuu/cpp/core311.pyi +94 -24
- hikyuu/cpp/core311.so +0 -0
- hikyuu/cpp/core312.pyi +94 -24
- hikyuu/cpp/core312.so +0 -0
- hikyuu/cpp/core313.pyi +94 -24
- hikyuu/cpp/core313.so +0 -0
- hikyuu/cpp/i18n/zh_CN/hikyuu.mo +0 -0
- hikyuu/cpp/i18n/zh_CN/hikyuu_plugin.mo +0 -0
- hikyuu/cpp/libboost_atomic.so +0 -0
- hikyuu/cpp/libboost_atomic.so.1.90.0 +0 -0
- hikyuu/cpp/{libboost_charconv-mt.so → libboost_charconv.so} +0 -0
- hikyuu/cpp/{libboost_charconv-mt.so.1.88.0 → libboost_charconv.so.1.90.0} +0 -0
- hikyuu/cpp/libboost_chrono.so +0 -0
- hikyuu/cpp/libboost_chrono.so.1.90.0 +0 -0
- hikyuu/cpp/libboost_container.so +0 -0
- hikyuu/cpp/libboost_container.so.1.90.0 +0 -0
- hikyuu/cpp/libboost_date_time.so +0 -0
- hikyuu/cpp/libboost_date_time.so.1.90.0 +0 -0
- hikyuu/cpp/libboost_locale.so +0 -0
- hikyuu/cpp/libboost_locale.so.1.90.0 +0 -0
- hikyuu/cpp/libboost_random.so +0 -0
- hikyuu/cpp/libboost_random.so.1.90.0 +0 -0
- hikyuu/cpp/libboost_serialization.so +0 -0
- hikyuu/cpp/libboost_serialization.so.1.90.0 +0 -0
- hikyuu/cpp/libboost_thread.so +0 -0
- hikyuu/cpp/libboost_thread.so.1.90.0 +0 -0
- hikyuu/cpp/libboost_wserialization.so +0 -0
- hikyuu/cpp/libboost_wserialization.so.1.90.0 +0 -0
- hikyuu/cpp/libhikyuu.so +0 -0
- hikyuu/cpp/libsqlite3.so +0 -0
- hikyuu/data/clickhouse_upgrade/0001.sql +2 -0
- hikyuu/data/common_clickhouse.py +1 -3
- hikyuu/data/download_block.py +1 -1
- hikyuu/data/hku_config_template.py +30 -3
- hikyuu/data/mysql_upgrade/0029.sql +2 -0
- hikyuu/data/pytdx_to_clickhouse.py +86 -32
- hikyuu/data/pytdx_to_h5.py +73 -28
- hikyuu/data/pytdx_to_mysql.py +65 -21
- hikyuu/data/pytdx_weight_to_clickhouse.py +2 -0
- hikyuu/data/pytdx_weight_to_mysql.py +2 -0
- hikyuu/data/pytdx_weight_to_sqlite.py +2 -0
- hikyuu/data/sqlite_upgrade/0029.sql +4 -0
- hikyuu/data/tdx_to_clickhouse.py +2 -2
- hikyuu/data/tdx_to_h5.py +11 -11
- hikyuu/data/tdx_to_mysql.py +2 -2
- hikyuu/draw/drawplot/bokeh_draw.pyi +14 -7
- hikyuu/draw/drawplot/echarts_draw.pyi +14 -7
- hikyuu/draw/drawplot/matplotlib_draw.py +8 -2
- hikyuu/draw/drawplot/matplotlib_draw.pyi +14 -7
- hikyuu/extend.pyi +8 -3
- hikyuu/gui/HikyuuTDX.py +42 -3
- hikyuu/gui/data/MainWindow.py +189 -129
- hikyuu/hub.pyi +6 -6
- hikyuu/include/hikyuu/StockManager.h +17 -2
- hikyuu/include/hikyuu/StrategyContext.h +4 -4
- hikyuu/include/hikyuu/data_driver/BaseInfoDriver.h +2 -1
- hikyuu/include/hikyuu/data_driver/KDataDriver.h +2 -4
- hikyuu/include/hikyuu/data_driver/kdata/mysql/MySQLKDataDriver.h +5 -1
- hikyuu/include/hikyuu/data_driver/kdata/sqlite/SQLiteKDataDriver.h +1 -1
- hikyuu/include/hikyuu/global/sysinfo.h +24 -5
- hikyuu/include/hikyuu/indicator/IndicatorImp.h +1 -1
- hikyuu/include/hikyuu/plugin/KDataToClickHouseImporter.h +40 -0
- hikyuu/include/hikyuu/plugin/KDataToMySQLImporter.h +40 -0
- hikyuu/include/hikyuu/plugin/checkdata.h +20 -0
- hikyuu/include/hikyuu/plugin/extind.h +3 -0
- hikyuu/include/hikyuu/plugin/hkuextra.h +2 -0
- hikyuu/include/hikyuu/plugin/interface/CheckDataPluginInterface.h +25 -0
- hikyuu/include/hikyuu/plugin/interface/HkuExtraPluginInterface.h +2 -0
- hikyuu/include/hikyuu/plugin/interface/ImportKDataToClickHousePluginInterface.h +44 -0
- hikyuu/include/hikyuu/plugin/interface/ImportKDataToMySQLPluginInterface.h +42 -0
- hikyuu/include/hikyuu/plugin/interface/plugins.h +6 -0
- hikyuu/include/hikyuu/python/convert_any.h +9 -6
- hikyuu/include/hikyuu/python/pybind_utils.h +1 -1
- hikyuu/include/hikyuu/strategy/Strategy.h +1 -1
- hikyuu/include/hikyuu/trade_manage/TradeManagerBase.h +0 -1
- hikyuu/include/hikyuu/trade_manage/TradeRecord.h +2 -1
- hikyuu/include/hikyuu/trade_sys/allocatefunds/AllocateFundsBase.h +0 -1
- hikyuu/include/hikyuu/trade_sys/allocatefunds/build_in.h +1 -0
- hikyuu/include/hikyuu/trade_sys/allocatefunds/crt/AF_FixedAmount.h +26 -0
- hikyuu/include/hikyuu/trade_sys/allocatefunds/imp/FixAmountFunds.h +18 -0
- hikyuu/include/hikyuu/trade_sys/condition/ConditionBase.h +0 -1
- hikyuu/include/hikyuu/trade_sys/environment/EnvironmentBase.h +0 -1
- hikyuu/include/hikyuu/trade_sys/moneymanager/MoneyManagerBase.h +0 -1
- hikyuu/include/hikyuu/trade_sys/multifactor/MultiFactorBase.h +0 -1
- hikyuu/include/hikyuu/trade_sys/multifactor/NormalizeBase.h +0 -1
- hikyuu/include/hikyuu/trade_sys/multifactor/ScoresFilterBase.h +0 -1
- hikyuu/include/hikyuu/trade_sys/portfolio/Portfolio.h +13 -13
- hikyuu/include/hikyuu/trade_sys/profitgoal/ProfitGoalBase.h +9 -11
- hikyuu/include/hikyuu/trade_sys/selector/SelectorBase.h +0 -1
- hikyuu/include/hikyuu/trade_sys/signal/SignalBase.h +0 -1
- hikyuu/include/hikyuu/trade_sys/slippage/SlippageBase.h +0 -1
- hikyuu/include/hikyuu/trade_sys/stoploss/StoplossBase.h +0 -1
- hikyuu/include/hikyuu/trade_sys/system/System.h +1 -2
- hikyuu/include/hikyuu/utilities/Log.h +6 -7
- hikyuu/include/hikyuu/utilities/Parameter.h +17 -0
- hikyuu/include/hikyuu/utilities/config.h +28 -0
- hikyuu/include/hikyuu/utilities/plugin/PluginBase.h +17 -2
- hikyuu/include/hikyuu/utilities/plugin/PluginManager.h +41 -22
- hikyuu/include/hikyuu/utilities/thread/GlobalStealThreadPool.h +1 -2
- hikyuu/include/hikyuu/utilities/thread/GlobalThreadPool.h +1 -1
- hikyuu/include/hikyuu/utilities/thread/MQStealThreadPool.h +286 -0
- hikyuu/include/hikyuu/utilities/thread/MQThreadPool.h +1 -0
- hikyuu/include/hikyuu/utilities/thread/StealThreadPool.h +297 -0
- hikyuu/include/hikyuu/utilities/thread/ThreadPool.h +1 -0
- hikyuu/include/hikyuu/utilities/thread/WorkStealQueue.h +9 -8
- hikyuu/include/hikyuu/utilities/thread/algorithm.h +64 -14
- hikyuu/include/hikyuu/version.h +4 -4
- hikyuu/plugin/libbacktest.so +0 -0
- hikyuu/plugin/libcheckdata.so +0 -0
- hikyuu/plugin/libclickhousedriver.so +0 -0
- hikyuu/plugin/libdataserver.so +0 -0
- hikyuu/plugin/libdataserver_parquet.so +0 -0
- hikyuu/plugin/libdevice.so +0 -0
- hikyuu/plugin/libextind.so +0 -0
- hikyuu/plugin/libhkuextra.so +0 -0
- hikyuu/plugin/libimport2ch.so +0 -0
- hikyuu/plugin/libimport2hdf5.so +0 -0
- hikyuu/plugin/libimport2mysql.so +0 -0
- hikyuu/plugin/libtmreport.so +0 -0
- hikyuu/trade_manage/__init__.pyi +12 -7
- hikyuu/trade_manage/trade.pyi +12 -7
- hikyuu/trade_sys/trade_sys.py +54 -5
- hikyuu/util/__init__.pyi +1 -1
- hikyuu/util/singleton.pyi +1 -1
- {hikyuu-2.7.0.dist-info → hikyuu-2.7.3.dist-info}/METADATA +10 -4
- {hikyuu-2.7.0.dist-info → hikyuu-2.7.3.dist-info}/RECORD +136 -114
- hikyuu/cpp/libboost_chrono-mt.so +0 -0
- hikyuu/cpp/libboost_chrono-mt.so.1.88.0 +0 -0
- hikyuu/cpp/libboost_date_time-mt.so +0 -0
- hikyuu/cpp/libboost_date_time-mt.so.1.88.0 +0 -0
- hikyuu/cpp/libboost_serialization-mt.so +0 -0
- hikyuu/cpp/libboost_serialization-mt.so.1.88.0 +0 -0
- hikyuu/cpp/libboost_system-mt.so +0 -0
- hikyuu/cpp/libboost_system-mt.so.1.88.0 +0 -0
- hikyuu/cpp/libboost_thread-mt.so +0 -0
- hikyuu/cpp/libboost_thread-mt.so.1.88.0 +0 -0
- hikyuu/cpp/libboost_wserialization-mt.so +0 -0
- hikyuu/cpp/libboost_wserialization-mt.so.1.88.0 +0 -0
- hikyuu/data/pytdx_to_taos.py +0 -736
- {hikyuu-2.7.0.dist-info → hikyuu-2.7.3.dist-info}/WHEEL +0 -0
- {hikyuu-2.7.0.dist-info → hikyuu-2.7.3.dist-info}/entry_points.txt +0 -0
- {hikyuu-2.7.0.dist-info → hikyuu-2.7.3.dist-info}/top_level.txt +0 -0
hikyuu/include/hikyuu/utilities/thread/StealThreadPool.h
ADDED
@@ -0,0 +1,297 @@
+/*
+ * StealThreadPool.h
+ *
+ * Copyright (c) 2019 hikyuu.org
+ *
+ * Created on: 2019-9-16
+ * Author: fasiondog
+ */
+
+#pragma once
+#ifndef HIKYUU_UTILITIES_THREAD_STEALTHREADPOOL_H
+#define HIKYUU_UTILITIES_THREAD_STEALTHREADPOOL_H
+
+#include <future>
+#include <thread>
+#include <vector>
+#include "ThreadSafeQueue.h"
+#include "WorkStealQueue.h"
+#include "InterruptFlag.h"
+#include "../cppdef.h"
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsign-compare"
+#endif
+
+#ifndef HKU_UTILS_API
+#define HKU_UTILS_API
+#endif
+
+namespace hku {
+
+/**
+ * @brief Work-stealing thread pool
+ * @note Mainly intended for recursive cases where a task itself creates tasks and adds them to the pool; otherwise a plain thread pool is recommended
+ * @details
+ * @ingroup ThreadPool
+ */
+#ifdef _MSC_VER
+class StealThreadPool {
+#else
+class HKU_UTILS_API StealThreadPool {
+#endif
+public:
+    /**
+     * Default constructor: creates as many threads as the system has CPU cores
+     */
+    StealThreadPool() : StealThreadPool(std::thread::hardware_concurrency()) {}
+
+    /**
+     * Constructor: creates the specified number of threads
+     * @param n number of threads to create
+     * @param until_empty stop running automatically once the task queues are empty
+     */
+    explicit StealThreadPool(size_t n, bool until_empty = true)
+    : m_done(false), m_worker_num(n), m_running_until_empty(until_empty) {
+        try {
+            m_interrupt_flags.resize(m_worker_num);
+            for (int i = 0; i < m_worker_num; i++) {
+                // Create the task queue for each worker thread
+                m_queues.emplace_back(new WorkStealQueue);
+            }
+            // Start the threads only after all thread resources have been initialized
+            for (int i = 0; i < m_worker_num; i++) {
+                m_threads.emplace_back(&StealThreadPool::worker_thread, this, i);
+                m_thread_index[m_threads.back().get_id()] = i;
+            }
+        } catch (...) {
+            m_done = true;
+            throw;
+        }
+    }
+
+    /**
+     * Destructor: blocks until all tasks in the pool have completed
+     */
+    ~StealThreadPool() {
+        if (!m_done) {
+            join();
+        }
+        m_threads.clear();
+    }
+
+    /** Get the number of worker threads */
+    size_t worker_num() const {
+        return m_worker_num;
+    }
+
+    /** Number of remaining tasks */
+    size_t remain_task_count() const {
+        if (m_done) {
+            return 0;
+        }
+        size_t total = m_master_work_queue.size();
+        for (size_t i = 0; i < m_worker_num; i++) {
+            total += m_queues[i]->size();
+        }
+        return total;
+    }
+
+    /** Type of the future returned after submitting a task to the pool */
+    template <typename ResultType>
+    using task_handle = std::future<ResultType>;
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4996)
+#endif
+
+    /** Submit a task to the thread pool */
+    template <typename FunctionType>
+    auto submit(FunctionType f) {
+        if (m_done) {
+            throw std::logic_error("You can't submit a task to the stopped StealThreadPool!!");
+        }
+
+        int index = -1;
+        auto iter = m_thread_index.find(std::this_thread::get_id());
+        if (iter != m_thread_index.end()) {
+            index = iter->second;
+        }
+
+        typedef typename std::invoke_result<FunctionType>::type result_type;
+        std::packaged_task<result_type()> task(f);
+        task_handle<result_type> res(task.get_future());
+        if (index != -1 && !m_interrupt_flags[index]) {
+            // Tasks submitted from a pool thread go to the front of its local queue (recursion behaves like a stack)
+            m_queues[index]->push_front(std::move(task));
+        } else {
+            m_master_work_queue.push(std::move(task));
+            m_cv.notify_one();
+        }
+        return res;
+    }
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+    /** Return whether the pool has stopped */
+    bool done() const {
+        return m_done;
+    }
+
+    /**
+     * Wait for each thread to finish its currently running task, then exit immediately
+     */
+    void stop() {
+        if (m_done) {
+            return;
+        }
+
+        m_done = true;
+
+        // Also enqueue termination markers so the pool can stop even during DLL unload
+        for (size_t i = 0; i < m_worker_num; i++) {
+            m_interrupt_flags[i].set();
+            m_queues[i]->push_front(FuncWrapper());
+        }
+
+        m_cv.notify_all();  // wake up all worker threads
+        for (size_t i = 0; i < m_worker_num; i++) {
+            if (m_threads[i].joinable()) {
+                m_threads[i].join();
+            }
+        }
+
+        m_master_work_queue.clear();
+        for (size_t i = 0; i < m_worker_num; i++) {
+            m_queues[i]->clear();
+        }
+    }
+
+    /**
+     * Block until all tasks in the pool have completed
+     * @note After this the worker threads have exited and the pool can no longer be used
+     */
+    void join() {
+        if (m_done) {
+            return;
+        }
+
+        // Tell the worker threads to stop once they can no longer fetch a task
+        if (m_running_until_empty) {
+            while (true) {
+                if (m_master_work_queue.size() != 0) {
+                    std::this_thread::yield();
+                } else {
+                    bool can_quit = true;
+                    for (size_t i = 0; i < m_worker_num; i++) {
+                        if (!m_queues[i]->empty()) {
+                            can_quit = false;
+                            break;
+                        }
+                    }
+                    if (can_quit) {
+                        break;
+                    } else {
+                        std::this_thread::yield();
+                    }
+                }
+            }
+
+            m_done = true;
+            for (size_t i = 0; i < m_worker_num; i++) {
+                m_interrupt_flags[i].set();
+            }
+        }
+
+        for (size_t i = 0; i < m_worker_num; i++) {
+            m_master_work_queue.push(FuncWrapper());
+        }
+
+        // Wake up all worker threads
+        m_cv.notify_all();
+
+        // Wait for the threads to finish
+        for (size_t i = 0; i < m_worker_num; i++) {
+            if (m_threads[i].joinable()) {
+                m_threads[i].join();
+            }
+        }
+
+        m_done = true;
+        m_master_work_queue.clear();
+        for (size_t i = 0; i < m_worker_num; i++) {
+            m_queues[i]->clear();
+        }
+    }
+
+private:
+    typedef FuncWrapper task_type;
+    std::atomic_bool m_done;       // global stop flag for the pool
+    size_t m_worker_num;           // number of worker threads
+    bool m_running_until_empty;    // stop automatically when the task queues are empty
+    std::condition_variable m_cv;  // used to block and wait when there is no task
+    std::mutex m_cv_mutex;         // mutex paired with the condition variable
+
+    std::vector<InterruptFlag> m_interrupt_flags;           // worker thread interrupt state
+    ThreadSafeQueue<task_type> m_master_work_queue;         // master task queue
+    std::vector<std::unique_ptr<WorkStealQueue>> m_queues;  // task queues (one per worker thread)
+    std::vector<std::thread> m_threads;                     // worker threads
+    std::unordered_map<std::thread::id, int> m_thread_index;
+
+    void worker_thread(int index) {
+        while (!m_done && !m_interrupt_flags[index]) {
+            run_pending_task(index);
+        }
+    }
+
+    void run_pending_task(int index) {
+        // Take a task from the local queue; if there is none, take one from the master queue
+        // A null task from the master queue means this thread should stop; otherwise steal a task from another worker's queue
+        task_type task;
+        if (pop_task_from_local_queue(task, index)) {
+            if (!task.isNullTask()) {
+                task();
+            } else {
+                m_interrupt_flags[index].set();
+            }
+        } else if (pop_task_from_master_queue(task)) {
+            if (!task.isNullTask()) {
+                task();
+            } else {
+                m_interrupt_flags[index].set();
+            }
+        } else if (pop_task_from_other_thread_queue(task, index)) {
+            task();
+        } else {
+            std::unique_lock<std::mutex> lk(m_cv_mutex);
+            m_cv.wait(lk, [this] { return this->m_done || !this->m_master_work_queue.empty(); });
+        }
+    }
+
+    bool pop_task_from_master_queue(task_type& task) {
+        return m_master_work_queue.try_pop(task);
+    }
+
+    // cppcheck-suppress functionStatic  // suppress cppcheck's suggestion to make this static
+    bool pop_task_from_local_queue(task_type& task, int index) {
+        return m_queues[index]->try_pop(task);
+    }
+
+    bool pop_task_from_other_thread_queue(task_type& task, int index) {
+        for (int i = 0; i < m_worker_num; ++i) {
+            int pos = (index + i + 1) % m_worker_num;
+            if (pos != index && !m_interrupt_flags[pos] && m_queues[pos]->try_steal(task)) {
+                return true;
+            }
+        }
+        return false;
+    }
+};
+
+} /* namespace hku */
+
+#endif /* HIKYUU_UTILITIES_THREAD_STEALTHREADPOOL_H */
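For orientation, here is a minimal, hypothetical usage sketch of the new hku::StealThreadPool (not part of the package; the include path is taken from the file list above, everything else is standard library). Tasks submitted from a non-pool thread land in the master queue, while tasks submitted from inside a worker go to that worker's local work-stealing deque, which is the recursive use case the header's doc comment describes; the sketch below sticks to main-thread submission to stay simple:

```cpp
// Hypothetical example, assuming the header ships at the path listed above.
#include <cstdio>
#include <future>
#include <vector>
#include "hikyuu/utilities/thread/StealThreadPool.h"

int main() {
    hku::StealThreadPool pool(4);  // 4 workers; stops automatically once the queues drain

    std::vector<std::future<int>> results;
    for (int i = 0; i < 16; i++) {
        // Submitted from a non-pool thread: routed to the master queue, one worker is woken.
        results.emplace_back(pool.submit([i]() { return i * i; }));
    }

    int sum = 0;
    for (auto& f : results) {
        sum += f.get();
    }
    std::printf("sum of squares = %d\n", sum);  // 1240

    pool.join();  // block until every queued task has completed
    return 0;
}
```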
hikyuu/include/hikyuu/utilities/thread/WorkStealQueue.h
CHANGED
@@ -12,7 +12,7 @@
 #define HIKYUU_UTILITIES_THREAD_WORKSTEALQUEUE_H
 
 #include <deque>
-#include <
+#include <shared_mutex>
 #include "FuncWrapper.h"
 
 namespace hku {
@@ -24,7 +24,7 @@ class WorkStealQueue {
 private:
     typedef FuncWrapper data_type;
     std::deque<data_type> m_queue;
-    mutable std::
+    mutable std::shared_mutex m_mutex;
 
 public:
     /** Constructor */
@@ -36,29 +36,30 @@ public:
 
     /** Push data at the front of the queue */
     void push_front(data_type&& data) {
-        std::
+        std::unique_lock<std::shared_mutex> lock(m_mutex);
         m_queue.push_front(std::move(data));
     }
 
     /** Push data at the back of the queue */
     void push_back(data_type&& data) {
-        std::
+        std::unique_lock<std::shared_mutex> lock(m_mutex);
         m_queue.push_back(std::move(data));
     }
 
     /** Whether the queue is empty */
     bool empty() const {
-        std::
+        std::shared_lock<std::shared_mutex> lock(m_mutex);
         return m_queue.empty();
     }
 
     /** Queue size. ! Not locked, use with caution */
     size_t size() const {
+        std::shared_lock<std::shared_mutex> lock(m_mutex);
         return m_queue.size();
     }
 
     void clear() {
-        std::
+        std::unique_lock<std::shared_mutex> lock(m_mutex);
         auto tmp = std::deque<data_type>();
         m_queue.swap(tmp);
     }
@@ -69,7 +70,7 @@ public:
      * @return false if the queue was originally empty, true otherwise
      */
     bool try_pop(data_type& res) {
-        std::
+        std::unique_lock<std::shared_mutex> lock(m_mutex);
        if (m_queue.empty()) {
             return false;
         }
@@ -85,7 +86,7 @@ public:
      * @return false if the queue was originally empty, true otherwise
      */
     bool try_steal(data_type& res) {
-        std::
+        std::unique_lock<std::shared_mutex> lock(m_mutex);
         if (m_queue.empty()) {
             return false;
         }
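The change above replaces the queue's internal mutex with a std::shared_mutex: const readers (empty(), and now size() as well) take a std::shared_lock, while every mutating operation takes a std::unique_lock. A self-contained sketch of that reader/writer locking pattern, using a hypothetical GuardedDeque class that is not part of hikyuu:

```cpp
#include <cstddef>
#include <deque>
#include <shared_mutex>

// Illustrative only: mirrors the locking scheme WorkStealQueue adopts in 2.7.3.
class GuardedDeque {
public:
    void push_back(int v) {
        std::unique_lock<std::shared_mutex> lock(m_mutex);  // exclusive: writers block everyone
        m_queue.push_back(v);
    }

    bool empty() const {
        std::shared_lock<std::shared_mutex> lock(m_mutex);  // shared: concurrent readers allowed
        return m_queue.empty();
    }

    std::size_t size() const {
        std::shared_lock<std::shared_mutex> lock(m_mutex);  // size() is now locked too, per the diff
        return m_queue.size();
    }

private:
    std::deque<int> m_queue;
    mutable std::shared_mutex m_mutex;  // mutable so const readers can still lock
};
```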
hikyuu/include/hikyuu/utilities/thread/algorithm.h
CHANGED
@@ -12,6 +12,8 @@
 #include <vector>
 #include "ThreadPool.h"
 #include "MQThreadPool.h"
+#include "StealThreadPool.h"
+#include "MQStealThreadPool.h"
 
 //----------------------------------------------------------------
 // Note: except for ThreadPool/MQThreadPool, the other thread pools, because they use
@@ -23,15 +25,17 @@ namespace hku {
 
 typedef std::pair<size_t, size_t> range_t;
 
-inline std::vector<range_t> parallelIndexRange(size_t start, size_t end) {
+inline std::vector<range_t> parallelIndexRange(size_t start, size_t end, size_t cpu_num = 0) {
     std::vector<std::pair<size_t, size_t>> ret;
     if (start >= end) {
         return ret;
     }
 
     size_t total = end - start;
-
-
+    if (cpu_num == 0) {
+        cpu_num = std::thread::hardware_concurrency();
+    }
+    if (cpu_num <= 1) {
         ret.emplace_back(start, end);
         return ret;
     }
@@ -52,9 +56,13 @@ inline std::vector<range_t> parallelIndexRange(size_t start, size_t end) {
 }
 
 template <typename FunctionType, class TaskGroup = MQThreadPool>
-void parallel_for_index_void(size_t start, size_t end, FunctionType f) {
-    auto ranges = parallelIndexRange(start, end);
-
+void parallel_for_index_void(size_t start, size_t end, FunctionType f, int cpu_num = 0) {
+    auto ranges = parallelIndexRange(start, end, cpu_num);
+    if (ranges.empty()) {
+        return;
+    }
+
+    TaskGroup tg(cpu_num == 0 ? std::thread::hardware_concurrency() : cpu_num);
     for (size_t i = 0, total = ranges.size(); i < total; i++) {
         tg.submit([=, range = ranges[i]]() {
             for (size_t ix = range.first; ix < range.second; ix++) {
@@ -67,9 +75,14 @@ void parallel_for_index_void(size_t start, size_t end, FunctionType f) {
 }
 
 template <typename FunctionType, class TaskGroup = MQThreadPool>
-auto parallel_for_index(size_t start, size_t end, FunctionType f) {
-
-
+auto parallel_for_index(size_t start, size_t end, FunctionType f, size_t cpu_num = 0) {
+    std::vector<typename std::invoke_result<FunctionType, size_t>::type> ret;
+    auto ranges = parallelIndexRange(start, end, cpu_num);
+    if (ranges.empty()) {
+        return ret;
+    }
+
+    TaskGroup tg(cpu_num == 0 ? std::thread::hardware_concurrency() : cpu_num);
     std::vector<std::future<std::vector<typename std::invoke_result<FunctionType, size_t>::type>>>
         tasks;
     for (size_t i = 0, total = ranges.size(); i < total; i++) {
@@ -82,7 +95,6 @@ auto parallel_for_index(size_t start, size_t end, FunctionType f) {
         }));
     }
 
-    std::vector<typename std::invoke_result<FunctionType, size_t>::type> ret;
     for (auto& task : tasks) {
         auto one = task.get();
         for (auto&& value : one) {
@@ -94,15 +106,19 @@ auto parallel_for_index(size_t start, size_t end, FunctionType f) {
 }
 
 template <typename FunctionType, class TaskGroup = MQThreadPool>
-auto parallel_for_range(size_t start, size_t end, FunctionType f) {
-
-
+auto parallel_for_range(size_t start, size_t end, FunctionType f, size_t cpu_num = 0) {
+    typename std::invoke_result<FunctionType, range_t>::type ret;
+    auto ranges = parallelIndexRange(start, end, cpu_num);
+    if (ranges.empty()) {
+        return ret;
+    }
+
+    TaskGroup tg(cpu_num == 0 ? std::thread::hardware_concurrency() : cpu_num);
     std::vector<std::future<typename std::invoke_result<FunctionType, range_t>::type>> tasks;
     for (size_t i = 0, total = ranges.size(); i < total; i++) {
         tasks.emplace_back(tg.submit([func = f, range = ranges[i]]() { return func(range); }));
     }
 
-    typename std::invoke_result<FunctionType, range_t>::type ret;
     for (auto& task : tasks) {
         auto one = task.get();
         for (auto&& value : one) {
@@ -113,4 +129,38 @@ auto parallel_for_range(size_t start, size_t end, FunctionType f) {
     return ret;
 }
 
+template <typename FunctionType, class TaskGroup = ThreadPool>
+void parallel_for_index_void_single(size_t start, size_t end, FunctionType f, int cpu_num = 0) {
+    if (start >= end) {
+        return;
+    }
+
+    TaskGroup tg(cpu_num == 0 ? std::thread::hardware_concurrency() : cpu_num);
+    for (size_t i = start; i < end; i++) {
+        tg.submit([func = f, i]() { func(i); });
+    }
+    tg.join();
+    return;
+}
+
+template <typename FunctionType, class TaskGroup = ThreadPool>
+auto parallel_for_index_single(size_t start, size_t end, FunctionType f, size_t cpu_num = 0) {
+    std::vector<typename std::invoke_result<FunctionType, size_t>::type> ret;
+    if (start >= end) {
+        return ret;
+    }
+
+    TaskGroup tg(cpu_num == 0 ? std::thread::hardware_concurrency() : cpu_num);
+    std::vector<std::future<typename std::invoke_result<FunctionType, size_t>::type>> tasks;
+    for (size_t i = start; i < end; i++) {
+        tasks.emplace_back(tg.submit([func = f, i]() { return func(i); }));
+    }
+
+    for (auto& task : tasks) {
+        ret.push_back(std::move(task.get()));
+    }
+
+    return ret;
+}
+
 }  // namespace hku
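The parallel helpers above gain an optional cpu_num argument; 0 (the default) keeps the previous behaviour of using std::thread::hardware_concurrency(). A hedged usage sketch (not from the package) of hku::parallel_for_index with an explicit worker count, assuming the header path from the file list above and the default MQThreadPool task group:

```cpp
// Hypothetical example of the cpu_num parameter added in 2.7.3.
#include <cstdio>
#include <vector>
#include "hikyuu/utilities/thread/algorithm.h"

int main() {
    // Square the indices 0..99 on at most 2 workers; passing 0 (or omitting the
    // argument) falls back to std::thread::hardware_concurrency().
    std::vector<size_t> squares =
        hku::parallel_for_index(0, 100, [](size_t i) { return i * i; }, 2);

    // Results come back in index order: expect count=100, last=9801.
    std::printf("count=%zu last=%zu\n", squares.size(), squares.back());
    return 0;
}
```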
hikyuu/include/hikyuu/version.h
CHANGED
@@ -12,13 +12,13 @@
 #define HKU_VERSION_H
 
 // clang-format off
-#define HKU_VERSION "2.7.
+#define HKU_VERSION "2.7.3"
 #define HKU_VERSION_MAJOR 2
 #define HKU_VERSION_MINOR 7
-#define HKU_VERSION_ALTER
-#define HKU_VERSION_BUILD
+#define HKU_VERSION_ALTER 3
+#define HKU_VERSION_BUILD 202601060312
 #define HKU_VERSION_MODE "RELEASE"
-#define HKU_VERSION_GIT "2.7.
+#define HKU_VERSION_GIT "2.7.3 release.9c144edf (RELEASE)"
 // clang-format on
 
 #endif /* HKU_VERSION_H */
hikyuu/plugin/libbacktest.so
CHANGED
Binary file
hikyuu/plugin/libcheckdata.so
CHANGED
Binary file
hikyuu/plugin/libclickhousedriver.so
CHANGED
Binary file
hikyuu/plugin/libdataserver.so
CHANGED
Binary file
hikyuu/plugin/libdataserver_parquet.so
CHANGED
Binary file
hikyuu/plugin/libdevice.so
CHANGED
Binary file
hikyuu/plugin/libextind.so
CHANGED
Binary file
hikyuu/plugin/libhkuextra.so
CHANGED
Binary file
hikyuu/plugin/libimport2ch.so
CHANGED
Binary file
hikyuu/plugin/libimport2hdf5.so
CHANGED
Binary file
hikyuu/plugin/libimport2mysql.so
CHANGED
Binary file
hikyuu/plugin/libtmreport.so
CHANGED
Binary file