rquote 0.3.9__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rquote/__init__.py CHANGED
@@ -35,6 +35,11 @@ from .plots import PlotUtils
  from . import config
  from . import exceptions
  from .cache import MemoryCache, Cache
+ # Try to import the persistent cache (optional dependency)
+ try:
+     from .cache import PersistentCache
+ except ImportError:
+     PersistentCache = None
  from .utils.http import HTTPClient
 
 
@@ -93,5 +98,6 @@ __all__ = [
      'exceptions',
      'MemoryCache',
      'Cache',
+     'PersistentCache',
      'HTTPClient',
  ]
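With this change, `rquote.PersistentCache` resolves to `None` whenever the optional dependency is missing, so downstream code can feature-detect it at import time. A minimal consumer-side sketch (the fallback choice and TTL values here are illustrative, not defaults of the package):

```python
# Sketch: pick the persistent cache when available, fall back to memory.
# PersistentCache is exported as None when the optional dependency is missing.
from rquote import MemoryCache, PersistentCache

if PersistentCache is not None:
    cache = PersistentCache(ttl=86400)  # survives across runs (~/.rquote/cache.db)
else:
    cache = MemoryCache(ttl=3600)       # process-local fallback
```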
rquote/cache/__init__.py CHANGED
@@ -5,5 +5,10 @@
  from .base import Cache
  from .memory import MemoryCache
 
- __all__ = ['Cache', 'MemoryCache']
+ # Try to import the persistent cache (optional dependency)
+ try:
+     from .persistent import PersistentCache
+     __all__ = ['Cache', 'MemoryCache', 'PersistentCache']
+ except ImportError:
+     __all__ = ['Cache', 'MemoryCache']
 
rquote/cache/persistent.py ADDED
@@ -0,0 +1,507 @@
+ # -*- coding: utf-8 -*-
+ """
+ Persistent cache implementation.
+ """
+ import os
+ import time
+ from pathlib import Path
+ from typing import Optional, Any, Tuple
+ import pandas as pd
+ from .base import Cache
+
+ # Import the logger
+ try:
+     from ..utils.logging import logger
+ except ImportError:
+     import logging
+     logger = logging.getLogger(__name__)
+
+ # Try to import duckdb (optional dependency)
+ try:
+     import duckdb
+     DUCKDB_AVAILABLE = True
+ except ImportError:
+     DUCKDB_AVAILABLE = False
+     duckdb = None
+
+
+ class PersistentCache(Cache):
+     """Persistent cache that stores data via duckdb or the file system."""
+
+     def __init__(self, db_path: Optional[str] = None, use_duckdb: bool = True, ttl: Optional[int] = None):
+         """
+         Initialize the persistent cache.
+
+         Args:
+             db_path: Database file path; defaults to ~/.rquote/cache.db
+             use_duckdb: Use duckdb if available; otherwise fall back to a pickle file
+             ttl: Default expiry in seconds; None means entries never expire
+         """
+         self.use_duckdb = use_duckdb and DUCKDB_AVAILABLE
+         self.ttl = ttl
+
+         if db_path is None:
+             # Default path: ~/.rquote/cache.db or ~/.rquote/cache.pkl
+             home = Path.home()
+             cache_dir = home / '.rquote'
+             cache_dir.mkdir(exist_ok=True)
+             if self.use_duckdb:
+                 db_path = str(cache_dir / 'cache.db')
+             else:
+                 db_path = str(cache_dir / 'cache.pkl')
+
+         self.db_path = db_path
+
+         if self.use_duckdb:
+             self._init_duckdb()
+         else:
+             self._init_pickle()
+
+     def _init_duckdb(self):
+         """Initialize the duckdb database."""
+         self.conn = duckdb.connect(self.db_path)
+         # Create the cache table
+         self.conn.execute("""
+             CREATE TABLE IF NOT EXISTS cache_data (
+                 cache_key TEXT PRIMARY KEY,
+                 symbol TEXT NOT NULL,
+                 name TEXT,
+                 data BLOB,
+                 earliest_date TEXT,
+                 latest_date TEXT,
+                 freq TEXT,
+                 fq TEXT,
+                 updated_at TIMESTAMP,
+                 expire_at TIMESTAMP
+             )
+         """)
+         self.conn.execute("""
+             CREATE INDEX IF NOT EXISTS idx_symbol_freq_fq
+             ON cache_data(symbol, freq, fq)
+         """)
+
+     def _init_pickle(self):
+         """Initialize the pickle store."""
+         import pickle
+         self.pickle = pickle
+         if os.path.exists(self.db_path):
+             try:
+                 with open(self.db_path, 'rb') as f:
+                     self._cache_data = self.pickle.load(f)
+             except Exception:
+                 self._cache_data = {}
+         else:
+             self._cache_data = {}
+
+     def _save_pickle(self):
+         """Persist the pickle data."""
+         import pickle
+         with open(self.db_path, 'wb') as f:
+             self.pickle.dump(self._cache_data, f)
+
+     def _extract_key_parts(self, key: str) -> Tuple[str, str, str, str, str]:
+         """
+         Extract the parts of a full key.
+
+         Args:
+             key: Full key in the form "symbol:sdate:edate:freq:days:fq"
+
+         Returns:
+             (symbol, sdate, edate, freq, fq)
+         """
+         parts = key.split(':')
+         if len(parts) >= 6:
+             return parts[0], parts[1], parts[2], parts[3], parts[5]
+         elif len(parts) >= 4:
+             return parts[0], parts[1], parts[2], parts[3], parts[4] if len(parts) > 4 else 'qfq'
+         else:
+             return parts[0] if parts else '', '', '', 'day', 'qfq'
+
+     def _get_base_key(self, symbol: str, freq: str, fq: str) -> str:
+         """Build the base key (dates excluded)."""
+         return f"{symbol}:{freq}:{fq}"
+
+     def _parse_date(self, date_str: str) -> Optional[pd.Timestamp]:
+         """Parse a date string."""
+         if not date_str:
+             return None
+         try:
+             return pd.to_datetime(date_str)
+         except Exception:
+             return None
+
+     def _get_dataframe_date_range(self, df: pd.DataFrame) -> Tuple[Optional[pd.Timestamp], Optional[pd.Timestamp]]:
+         """Return the date range covered by a DataFrame."""
+         if df.empty:
+             return None, None
+
+         # If the index is not a DatetimeIndex, try to convert it
+         if not isinstance(df.index, pd.DatetimeIndex):
+             try:
+                 # Attempt conversion to a DatetimeIndex
+                 index = pd.to_datetime(df.index)
+                 if len(index) > 0:
+                     return index.min(), index.max()
+             except (ValueError, TypeError):
+                 pass
+             return None, None
+
+         return df.index.min(), df.index.max()
+
+     def _filter_dataframe_by_date(self, df: pd.DataFrame, sdate: Optional[str] = None,
+                                   edate: Optional[str] = None) -> pd.DataFrame:
+         """Filter a DataFrame down to a date range."""
+         if df.empty:
+             return df
+
+         if not isinstance(df.index, pd.DatetimeIndex):
+             return df
+
+         start_date = self._parse_date(sdate) if sdate else None
+         end_date = self._parse_date(edate) if edate else None
+
+         if start_date is not None and end_date is not None:
+             mask = (df.index >= start_date) & (df.index <= end_date)
+             return df[mask]
+         elif start_date is not None:
+             return df[df.index >= start_date]
+         elif end_date is not None:
+             return df[df.index <= end_date]
+         else:
+             return df
+
+     def _merge_dataframes(self, df1: pd.DataFrame, df2: pd.DataFrame) -> pd.DataFrame:
+         """Merge two DataFrames, de-duplicating and sorting by index."""
+         if df1.empty:
+             return df2
+         if df2.empty:
+             return df1
+
+         # Concatenate and drop duplicate index entries
+         combined = pd.concat([df1, df2])
+         combined = combined[~combined.index.duplicated(keep='last')]
+         combined = combined.sort_index()
+         return combined
+
+     def get(self, key: str, sdate: Optional[str] = None, edate: Optional[str] = None) -> Optional[Any]:
+         """
+         Fetch cached data.
+
+         Args:
+             key: Cache key, either the full form "symbol:sdate:edate:freq:days:fq"
+                  or the base form "symbol:freq:fq"
+             sdate: Start date (optional; required when key is a base key)
+             edate: End date (optional; required when key is a base key)
+
+         Returns:
+             (symbol, name, DataFrame) or None
+         """
+         # Detect the key format: a base key has exactly 3 parts, so use the date arguments
+         parts = key.split(':')
+         if len(parts) == 3:
+             # Base key: symbol:freq:fq
+             symbol, freq, fq = parts
+             base_key = key
+             # Use the date arguments, defaulting to empty strings
+             sdate = sdate or ''
+             edate = edate or ''
+         else:
+             # Full key: symbol:sdate:edate:freq:days:fq
+             symbol, sdate_from_key, edate_from_key, freq, fq = self._extract_key_parts(key)
+             base_key = self._get_base_key(symbol, freq, fq)
+             # Prefer the date arguments, falling back to the dates embedded in the key
+             sdate = sdate if sdate is not None else sdate_from_key
+             edate = edate if edate is not None else edate_from_key
+
+         logger.info(f"[CACHE GET] key={key}, base_key={base_key}, sdate={sdate}, edate={edate}")
+
+         if self.use_duckdb:
+             result = self._get_duckdb(base_key, symbol, sdate, edate, freq, fq)
+         else:
+             result = self._get_pickle(base_key, symbol, sdate, edate, freq, fq)
+
+         if result:
+             _, _, df = result
+             logger.info(f"[CACHE HIT] key={key}, rows={len(df)}, range={df.index.min()} to {df.index.max()}")
+         else:
+             logger.info(f"[CACHE MISS] key={key}, no cached data")
+
+         return result
+
+     def _get_duckdb(self, base_key: str, symbol: str, sdate: str, edate: str,
+                     freq: str, fq: str) -> Optional[Tuple[str, str, pd.DataFrame]]:
+         """Fetch data from duckdb."""
+         result = self.conn.execute("""
+             SELECT name, data, earliest_date, latest_date, expire_at
+             FROM cache_data
+             WHERE cache_key = ?
+         """, [base_key]).fetchone()
+
+         if not result:
+             return None
+
+         name, data_blob, earliest_date, latest_date, expire_at = result
+
+         # Expiry check
+         if self.ttl and expire_at:
+             expire_ts = pd.to_datetime(expire_at)
+             if pd.Timestamp.now() > expire_ts:
+                 self.delete(base_key)
+                 return None
+
+         # Deserialize the DataFrame
+         import pickle
+         df = pickle.loads(data_blob)
+
+         # Date range covered by the cached data
+         cached_earliest = self._parse_date(earliest_date)
+         cached_latest = self._parse_date(latest_date)
+
+         # If the requested range lies within the cached range, return the filtered slice
+         request_sdate = self._parse_date(sdate) if sdate else None
+         request_edate = self._parse_date(edate) if edate else None
+
+         # Check for overlap
+         if request_edate and cached_earliest and request_edate < cached_earliest:
+             # Requested end date precedes the earliest cached date: no overlap
+             return None
+         if request_sdate and cached_latest and request_sdate > cached_latest:
+             # Requested start date follows the latest cached date: no overlap
+             return None
+
+         # There is overlap; return the portion available in the cache.
+         # Compute the actually available date range
+         actual_sdate = max(request_sdate, cached_earliest) if request_sdate and cached_earliest else (request_sdate or cached_earliest)
+         actual_edate = min(request_edate, cached_latest) if request_edate and cached_latest else (request_edate or cached_latest)
+
+         # Filter the data
+         filtered_df = self._filter_dataframe_by_date(
+             df,
+             actual_sdate.strftime('%Y-%m-%d') if actual_sdate else None,
+             actual_edate.strftime('%Y-%m-%d') if actual_edate else None
+         )
+
+         if filtered_df.empty:
+             return None
+
+         # Make sure the index is a DatetimeIndex
+         if not isinstance(filtered_df.index, pd.DatetimeIndex):
+             try:
+                 filtered_df.index = pd.to_datetime(filtered_df.index)
+             except (ValueError, TypeError):
+                 pass  # Keep the index as-is if conversion fails
+
+         return (symbol, name, filtered_df)
+
+     def _get_pickle(self, base_key: str, symbol: str, sdate: str, edate: str,
+                     freq: str, fq: str) -> Optional[Tuple[str, str, pd.DataFrame]]:
+         """Fetch data from the pickle file."""
+         if base_key not in self._cache_data:
+             return None
+
+         cache_entry = self._cache_data[base_key]
+
+         # Expiry check
+         if self.ttl and 'expire_at' in cache_entry:
+             expire_ts = cache_entry['expire_at']
+             if pd.Timestamp.now() > expire_ts:
+                 del self._cache_data[base_key]
+                 self._save_pickle()
+                 return None
+
+         df = cache_entry['data']
+         name = cache_entry.get('name', '')
+         earliest_date = cache_entry.get('earliest_date')
+         latest_date = cache_entry.get('latest_date')
+
+         # Date range covered by the cached data
+         cached_earliest = self._parse_date(earliest_date)
+         cached_latest = self._parse_date(latest_date)
+
+         # If the requested range lies within the cached range, return the filtered slice
+         request_sdate = self._parse_date(sdate) if sdate else None
+         request_edate = self._parse_date(edate) if edate else None
+
+         # Check for overlap
+         if request_edate and cached_earliest and request_edate < cached_earliest:
+             # Requested end date precedes the earliest cached date: no overlap
+             return None
+         if request_sdate and cached_latest and request_sdate > cached_latest:
+             # Requested start date follows the latest cached date: no overlap
+             return None
+
+         # There is overlap; return the portion available in the cache.
+         # Compute the actually available date range
+         actual_sdate = max(request_sdate, cached_earliest) if request_sdate and cached_earliest else (request_sdate or cached_earliest)
+         actual_edate = min(request_edate, cached_latest) if request_edate and cached_latest else (request_edate or cached_latest)
+
+         # Filter the data
+         filtered_df = self._filter_dataframe_by_date(
+             df,
+             actual_sdate.strftime('%Y-%m-%d') if actual_sdate else None,
+             actual_edate.strftime('%Y-%m-%d') if actual_edate else None
+         )
+
+         if filtered_df.empty:
+             return None
+
+         # Make sure the index is a DatetimeIndex
+         if not isinstance(filtered_df.index, pd.DatetimeIndex):
+             try:
+                 filtered_df.index = pd.to_datetime(filtered_df.index)
+             except (ValueError, TypeError):
+                 pass  # Keep the index as-is if conversion fails
+
+         return (symbol, name, filtered_df)
+
+     def put(self, key: str, value: Any, ttl: Optional[int] = None) -> None:
+         """
+         Store data in the cache.
+
+         Args:
+             key: Cache key, either the full form "symbol:sdate:edate:freq:days:fq"
+                  or the base form "symbol:freq:fq" (the base key is recommended)
+             value: (symbol, name, DataFrame) tuple
+             ttl: Expiry in seconds
+         """
+         if not isinstance(value, tuple) or len(value) != 3:
+             return
+
+         symbol, name, df = value
+         if not isinstance(df, pd.DataFrame) or df.empty:
+             return
+
+         logger.info(f"[CACHE PUT] key={key}, rows={len(df)}, range={df.index.min()} to {df.index.max()}")
+
+         # Ensure a DatetimeIndex (needed to derive the date range correctly)
+         if not isinstance(df.index, pd.DatetimeIndex):
+             try:
+                 df.index = pd.to_datetime(df.index)
+             except (ValueError, TypeError):
+                 pass  # Continue on failure; _get_dataframe_date_range copes with it
+
+         # Detect the key format: a 3-part base key is used as-is
+         parts = key.split(':')
+         if len(parts) == 3:
+             # Base key: symbol:freq:fq
+             base_key = key
+             freq, fq = parts[1], parts[2]
+         else:
+             # Full key: symbol:sdate:edate:freq:days:fq
+             _, _, _, freq, fq = self._extract_key_parts(key)
+             base_key = self._get_base_key(symbol, freq, fq)
+
+         # Fetch any existing data under the base key and merge
+         existing = self._get_raw(base_key)
+         if existing:
+             _, existing_name, existing_df = existing
+             # Keep the existing name when the new one is empty
+             if not name:
+                 name = existing_name
+             # Merge the data
+             df = self._merge_dataframes(existing_df, df)
+             # Re-check the index type after merging
+             if not isinstance(df.index, pd.DatetimeIndex):
+                 try:
+                     df.index = pd.to_datetime(df.index)
+                 except (ValueError, TypeError):
+                     pass
+
+         # Derive the date range
+         earliest_date, latest_date = self._get_dataframe_date_range(df)
+         earliest_str = earliest_date.strftime('%Y-%m-%d') if earliest_date else None
+         latest_str = latest_date.strftime('%Y-%m-%d') if latest_date else None
+
+         # Compute the expiry time
+         expire_at = None
+         if ttl or self.ttl:
+             expire_seconds = (ttl or self.ttl)
+             expire_at = pd.Timestamp.now() + pd.Timedelta(seconds=expire_seconds)
+
+         if self.use_duckdb:
+             self._put_duckdb(base_key, symbol, name, df, earliest_str, latest_str, freq, fq, expire_at)
+         else:
+             self._put_pickle(base_key, symbol, name, df, earliest_str, latest_str, freq, fq, expire_at)
+
+         logger.info(f"[CACHE PUT] stored, base_key={base_key}, range={earliest_str} to {latest_str}")
+
+     def _get_raw(self, base_key: str) -> Optional[Tuple[str, str, pd.DataFrame]]:
+         """Fetch raw data (no date filtering)."""
+         if self.use_duckdb:
+             result = self.conn.execute("""
+                 SELECT name, data
+                 FROM cache_data
+                 WHERE cache_key = ?
+             """, [base_key]).fetchone()
+
+             if not result:
+                 return None
+
+             import pickle
+             df = pickle.loads(result[1])
+             return (base_key.split(':')[0], result[0], df)
+         else:
+             if base_key not in self._cache_data:
+                 return None
+             cache_entry = self._cache_data[base_key]
+             return (base_key.split(':')[0], cache_entry.get('name', ''), cache_entry['data'])
+
+     def _put_duckdb(self, base_key: str, symbol: str, name: str, df: pd.DataFrame,
+                     earliest_date: Optional[str], latest_date: Optional[str],
+                     freq: str, fq: str, expire_at: Optional[pd.Timestamp]):
+         """Store into duckdb."""
+         import pickle
+         data_blob = pickle.dumps(df)
+
+         self.conn.execute("""
+             INSERT OR REPLACE INTO cache_data
+             (cache_key, symbol, name, data, earliest_date, latest_date, freq, fq, updated_at, expire_at)
+             VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+         """, [base_key, symbol, name, data_blob, earliest_date, latest_date, freq, fq,
+               pd.Timestamp.now(), expire_at])
+         self.conn.commit()
+
+     def _put_pickle(self, base_key: str, symbol: str, name: str, df: pd.DataFrame,
+                     earliest_date: Optional[str], latest_date: Optional[str],
+                     freq: str, fq: str, expire_at: Optional[pd.Timestamp]):
+         """Store into the pickle file."""
+         self._cache_data[base_key] = {
+             'symbol': symbol,
+             'name': name,
+             'data': df,
+             'earliest_date': earliest_date,
+             'latest_date': latest_date,
+             'freq': freq,
+             'fq': fq,
+             'updated_at': pd.Timestamp.now(),
+             'expire_at': expire_at
+         }
+         self._save_pickle()
+
+     def delete(self, key: str) -> None:
+         """Delete a cache entry."""
+         symbol, _, _, freq, fq = self._extract_key_parts(key)
+         base_key = self._get_base_key(symbol, freq, fq)
+
+         if self.use_duckdb:
+             self.conn.execute("DELETE FROM cache_data WHERE cache_key = ?", [base_key])
+             self.conn.commit()
+         else:
+             if base_key in self._cache_data:
+                 del self._cache_data[base_key]
+                 self._save_pickle()
+
+     def clear(self) -> None:
+         """Clear all cached entries."""
+         if self.use_duckdb:
+             self.conn.execute("DELETE FROM cache_data")
+             self.conn.commit()
+         else:
+             self._cache_data.clear()
+             self._save_pickle()
+
+     def close(self):
+         """Close the connection."""
+         if self.use_duckdb:
+             self.conn.close()
+
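To make the two key formats above concrete, here is a short usage sketch of the class. The symbol, name, dates, and file path are made-up illustration values, and the pickle backend is forced so the sketch runs without duckdb:

```python
# Sketch: store under a base key ("symbol:freq:fq"), then read back a sub-range.
# Symbol, name, dates, and db_path are illustration values, not package defaults.
import pandas as pd
from rquote.cache.persistent import PersistentCache

cache = PersistentCache(db_path='./demo_cache.pkl', use_duckdb=False, ttl=86400)

idx = pd.date_range('2024-01-01', '2024-03-29', freq='B')  # business days
frame = pd.DataFrame({'close': range(len(idx))}, index=idx)

# put() derives the earliest/latest dates from the frame itself
cache.put('sh000001:day:qfq', ('sh000001', 'demo index', frame))

# get() with the base key plus explicit dates returns only the overlapping slice
hit = cache.get('sh000001:day:qfq', sdate='2024-02-01', edate='2024-02-29')
if hit:
    symbol, name, sliced = hit
    print(symbol, name, len(sliced))  # February rows only

cache.close()
```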
rquote/markets/base.py CHANGED
@@ -5,9 +5,23 @@
  from abc import ABC, abstractmethod
  from typing import Tuple, Optional
  import pandas as pd
+ from datetime import datetime, timedelta
  from ..cache import Cache
  from ..data_sources.base import DataSource
 
+ # Import the logger
+ try:
+     from ..utils.logging import logger
+ except ImportError:
+     import logging
+     logger = logging.getLogger(__name__)
+
+ # Try to import the persistent cache (optional dependency)
+ try:
+     from ..cache.persistent import PersistentCache
+ except ImportError:
+     PersistentCache = None
+
 
  class Market(ABC):
      """Market base class."""
@@ -23,21 +37,77 @@ class Market(ABC):
          self.data_source = data_source
          self.cache = cache
 
-     @abstractmethod
      def get_price(self, symbol: str, sdate: str = '', edate: str = '',
                    freq: str = 'day', days: int = 320, fq: str = 'qfq') -> Tuple[str, str, pd.DataFrame]:
-         """Get price data."""
-         pass
+         """
+         Get price data (template method; centralizes the caching logic).
+
+         Subclasses may override this for special cases, but should call super().get_price() to keep caching,
+         or implement _fetch_price_data and let the base class handle the cache automatically.
+         """
+         symbol = self.normalize_symbol(symbol)
+         cache_key = f"{symbol}:{sdate}:{edate}:{freq}:{days}:{fq}"
+
+         # For a persistent cache and daily data, use the smart extension logic
+         if PersistentCache and isinstance(self.cache, PersistentCache) and freq == 'day':
+             return self._get_price_with_persistent_cache(
+                 symbol, sdate, edate, freq, days, fq,
+                 lambda s, sd, ed, f, d, fq_param: self._fetch_price_data(s, sd, ed, f, d, fq_param)
+             )
+
+         # Plain cache logic
+         cached = self._get_cached(cache_key)
+         if cached:
+             return cached
+
+         # Fetch from the data source
+         result = self._fetch_price_data(symbol, sdate, edate, freq, days, fq)
+         self._put_cache(cache_key, result)
+         return result
+
+     def _fetch_price_data(self, symbol: str, sdate: str = '', edate: str = '',
+                           freq: str = 'day', days: int = 320, fq: str = 'qfq') -> Tuple[str, str, pd.DataFrame]:
+         """
+         Fetch price data from the data source (subclasses must implement this).
+
+         This method only fetches data and does no caching; the cache logic is handled centrally by get_price.
+
+         Args:
+             symbol: Stock code (already normalized)
+             sdate: Start date
+             edate: End date
+             freq: Frequency
+             days: Number of days
+             fq: Price adjustment mode
+
+         Returns:
+             (symbol, name, DataFrame)
+         """
+         # Default implementation; subclasses should override this method
+         return (symbol, '', pd.DataFrame())
 
      @abstractmethod
      def normalize_symbol(self, symbol: str) -> str:
          """Normalize a stock code."""
          pass
 
-     def _get_cached(self, key: str) -> Optional[Tuple[str, str, pd.DataFrame]]:
+     def _get_cached(self, key: str, sdate: str = '', edate: str = '') -> Optional[Tuple[str, str, pd.DataFrame]]:
          """Fetch data from the cache."""
          if self.cache:
-             cached = self.cache.get(key)
+             # For PersistentCache, query by base key plus date arguments
+             if PersistentCache and isinstance(self.cache, PersistentCache):
+                 # Extract the base key from the full key
+                 parts = key.split(':')
+                 if len(parts) >= 3:
+                     symbol = parts[0]
+                     freq = parts[3] if len(parts) > 3 else 'day'
+                     fq = parts[5] if len(parts) > 5 else 'qfq'
+                     base_key = f"{symbol}:{freq}:{fq}"
+                     cached = self.cache.get(base_key, sdate=sdate, edate=edate)
+                 else:
+                     cached = self.cache.get(key)
+             else:
+                 cached = self.cache.get(key)
              if cached:
                  return cached
          return None
@@ -45,5 +115,128 @@ class Market(ABC):
      def _put_cache(self, key: str, value: Tuple[str, str, pd.DataFrame]) -> None:
          """Store data into the cache."""
          if self.cache:
-             self.cache.put(key, value)
+             # For PersistentCache, store under the base key
+             if PersistentCache and isinstance(self.cache, PersistentCache):
+                 # Extract the base key from the full key
+                 parts = key.split(':')
+                 if len(parts) >= 3:
+                     symbol = parts[0]
+                     freq = parts[3] if len(parts) > 3 else 'day'
+                     fq = parts[5] if len(parts) > 5 else 'qfq'
+                     base_key = f"{symbol}:{freq}:{fq}"
+                     self.cache.put(base_key, value)
+                 else:
+                     self.cache.put(key, value)
+             else:
+                 self.cache.put(key, value)
+
+     def _get_price_with_persistent_cache(self, symbol: str, sdate: str, edate: str,
+                                          freq: str, days: int, fq: str,
+                                          fetch_func) -> Tuple[str, str, pd.DataFrame]:
+         """
+         Smart extension logic for the persistent cache.
+
+         When the requested edate is beyond the cache, extend forward from the latest cached date to edate.
+         When the requested sdate is before the cache, extend backward from the earliest cached date to sdate.
+         """
+         cache_key = f"{symbol}:{sdate}:{edate}:{freq}:{days}:{fq}"
+
+         logger.info(f"[PRICE GET] symbol={symbol}, sdate={sdate}, edate={edate}, freq={freq}, cache_key={cache_key}")
+
+         # Try the cache first (with date arguments; PersistentCache queries by base key plus dates)
+         cached = self._get_cached(cache_key, sdate=sdate, edate=edate)
+         if cached:
+             _, name, cached_df = cached
+             logger.info(f"[PRICE CACHE HIT] symbol={symbol}, rows={len(cached_df)}, range={cached_df.index.min() if not cached_df.empty else 'N/A'} to {cached_df.index.max() if not cached_df.empty else 'N/A'}")
+
+             # Check whether an extension is needed
+             if cached_df.empty or not isinstance(cached_df.index, pd.DatetimeIndex):
+                 # Cache empty or index not dates; fetch fresh data
+                 logger.info(f"[PRICE FETCH] cached data unusable, fetching symbol={symbol}, sdate={sdate}, edate={edate}")
+                 result = fetch_func(symbol, sdate, edate, freq, days, fq)
+                 self._put_cache(cache_key, result)
+                 return result
+
+             cached_earliest = cached_df.index.min()
+             cached_latest = cached_df.index.max()
+             request_sdate = pd.to_datetime(sdate) if sdate else None
+             request_edate = pd.to_datetime(edate) if edate else None
+
+             need_extend_forward = False   # extend toward newer dates
+             need_extend_backward = False  # extend toward earlier dates
+             extend_sdate = sdate
+             extend_edate = edate
+
+             # Check whether a forward extension is needed
+             if request_edate and request_edate > cached_latest:
+                 need_extend_forward = True
+                 # Start the day after the latest cached date and extend to the requested edate
+                 extend_sdate = (cached_latest + pd.Timedelta(days=1)).strftime('%Y-%m-%d')
+                 extend_edate = edate
+
+             # Check whether a backward extension is needed
+             if request_sdate and request_sdate < cached_earliest:
+                 need_extend_backward = True
+                 # Start at the requested sdate and extend to the day before the earliest cached date
+                 extend_sdate = sdate
+                 extend_edate = (cached_earliest - pd.Timedelta(days=1)).strftime('%Y-%m-%d')
+
+             # If an extension is needed, fetch the missing data
+             if need_extend_forward or need_extend_backward:
+                 logger.info(f"[PRICE EXTEND] extension needed, symbol={symbol}, extend_sdate={extend_sdate}, extend_edate={extend_edate}, need_forward={need_extend_forward}, need_backward={need_extend_backward}")
+                 # Fetch the extension slice
+                 extended_result = fetch_func(symbol, extend_sdate, extend_edate, freq, days, fq)
+                 _, _, extended_df = extended_result
+                 logger.info(f"[PRICE FETCH] fetched extension data, rows={len(extended_df)}")
+
+                 if not extended_df.empty:
+                     # Ensure both DataFrames have a DatetimeIndex
+                     if not isinstance(cached_df.index, pd.DatetimeIndex):
+                         try:
+                             cached_df.index = pd.to_datetime(cached_df.index)
+                         except (ValueError, TypeError):
+                             pass
+                     if not isinstance(extended_df.index, pd.DatetimeIndex):
+                         try:
+                             extended_df.index = pd.to_datetime(extended_df.index)
+                         except (ValueError, TypeError):
+                             pass
+
+                     # Merge the data
+                     merged_df = pd.concat([cached_df, extended_df])
+                     merged_df = merged_df[~merged_df.index.duplicated(keep='last')]
+                     merged_df = merged_df.sort_index()
+
+                     # Filter down to the requested date range
+                     if request_sdate or request_edate:
+                         if request_sdate and request_edate:
+                             mask = (merged_df.index >= request_sdate) & (merged_df.index <= request_edate)
+                         elif request_sdate:
+                             mask = merged_df.index >= request_sdate
+                         else:
+                             mask = merged_df.index <= request_edate
+                         merged_df = merged_df[mask]
+
+                     result = (symbol, name, merged_df)
+                     # Update the cache (with the original key; PersistentCache merges intelligently)
+                     self._put_cache(cache_key, result)
+                     return result
+
+             # No extension needed; return the cached data directly.
+             # Note: PersistentCache.get() has already filtered to the requested
+             # date range, so no further filtering is required.
+             logger.info(f"[PRICE RETURN] returning cached data, symbol={symbol}, rows={len(cached_df)}")
+             return (symbol, name, cached_df)
+
+         # Cache miss; fetch directly
+         if fetch_func:
+             logger.info(f"[PRICE FETCH] cache miss, fetching symbol={symbol}, sdate={sdate}, edate={edate}")
+             result = fetch_func(symbol, sdate, edate, freq, days, fq)
+             _, _, df = result
+             logger.info(f"[PRICE FETCH] fetch complete, rows={len(df)}, storing to cache")
+             self._put_cache(cache_key, result)
+             return result
+         else:
+             # Without a fetch_func, return empty data
+             return (symbol, '', pd.DataFrame())
 
rquote/markets/cn_stock.py CHANGED
@@ -27,21 +27,20 @@ class CNStockMarket(Market):
          """Get A-share price data."""
          symbol = self.normalize_symbol(symbol)
 
-         # Check the cache
-         cache_key = f"{symbol}:{sdate}:{edate}:{freq}:{days}:{fq}"
-         cached = self._get_cached(cache_key)
-         if cached:
-             return cached
-
-         # Special-case BK (sector) codes
+         # Special-case BK (sector) codes (no caching)
          if symbol[:2] == 'BK':
              return self._get_bk_price(symbol)
 
-         # Special-case PT codes
+         # Special-case PT codes (no caching)
          if symbol[:2] == 'pt':
             return self._get_pt_price(symbol, sdate, edate, freq, days, fq)
 
-         # Fetch via the data source
+         # Use the base-class caching logic
+         return super().get_price(symbol, sdate, edate, freq, days, fq)
+
+     def _fetch_price_data(self, symbol: str, sdate: str = '', edate: str = '',
+                           freq: str = 'day', days: int = 320, fq: str = 'qfq') -> Tuple[str, str, pd.DataFrame]:
+         """Fetch A-share price data from the data source."""
          try:
              raw_data = self.data_source.fetch_kline(
                  symbol, freq=freq, sdate=sdate, edate=edate, days=days, fq=fq
@@ -51,9 +50,7 @@ class CNStockMarket(Market):
              parser = KlineParser()
              name, df = parser.parse_tencent_kline(raw_data, symbol)
 
-             result = (symbol, name, df)
-             self._put_cache(cache_key, result)
-             return result
+             return (symbol, name, df)
          except (DataSourceError, ParseError) as e:
              logger.warning(f'Failed to fetch {symbol} using new architecture: {e}')
              # Fall back to the legacy method
rquote/markets/future.py CHANGED
@@ -25,18 +25,19 @@ class FutureMarket(Market):
          """Get futures price data."""
          symbol = self.normalize_symbol(symbol)
 
-         # Special-case BTC
+         # Special-case BTC (no caching)
          if symbol[2:5].lower() == 'btc':
              if freq in ('min', '1min', 'minute'):
                  return self._get_btc_minute_price(symbol)
              else:
                  return self._get_btc_price(symbol)
 
-         cache_key = f"{symbol}:{sdate}:{edate}:{freq}:{days}"
-         cached = self._get_cached(cache_key)
-         if cached:
-             return cached
-
+         # Use the base-class caching logic
+         return super().get_price(symbol, sdate, edate, freq, days, fq)
+
+     def _fetch_price_data(self, symbol: str, sdate: str = '', edate: str = '',
+                           freq: str = 'day', days: int = 320, fq: str = 'qfq') -> Tuple[str, str, pd.DataFrame]:
+         """Fetch futures price data from the data source."""
          future_code = symbol[2:]  # strip the 'fu' prefix
 
          try:
@@ -44,9 +45,7 @@ class FutureMarket(Market):
              parser = KlineParser()
              df = parser.parse_sina_future_kline(raw_data, freq=freq)
 
-             result = (symbol, future_code, df)
-             self._put_cache(cache_key, result)
-             return result
+             return (symbol, future_code, df)
          except (DataSourceError, ParseError) as e:
              logger.warning(f'Failed to fetch {symbol} using new architecture, falling back: {e}')
              return self._get_price_fallback(symbol, future_code, freq)
rquote/markets/hk_stock.py CHANGED
@@ -19,16 +19,9 @@ class HKStockMarket(Market):
              return 'hk' + symbol
          return symbol
 
-     def get_price(self, symbol: str, sdate: str = '', edate: str = '',
-                   freq: str = 'day', days: int = 320, fq: str = 'qfq') -> Tuple[str, str, pd.DataFrame]:
-         """Get HK stock price data."""
-         symbol = self.normalize_symbol(symbol)
-
-         cache_key = f"{symbol}:{sdate}:{edate}:{freq}:{days}:{fq}"
-         cached = self._get_cached(cache_key)
-         if cached:
-             return cached
-
+     def _fetch_price_data(self, symbol: str, sdate: str = '', edate: str = '',
+                           freq: str = 'day', days: int = 320, fq: str = 'qfq') -> Tuple[str, str, pd.DataFrame]:
+         """Fetch HK stock price data from the data source."""
          try:
              raw_data = self.data_source.fetch_kline(
                  symbol, freq=freq, sdate=sdate, edate=edate, days=days, fq=fq
@@ -37,9 +30,7 @@ class HKStockMarket(Market):
              parser = KlineParser()
              name, df = parser.parse_tencent_kline(raw_data, symbol, fq=fq)
 
-             result = (symbol, name, df)
-             self._put_cache(cache_key, result)
-             return result
+             return (symbol, name, df)
          except (DataSourceError, ParseError) as e:
              logger.warning(f'Failed to fetch {symbol}: {e}')
              raise
rquote/markets/us_stock.py CHANGED
@@ -25,15 +25,16 @@ class USStockMarket(Market):
          """Get US stock price data."""
          symbol = self.normalize_symbol(symbol)
 
-         cache_key = f"{symbol}:{sdate}:{edate}:{freq}:{days}:{fq}"
-         cached = self._get_cached(cache_key)
-         if cached:
-             return cached
-
-         # Special-case minute data
+         # Special-case minute data (no caching)
          if freq in ('min', '1min', 'minute'):
              return self._get_minute_data(symbol)
 
+         # Use the base-class caching logic
+         return super().get_price(symbol, sdate, edate, freq, days, fq)
+
+     def _fetch_price_data(self, symbol: str, sdate: str = '', edate: str = '',
+                           freq: str = 'day', days: int = 320, fq: str = 'qfq') -> Tuple[str, str, pd.DataFrame]:
+         """Fetch US stock price data from the data source."""
          try:
              raw_data = self.data_source.fetch_kline(
                  symbol, freq=freq, sdate=sdate, edate=edate, days=days, fq=fq
@@ -42,9 +43,7 @@ class USStockMarket(Market):
              parser = KlineParser()
              name, df = parser.parse_tencent_kline(raw_data, symbol)
 
-             result = (symbol, name, df)
-             self._put_cache(cache_key, result)
-             return result
+             return (symbol, name, df)
          except (DataSourceError, ParseError) as e:
              logger.warning(f'Failed to fetch {symbol}: {e}')
              raise
rquote-0.3.9.dist-info/METADATA → rquote-0.4.1.dist-info/METADATA CHANGED
@@ -1,14 +1,16 @@
  Metadata-Version: 2.4
  Name: rquote
- Version: 0.3.9
+ Version: 0.4.1
  Summary: Mostly day quotes of cn/hk/us/fund/future markets, side with quote list fetch
- Requires-Python: >=3.6.1
+ Requires-Python: >=3.9.0
  Description-Content-Type: text/markdown
  Requires-Dist: build>=0.9.0
  Requires-Dist: httpx>=0.20.0
  Requires-Dist: pandas>=1.0.0
  Requires-Dist: setuptools>=42
  Requires-Dist: twine>=3.8.0
+ Provides-Extra: persistent
+ Requires-Dist: duckdb>=0.9.0; extra == "persistent"
 
  # rquote
 
@@ -16,7 +18,7 @@ Requires-Dist: twine>=3.8.0
 
  ## Version
 
- Current version: **0.3.5**
+ Current version: **0.4.1**
 
  ## Key features
 
@@ -60,17 +62,67 @@ sid, name, df = get_price('sz000001', sdate='2024-01-01', edate='2024-02-01')
 
  ### Using caches
 
+ #### In-memory cache (MemoryCache)
+
  ```python
  from rquote import get_price, MemoryCache
 
  # Create a cache instance
  cache = MemoryCache(ttl=3600)  # cache for 1 hour
 
- # Use the cache (via the dd parameter, backward compatible)
- cache_dict = {}
- sid, name, df = get_price('sh000001', dd=cache_dict)
+ # Use the cache (pass the MemoryCache instance via the dd parameter)
+ sid, name, df = get_price('sh000001', dd=cache)
+
+ # Note: MemoryCache lives in process memory; its data is only valid
+ # while the current process runs and is lost once the script exits
+ ```
+
+ **Cache lifetime notes:**
+ - `MemoryCache` is a pure in-memory cache; data is stored in process memory
+ - Cached data is valid only while the current script is running
+ - All cached data is lost once the script exits
+
+ #### Persistent cache (PersistentCache)
+
+ The persistent cache persists across processes and runs; data is saved to a local file.
+
+ **Install the optional dependency:**
+ ```bash
+ pip install rquote[persistent]
+ # or
+ uv pip install "rquote[persistent]"
+ ```
+
+ **Using the persistent cache:**
+ ```python
+ from rquote import get_price, PersistentCache
+
+ # Create a persistent cache instance
+ # duckdb is used by default when installed; otherwise a pickle file
+ cache = PersistentCache(ttl=86400)  # cache for 24 hours; default path: ~/.rquote/cache.db
+
+ # Or specify a custom path
+ cache = PersistentCache(db_path='./my_cache.db', use_duckdb=True)
+
+ # Use the cache
+ sid, name, df = get_price('sh000001', dd=cache)
+
+ # The persistent cache supports smart extension:
+ # - when the requested end date is beyond the cache, it extends forward from the latest cached date
+ # - when the requested start date is before the cache, it extends backward from the earliest cached date
+ # - data is merged automatically, avoiding duplicate requests
+
+ # Close the cache (optional; data is saved automatically on exit)
+ cache.close()
  ```
 
+ **Persistent cache features:**
+ - ✅ Persists across processes/runs: data is stored in a local file and can be reused on the next run
+ - ✅ Smart data merging: data for the same symbol is merged automatically; the key carries no date range
+ - ✅ Smart extension: when the requested date range exceeds the cache, missing data is fetched and merged
+ - ✅ TTL support: a cache expiry time can be set
+ - ✅ Optional duckdb: duckdb storage is used when installed (better performance); otherwise a pickle file
+
  ## Main features
 
  ### Fetching historical price data
@@ -147,13 +199,13 @@ stocks = get_cn_stock_list(money_min=5e8)
 
  #### `get_hk_stocks_500()`
 
- Get the list of the top 500 HK stocks
+ Get the list of the top 500 HK stocks (sorted by the day's turnover)
 
  ```python
  from rquote import get_hk_stocks_500
 
  stocks = get_hk_stocks_500()
- # Return format: [[code, name, price, turnover, ...], ...]
+ # Return format: [[code, name, price, -, -, -, -, volume, turnover, ...], ...]
  ```
 
  #### `get_us_stocks(k=100)`
rquote-0.3.9.dist-info/RECORD → rquote-0.4.1.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- rquote/__init__.py,sha256=-U5Uq4eT3Hhl2EkVmBgr5TAfU-ZfFzpOaGeJafnhyos,2208
+ rquote/__init__.py,sha256=HMXqZ_wfGoRqw1V3xm2MyBGYKB9ooGWIRnk60bisLZo,2370
  rquote/config.py,sha256=noep_VzY_nJehnkPQb4mkwzpeYLwkU1riqofQJ6Hhw0,1108
  rquote/exceptions.py,sha256=lJH2GC5dDhMoW_OtlBc03wlUn684-7jNPyF1NjmfVIE,569
  rquote/plots.py,sha256=UQn4sjhIzVwagfhUDM738b2HHjKo4tRdU2UCs_1-FbY,2341
@@ -8,9 +8,10 @@ rquote/api/lists.py,sha256=fRebS02Fi0qe6KpWBA-9W1UG0It6__DmRlNimtMa7L8,5331
  rquote/api/price.py,sha256=I5lZl6cUQRlE4AtzNbR-uGZt1ho9vgP1cgNFDjaigMA,3575
  rquote/api/stock_info.py,sha256=912ICdIBr8z2lKWDbq3gG0E94czTPvbx9aXsKUi-QkE,1537
  rquote/api/tick.py,sha256=nEcjuAjtBHUaD8KPRLg643piVa21PhKDQvkVWNwvvME,1431
- rquote/cache/__init__.py,sha256=IXGSRpvSgBlcM6twLuJEOEockbb09_VqORXdQpfwpCA,138
+ rquote/cache/__init__.py,sha256=S393I5Wmp0QooaRka9n7bvDUdEbg3jUhm6u815T86rM,317
  rquote/cache/base.py,sha256=orzG4Yo-6gzVG027j1-LTZPT718JohnCdLDnOLoLUQ4,515
  rquote/cache/memory.py,sha256=7z4keb3q91pzI4ASQWy1MU8T5nbWLCEUjJcStv_3hvk,1933
+ rquote/cache/persistent.py,sha256=ZffpM1a_ngRTjvHvPwk3XoorH87TSCBwtrXtvO0yqBo,19751
  rquote/data_sources/__init__.py,sha256=WCe1aam4677jM5G6wP4a-dQFTeBzcU5PJqsKieAVMBo,215
  rquote/data_sources/base.py,sha256=JuKsTMxH7y8yRxHg3JbLzQwXPr43rS4pnwc5625u2U4,443
  rquote/data_sources/sina.py,sha256=T_3Dl0Mwlhx8CKRJll_UKobYecRWltGaIOiGkpHS43Q,3300
@@ -18,12 +19,12 @@ rquote/data_sources/tencent.py,sha256=ayt1O85pheLwzX3z5c6Qij1NrmUywcsz6YcSVzdDoM
  rquote/factors/__init__.py,sha256=_ZbH2XxYtXwCJpvRVdNvGncoPSpMqrtlYmf1_fMGIjM,116
  rquote/factors/technical.py,sha256=dPDs3pDEDRV9iQJBrSoKpGFLQMjOqyoBdN2rUntpOUU,4235
  rquote/markets/__init__.py,sha256=k4F8cZgb-phqemMqhZXFPdOKsR4P--DD3d5i21vKhbg,365
- rquote/markets/base.py,sha256=DjvxRcJqwUsBTxnsE28Gd-zJLFsCGwdQpezLRAZ_9sQ,1347
- rquote/markets/cn_stock.py,sha256=fyF7jJHFUrI5jwuqBKHXpsIE51H4kbyc3q-uuviPLGk,8224
+ rquote/markets/base.py,sha256=nHBMzQRkuDUrsx9GvB_QiMh2deMUjTiUZsIRYPJpB_8,11206
+ rquote/markets/cn_stock.py,sha256=nu2ebTE4a6FAJkvpMN0FEPuqwom_hqTRjnUg96cQGKc,8320
  rquote/markets/factory.py,sha256=4Txpuok0LBOLT_vAiIU-NslwVnYF7sKHCdlacAboxpo,2875
- rquote/markets/future.py,sha256=7AqViPp0S9OQZsaU2hkJzh4My6gYFqLo1OUW2mVMSDo,7215
- rquote/markets/hk_stock.py,sha256=NlWaXQgXttpcQVFZjflcEkMTmXMxeP2C6Y7OGG50u7E,1452
- rquote/markets/us_stock.py,sha256=17mTg50g3ImOnGM4Re1MRSyvbD2mgFW6wjtMh86IEXA,2465
+ rquote/markets/future.py,sha256=yGMyUu9Fv75jbzPbvW6_36otEeebSij7vnzow_zyEn8,7358
+ rquote/markets/hk_stock.py,sha256=AhRJpWp027ACew9ogxkVCJXbqbYQ1AkbFwDJccXbvAs,1183
+ rquote/markets/us_stock.py,sha256=GT4IxCMTgxb0JSkDa6acZ6PpHLhK6lrskI0ftiYxGCA,2603
  rquote/parsers/__init__.py,sha256=q4g-FgpzxKBPfhJiQH3B5MEeZWUIXlyre-vAnOnfYmA,110
  rquote/parsers/kline.py,sha256=g6k8W76-4hpYsuBgvwmb5G6ZkzHOJDX-JrVVXYksw4c,4020
  rquote/utils/__init__.py,sha256=-ZHABqFHQeJrCCsgnqEYWR57jl7GduCKn2V3hpFi-pE,348
@@ -32,7 +33,7 @@ rquote/utils/helpers.py,sha256=V07n9BtRS8bEJH023Kca78-unk7iD3B9hn2UjELetYs,354
  rquote/utils/http.py,sha256=X0Alhnu0CNqyQeOt6ivUWmh2XwrWxXd2lSpQOKDdnzw,3249
  rquote/utils/logging.py,sha256=cbeRH4ODazn7iyQmGoEBT2lH5LX4Ca3zDfs_20J1T28,566
  rquote/utils/web.py,sha256=I8_pcThW6VUvahuRHdtp32iZwr85hEt1hB6TgznMy_U,3854
- rquote-0.3.9.dist-info/METADATA,sha256=gFYIx3AMMzbJgDx9BlXn-Xn3T9PgkTuYkQTXthyjeIA,11262
- rquote-0.3.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- rquote-0.3.9.dist-info/top_level.txt,sha256=CehAiaZx7Fo8HGoV2zd5GhILUW1jQEN8YS-cWMlrK9Y,7
- rquote-0.3.9.dist-info/RECORD,,
+ rquote-0.4.1.dist-info/METADATA,sha256=Cp9oRn7ohg_2XmhczYDNR2JJMX0MK2aDFjqhP5kJWVA,13259
+ rquote-0.4.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ rquote-0.4.1.dist-info/top_level.txt,sha256=CehAiaZx7Fo8HGoV2zd5GhILUW1jQEN8YS-cWMlrK9Y,7
+ rquote-0.4.1.dist-info/RECORD,,
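Taken together, the README notes above imply the following end-to-end behaviour. A hedged sketch, assuming `get_price` accepts `sdate`/`edate` together with `dd` as the README examples suggest (symbol and dates are illustrative, and the first fetch needs network access):

```python
# Sketch: two get_price calls sharing one PersistentCache.
from rquote import get_price, PersistentCache

cache = PersistentCache(db_path='./demo_cache.db', ttl=86400)

# First call fetches January from the network and populates the cache
_, _, jan = get_price('sh000001', sdate='2024-01-01', edate='2024-01-31', dd=cache)

# Second call overlaps the cached range and extends into February; per the
# smart-extension notes, only the missing 2024-02 slice should be fetched,
# then merged with the cached January data
_, _, jan_feb = get_price('sh000001', sdate='2024-01-01', edate='2024-02-29', dd=cache)

cache.close()
```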