mdbq 4.0.3__py3-none-any.whl → 4.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mdbq/__version__.py +1 -1
- mdbq/aggregation/query_data.py +74 -9
- mdbq/mysql/s_query.py +29 -8
- mdbq/mysql/uploader.py +73 -42
- {mdbq-4.0.3.dist-info → mdbq-4.0.4.dist-info}/METADATA +1 -1
- {mdbq-4.0.3.dist-info → mdbq-4.0.4.dist-info}/RECORD +8 -8
- {mdbq-4.0.3.dist-info → mdbq-4.0.4.dist-info}/WHEEL +0 -0
- {mdbq-4.0.3.dist-info → mdbq-4.0.4.dist-info}/top_level.txt +0 -0
mdbq/__version__.py
CHANGED
@@ -1 +1 @@
-VERSION = '4.0.3'
+VERSION = '4.0.4'
mdbq/aggregation/query_data.py
CHANGED
@@ -14,7 +14,8 @@ import platform
 import os
 import time
 import calendar
-import
+from collections.abc import Mapping, Sequence
+import inspect

 dir_path = os.path.expanduser("~")
 config_file = os.path.join(dir_path, 'spd.txt')
@@ -36,6 +37,47 @@ logger = mylogger.MyLogger(
 )


+def reorder_columns(df: pd.DataFrame, set_type) -> pd.DataFrame:
+    """
+    Reorder the DataFrame's columns to follow the order in set_type, ignoring case; columns not present in set_type are skipped automatically.
+    set_type may be a list or a dict (in which case the dict's keys define the order).
+    Data and dtypes are left unchanged.
+    If set_type is None, an empty list, or an empty dict, the original df is returned without any adjustment.
+    """
+    # Cases where the original df is returned as-is
+    if set_type is None:
+        return df
+    if isinstance(set_type, Mapping) and len(set_type) == 0:
+        return df
+    if isinstance(set_type, Sequence) and not isinstance(set_type, str) and len(set_type) == 0:
+        return df
+
+    # If set_type is a dict, use its keys
+    if isinstance(set_type, Mapping):
+        col_order = list(set_type.keys())
+    elif isinstance(set_type, Sequence) and not isinstance(set_type, str):
+        col_order = list(set_type)
+    else:
+        raise ValueError("set_type must be a list or a dict (or other mapping type)")
+
+    # Map lowercase column names to the original names
+    col_map = {col.lower(): col for col in df.columns}
+    # Build the new column order (only columns present in df, ordered by set_type)
+    new_cols = []
+    used = set()
+    for col in col_order:
+        key = col.lower()
+        if key in col_map and key not in used:
+            new_cols.append(col_map[key])
+            used.add(key)
+    # Append columns not listed in set_type, preserving their original order
+    for col in df.columns:
+        if col.lower() not in used:
+            new_cols.append(col)
+    # Return the DataFrame with columns in the new order
+    return df[new_cols]
+
+
 def upload_data_decorator(**upload_kwargs):
     """
     Data upload decorator
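For illustration, a minimal sketch of how the new helper behaves; the DataFrame and ordering below are made-up examples, assuming reorder_columns is importable from mdbq.aggregation.query_data:

    import pandas as pd
    from mdbq.aggregation.query_data import reorder_columns

    # Hypothetical frame whose column casing differs from the requested order
    df = pd.DataFrame({'Date': ['2024-01-01'], 'shop': ['a'], 'SKU': ['x1']})

    # Matching is case-insensitive; 'shop' is absent from the order list,
    # so it stays at the end in its original position; values and dtypes are untouched
    print(reorder_columns(df, ['sku', 'date']).columns.tolist())
    # ['SKU', 'Date', 'shop']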
@@ -46,6 +88,24 @@ def upload_data_decorator(**upload_kwargs):
     @wraps(func)
     def wrapper(*args, **kwargs):
         try:
+            # Fetch the set_type / set_typ argument
+            set_type = None
+            # Look in kwargs first
+            for key in ['set_type', 'set_typ']:
+                if key in kwargs:
+                    set_type = kwargs[key]
+                    break
+            # If not found in kwargs, look in positional args by parameter name
+            if set_type is None:
+                sig = inspect.signature(func)
+                params = list(sig.parameters)
+                for key in ['set_type', 'set_typ']:
+                    if key in params:
+                        idx = params.index(key)
+                        if len(args) > idx:
+                            set_type = args[idx]
+                        break
+
             # Execute the original function
             result = func(*args, **kwargs)

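The lookup above recovers a parameter by name even when it was passed positionally. A self-contained sketch of the same technique (find_arg and demo are hypothetical names):

    import inspect

    def find_arg(func, args, kwargs, names=('set_type', 'set_typ')):
        # kwargs take priority, mirroring the decorator
        for name in names:
            if name in kwargs:
                return kwargs[name]
        # Otherwise map the parameter name to its positional index
        params = list(inspect.signature(func).parameters)
        for name in names:
            if name in params:
                idx = params.index(name)
                if len(args) > idx:
                    return args[idx]
        return None

    def demo(df, set_typ=None):
        return df

    print(find_arg(demo, ('frame', {'日期': 'date'}), {}))  # {'日期': 'date'}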
@@ -55,6 +115,9 @@ def upload_data_decorator(**upload_kwargs):

             # If the result is a DataFrame
             if isinstance(result, pd.DataFrame):
+                # Reorder columns
+                if set_type is not None:
+                    result = reorder_columns(result, set_type)
                 # Set default values
                 default_kwargs = {
                     'check_duplicate': False,
@@ -87,6 +150,11 @@ def upload_data_decorator(**upload_kwargs):
                 logger.warning('函数返回的元组第一个元素不是DataFrame,直接返回原结果,不执行上传', {'函数': func.__name__})
                 return result

+            # Reorder columns
+            if set_type is not None:
+                df = reorder_columns(df, set_type)
+                # Preserve the tuple structure
+                result = (df, extra_kwargs) + result[2:]
             # Merge decorator kwargs with the function's kwargs
             merged_kwargs = {**upload_kwargs}
             merged_kwargs.update(extra_kwargs)
@@ -2119,7 +2187,7 @@ class MysqlDatasQuery:
             'partition_date_column': '日期',  # date column used for partitioned tables; defaults to '日期'
             'indexes': [],  # regular index columns
             'transaction_mode': 'batch',  # transaction mode
-            'unique_keys': [['日期', '产品线', '
+            'unique_keys': [['日期', '产品线', '计划id', '搜索词', '关键词']],  # list of unique-key constraints
         }

         @try_except
@@ -3032,7 +3100,7 @@ class MysqlDatasQuery:
             'partition_date_column': '日期',  # date column used for partitioned tables; defaults to '日期'
             'indexes': [],  # regular index columns
             'transaction_mode': 'batch',  # transaction mode
-            'unique_keys': [['日期', '店铺名称', '
+            'unique_keys': [['日期', '店铺名称', '商品款号', 'spuid']],  # list of unique-key constraints
         }

         @upload_data_decorator()
@@ -3661,7 +3729,6 @@ def date_table():
     df = df.reset_index(drop=False)
     df.rename(columns={'index': 'id'}, inplace=True)
     df['id'] = df['id'].apply(lambda x: x + 1)
-
     set_typ = {
         '日期': 'date',
         '年': 'varchar(50)',
@@ -3676,6 +3743,7 @@ def date_table():
         '索引': 'int',
         '月索引': 'int',
     }
+
     return df, {
         'db_name': '聚合数据',
         'table_name': '日期表',
@@ -3772,9 +3840,6 @@ def main(days=150, months=3):


 if __name__ == '__main__':
-    main(
-        days=150,  # how many days of aggregated data to clean up
-        months=3  # how many months of aggregated data to generate
-    )
+    # main(months=3)

-
+    pass
mdbq/mysql/s_query.py
CHANGED
@@ -762,6 +762,21 @@ class QueryDatas:
         finally:
             self.pool = None

+    def _adjust_page_size(self, last_duration, current_page_size, min_size=1000, max_size=10000, target_time=2.0):
+        """
+        Automatically adjust the next page_size based on how long the previous batch took.
+        - last_duration: duration of the previous batch query (seconds)
+        - current_page_size: current batch size
+        - min_size, max_size: allowed minimum/maximum batch size
+        - target_time: desired duration per batch (seconds)
+        """
+        if last_duration < target_time / 2 and current_page_size < max_size:
+            return min(current_page_size * 2, max_size)
+        elif last_duration > target_time * 2 and current_page_size > min_size:
+            return max(current_page_size // 2, min_size)
+        else:
+            return current_page_size
+
     def data_to_df(
         self,
         db_name: str,
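The rule is deliberately conservative: the size only changes when a batch lands outside the [target_time/2, 2*target_time] band, which damps oscillation. A standalone sketch of the same rule:

    def adjust_page_size(last_duration, current, min_size=1000, max_size=10000, target=2.0):
        # Double after a fast batch, halve after a slow one, otherwise keep the size
        if last_duration < target / 2 and current < max_size:
            return min(current * 2, max_size)
        if last_duration > target * 2 and current > min_size:
            return max(current // 2, min_size)
        return current

    size = 1000
    for took in (0.3, 0.4, 5.0, 1.0):
        size = adjust_page_size(took, size)
        print(took, '->', size)  # 2000, 4000, 2000, 2000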
@@ -890,18 +905,20 @@ class QueryDatas:
             # Paged query
             offset = 0
             all_results = []
-
+            min_size, max_size = 1000, 10000
+            target_time = 1.0  # aim for roughly one second per batch
+
             while offset < total_count:
+                start_time = time.time()
                 # Add pagination parameters
                 page_sql = f"{base_sql} LIMIT %s OFFSET %s"
                 page_params = list(params) + [page_size, offset]
-
                 cursor.execute(page_sql, tuple(page_params))
                 page_results = cursor.fetchall()
-
+
                 if not page_results:
                     break
-
+
                 if return_format == 'list_dict':
                     all_results.extend(page_results)
                 else:
@@ -909,14 +926,18 @@ class QueryDatas:
                         all_results = pd.DataFrame(page_results)
                     else:
                         all_results = pd.concat([all_results, pd.DataFrame(page_results)], ignore_index=True)
-
-
+
+                duration = time.time() - start_time
+                page_size = self._adjust_page_size(duration, page_size, min_size, max_size, target_time)
+                offset += len(page_results)
                 logger.debug('分页查询进度', {
                     '库': db_name,
                     '表': table_name,
-                    '当前偏移量': offset,
+                    # '当前偏移量': offset,
                     '总记录数': total_count,
-                    '已获取记录数': len(all_results) if return_format == 'list_dict' else len(all_results.index)
+                    '已获取记录数': len(all_results) if return_format == 'list_dict' else len(all_results.index),
+                    '本批耗时': f'{duration:.2f}',
+                    '下批page_size': page_size
                 })

         if return_format == 'df' and isinstance(all_results, pd.DataFrame) and not all_results.empty:
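Putting the pieces together: the loop now advances offset by the number of rows actually returned and feeds each batch's duration back into the page size. A condensed sketch of the list_dict path, reusing the adjust_page_size sketch above (fetch_all is a hypothetical name):

    import time

    def fetch_all(cursor, base_sql, params, total_count, page_size=1000):
        rows, offset = [], 0
        while offset < total_count:
            start = time.time()
            cursor.execute(f"{base_sql} LIMIT %s OFFSET %s", (*params, page_size, offset))
            batch = cursor.fetchall()
            if not batch:
                break
            rows.extend(batch)
            offset += len(batch)  # advance by rows actually returned, not by page_size
            page_size = adjust_page_size(time.time() - start, page_size)
        return rows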
mdbq/mysql/uploader.py
CHANGED
@@ -14,6 +14,7 @@ from dbutils.pooled_db import PooledDB
 import json
 import sys
 from decimal import Decimal, InvalidOperation
+import math

 warnings.filterwarnings('ignore')
 logger = mylogger.MyLogger(
@@ -240,8 +241,16 @@ class MySQLUploader:
             conn = self.pool.connection()
             return conn
         except Exception as e:
-            logger.error('
-
+            logger.error('从连接池获取数据库连接失败,尝试重建连接池', {'error': str(e)})
+            # Force-rebuild the connection pool
+            try:
+                self.pool = self._create_connection_pool()
+                conn = self.pool.connection()
+                logger.info('重建连接池后获取连接成功')
+                return conn
+            except Exception as e2:
+                logger.error('重建连接池后依然获取连接失败', {'error': str(e2)})
+                raise ConnectionError(f'连接数据库失败: {str(e2)}')

     @_execute_with_retry
     def _check_database_exists(self, db_name: str) -> bool:
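The fallback retries exactly once through a fresh pool and converts the second failure into a ConnectionError. A generic sketch of the pattern (PooledClient and new_pool are hypothetical stand-ins for the uploader and its _create_connection_pool):

    class PooledClient:
        def __init__(self, new_pool):
            self._new_pool = new_pool  # factory, e.g. a PooledDB constructor
            self.pool = new_pool()

        def connection(self):
            try:
                return self.pool.connection()
            except Exception:
                # First failure: rebuild the pool and retry exactly once
                try:
                    self.pool = self._new_pool()
                    return self.pool.connection()
                except Exception as e2:
                    raise ConnectionError(f'connect failed: {e2}')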
@@ -407,31 +416,36 @@ class MySQLUploader:
                 col_def += " NOT NULL"
             column_defs.append(col_def)
         # Adjusted primary-key handling
+        def _index_col_sql(col):
+            col_type = set_typ.get(col, '').lower()
+            if 'varchar' in col_type or 'text' in col_type:
+                return f"`{self._normalize_col(col)}`(100)"
+            return f"`{self._normalize_col(col)}`"
         if primary_keys and len(primary_keys) > 0:
-            safe_primary_keys = [
-            primary_key_sql = f"PRIMARY KEY (
+            safe_primary_keys = [_index_col_sql(pk) for pk in primary_keys]
+            primary_key_sql = f"PRIMARY KEY ({','.join(safe_primary_keys)})"
         else:
-            safe_primary_keys = [
+            safe_primary_keys = [_index_col_sql('id')]
             primary_key_sql = f"PRIMARY KEY (`id`)"
         # All indexes are defined inside CREATE TABLE
         index_defs = []
         if date_column and date_column in set_typ:
-            safe_date_col =
-            index_defs.append(f"INDEX `idx_{
+            safe_date_col = _index_col_sql(date_column)
+            index_defs.append(f"INDEX `idx_{self._normalize_col(date_column)}` ({safe_date_col})")
         if indexes:
             for idx_col in indexes:
                 if idx_col in set_typ:
-                    safe_idx_col =
-                    index_defs.append(f"INDEX `idx_{
+                    safe_idx_col = _index_col_sql(idx_col)
+                    index_defs.append(f"INDEX `idx_{self._normalize_col(idx_col)}` ({safe_idx_col})")
         # UNIQUE KEY definitions
         unique_defs = []
         if unique_keys:
             for unique_cols in unique_keys:
                 if not unique_cols:
                     continue
-                safe_unique_cols = [
-                unique_name = f"uniq_{'_'.join(
-                unique_defs.append(f"UNIQUE KEY `{unique_name}` (
+                safe_unique_cols = [_index_col_sql(col) for col in unique_cols]
+                unique_name = f"uniq_{'_'.join([self._normalize_col(c) for c in unique_cols])}"
+                unique_defs.append(f"UNIQUE KEY `{unique_name}` ({','.join(safe_unique_cols)})")
         index_defs = list(set(index_defs))
         all_defs = column_defs + [primary_key_sql] + index_defs + unique_defs
         sql = f"""
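MySQL caps index key length, so the new _index_col_sql gives string columns a 100-character prefix index instead of indexing the full value. A sketch of the rule in isolation (index_col_sql is a stand-in that skips the _normalize_col step):

    def index_col_sql(col, col_type):
        # Prefix-index VARCHAR/TEXT columns to stay under MySQL's index key limit
        if 'varchar' in col_type.lower() or 'text' in col_type.lower():
            return f"`{col}`(100)"
        return f"`{col}`"

    set_typ = {'日期': 'date', '搜索词': 'varchar(255)'}
    print(', '.join(index_col_sql(c, t) for c, t in set_typ.items()))
    # `日期`, `搜索词`(100)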
@@ -447,7 +461,7 @@ class MySQLUploader:
             conn.commit()
             logger.info('数据表及索引已创建', {'库': db_name, '表': table_name, '索引': indexes, '唯一约束': unique_keys})
         except Exception as e:
-            logger.error('建表失败', {'库': db_name, '表': table_name, '错误': str(e)})
+            logger.error('建表失败', {'库': db_name, '表': table_name, '错误': str(e), '异常类型': type(e).__name__})
             if conn is not None:
                 conn.rollback()
             raise
@@ -491,25 +505,45 @@ class MySQLUploader:
     def _validate_value(self, value: Any, column_type: str, allow_null: bool, db_name: str = None, table_name: str = None, col_name: str = None) -> Any:
         """
         Validate and convert a value according to the column type
-
-        :param value: the value to validate
-        :param column_type: the column's data type
-        :param allow_null: whether null values are allowed
-        :param db_name: database name (for logging)
-        :param table_name: table name (for logging)
-        :param col_name: column name (for logging)
-        :return: the converted value
-        :raises ValueError: raised when conversion fails
         """
+        column_type_lower = column_type.lower() if column_type else ''
+        # Unified None/NaN detection
+        is_nan = False
         if value is None:
+            is_nan = True
+        elif isinstance(value, float) and math.isnan(value):
+            is_nan = True
+        elif str(value).lower() in ['nan', 'none']:
+            is_nan = True
+        if is_nan:
             if not allow_null:
-
-                '
-
-
+                if 'int' in column_type_lower:
+                    logger.debug('字段值为None/NaN但不允许空值, 已填充为0', {
+                        '库': db_name, '表': table_name, '列': col_name, '字段类型': column_type
+                    })
+                    return 0
+                elif any(t in column_type_lower for t in ['float', 'double', 'decimal']):
+                    logger.debug('字段值为None/NaN但不允许空值, 已填充为0.0', {
+                        '库': db_name, '表': table_name, '列': col_name, '字段类型': column_type
+                    })
+                    return 0.0
+                elif 'date' in column_type_lower or 'time' in column_type_lower:
+                    # Distinguish plain date from datetime/timestamp
+                    if 'datetime' in column_type_lower or 'timestamp' in column_type_lower:
+                        default_date = '2000-01-01 00:00:00'
+                    else:
+                        default_date = '2000-01-01'
+                    logger.debug('字段值为None/NaN但不允许空值, 已填充为默认日期', {
+                        '库': db_name, '表': table_name, '列': col_name, '字段类型': column_type, '默认值': default_date
+                    })
+                    return default_date
+                else:
+                    logger.debug('字段值为None/NaN但不允许空值, 已填充为none字符串', {
+                        '库': db_name, '表': table_name, '列': col_name, '字段类型': column_type
+                    })
+                    return 'none'
             return None
         try:
-            column_type_lower = column_type.lower()
             if isinstance(value, str) and value.strip().endswith('%'):
                 try:
                     percent_str = value.strip().replace('%', '')
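For NOT NULL columns, the new fallback fills a type-appropriate default rather than rejecting the row. A sketch of just the decision table (default_for is a hypothetical name; the real method also logs each substitution):

    def default_for(column_type):
        t = (column_type or '').lower()
        if 'int' in t:
            return 0
        if any(k in t for k in ('float', 'double', 'decimal')):
            return 0.0
        if 'datetime' in t or 'timestamp' in t:
            return '2000-01-01 00:00:00'
        if 'date' in t or 'time' in t:
            return '2000-01-01'
        return 'none'  # string columns get the literal 'none'

    for typ in ('int', 'decimal(10,2)', 'datetime', 'date', 'varchar(50)'):
        print(typ, '->', default_for(typ))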
@@ -881,22 +915,21 @@ class MySQLUploader:
         # Normalize the keys of set_typ
         set_typ = {self._normalize_col(k): v for k, v in set_typ.items()}

-        #
-        data_columns = set()
-        if data and len(data) > 0:
-            data_columns = set(data[0].keys())
-
-        # Filter set_typ, keeping only columns present in the data
+        # New implementation: filter strictly in set_typ order, then append columns present in data but missing from set_typ
         filtered_set_typ = {}
-
-
+        data_columns = list(data[0].keys()) if data and len(data) > 0 else []
+        # First, follow set_typ order
+        for col in set_typ:
+            if col in data_columns:
                 filtered_set_typ[col] = set_typ[col]
-
-
+        # Then append columns in data but not in set_typ
+        for col in data_columns:
+            if col not in filtered_set_typ:
+                # Infer the type
                 sample_values = [row[col] for row in data if col in row and row[col] is not None][:5]
                 inferred_type = None
                 for val in sample_values:
-                        inferred_type = self._infer_data_type(val, no_log=True)
+                    inferred_type = self._infer_data_type(val, no_log=True)
                 if inferred_type:
                     break
                 if not inferred_type:
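Because filtered_set_typ is now built in set_typ order, the column order chosen by reorder_columns upstream survives into table creation. A sketch of the ordering logic (filter_types is a hypothetical name; the real code infers types from sampled values rather than using a fixed fallback):

    def filter_types(set_typ, data):
        cols = list(data[0].keys()) if data else []
        # set_typ order first...
        out = {c: t for c, t in set_typ.items() if c in cols}
        # ...then any extra data columns, with a stand-in inferred type
        for c in cols:
            if c not in out:
                out[c] = 'varchar(255)'
        return out

    data = [{'b': 1, 'a': 'x', 'extra': None}]
    print(list(filter_types({'a': 'varchar(50)', 'b': 'int'}, data)))
    # ['a', 'b', 'extra']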
@@ -1326,8 +1359,7 @@ class MySQLUploader:
         if cached:
             return cached
         # Collect all column names (excluding id)
-        all_columns = [col for col in set_typ.keys()
-                       if col.lower() != 'id']
+        all_columns = [col for col in set_typ.keys() if col.lower() != 'id']
         if not check_duplicate:
             sql = self._build_simple_insert_sql(db_name, table_name, all_columns,
                                                 update_on_duplicate)
@@ -1364,7 +1396,6 @@ class MySQLUploader:
         - Only a serious database error (e.g. every row hits a unique-constraint conflict and there is no ON DUPLICATE KEY UPDATE) triggers a full rollback.
         - Returns (rows inserted, rows skipped, rows failed).
         """
-        import pymysql  # ensure the exception types are available
         def get_optimal_batch_size(total_rows: int) -> int:
             if total_rows <= 100:
                 return total_rows
@@ -1612,5 +1643,5 @@ def main():


 if __name__ == '__main__':
-    main()
+    # main()
     pass
{mdbq-4.0.3.dist-info → mdbq-4.0.4.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 mdbq/__init__.py,sha256=Il5Q9ATdX8yXqVxtP_nYqUhExzxPC_qk_WXQ_4h0exg,16
-mdbq/__version__.py,sha256=
+mdbq/__version__.py,sha256=lAYjWBa6ThlPWc3_1b0lWM6fn_3Z9ckuALnMMZXhBbs,17
 mdbq/aggregation/__init__.py,sha256=EeDqX2Aml6SPx8363J-v1lz0EcZtgwIBYyCJV6CcEDU,40
-mdbq/aggregation/query_data.py,sha256=
+mdbq/aggregation/query_data.py,sha256=_k6Jg60RaaT056sIaiSO6v84dEnOIOGq-nUJtSr65kI,171861
 mdbq/config/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
 mdbq/config/config.py,sha256=eaTfrfXQ65xLqjr5I8-HkZd_jEY1JkGinEgv3TSLeoQ,3170
 mdbq/log/__init__.py,sha256=Mpbrav0s0ifLL7lVDAuePEi1hJKiSHhxcv1byBKDl5E,15
@@ -10,9 +10,9 @@ mdbq/log/spider_logging.py,sha256=-ozWWEGm3HVv604ozs_OOvVwumjokmUPwbaodesUrPY,16
 mdbq/mysql/__init__.py,sha256=A_DPJyAoEvTSFojiI2e94zP0FKtCkkwKP1kYUCSyQzo,11
 mdbq/mysql/deduplicator.py,sha256=8v3MC6TJ0YEiExWrTP9OXAxTYnL9XbpYL2vWaER1h2M,73099
 mdbq/mysql/mysql.py,sha256=pDg771xBugCMSTWeskIFTi3pFLgaqgyG3smzf-86Wn8,56772
-mdbq/mysql/s_query.py,sha256=
+mdbq/mysql/s_query.py,sha256=RnVCwMQ_n9PcAimbMWbHe9k8eil8shtCfa3LwLBZi6c,41909
 mdbq/mysql/unique_.py,sha256=Wgqq_PjAAD757JTa10wjYaJgssZ_C_ypU6DW56jbuyw,21074
-mdbq/mysql/uploader.py,sha256=
+mdbq/mysql/uploader.py,sha256=e49Gk09K766QXaus_p3VOMcH2VbexQzKsqDTCGrWoWQ,74419
 mdbq/other/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
 mdbq/other/download_sku_picture.py,sha256=YU8DxKMXbdeE1OOKEA848WVp62jYHw5O4tXTjUdq9H0,44832
 mdbq/other/otk.py,sha256=iclBIFbQbhlqzUbcMMoePXBpcP1eZ06ZtjnhcA_EbmE,7241
@@ -25,7 +25,7 @@ mdbq/redis/__init__.py,sha256=YtgBlVSMDphtpwYX248wGge1x-Ex_mMufz4-8W0XRmA,12
 mdbq/redis/getredis.py,sha256=l3zBK7wrZl0oO42-_UGylyatnIp_SBw8wDDvof9fht4,23534
 mdbq/spider/__init__.py,sha256=RBMFXGy_jd1HXZhngB2T2XTvJqki8P_Fr-pBcwijnew,18
 mdbq/spider/aikucun.py,sha256=hPRzLQvFIF4ibN8aP3Dg_ru5meac90faPyzOB22cj-o,20965
-mdbq-4.0.3.dist-info/METADATA,sha256=
-mdbq-4.0.3.dist-info/WHEEL,sha256=
-mdbq-4.0.3.dist-info/top_level.txt,sha256=
-mdbq-4.0.3.dist-info/RECORD,,
+mdbq-4.0.4.dist-info/METADATA,sha256=VM2dtOiBJ74NlYhq9UWAFAPloayAXPX9bLKnvZJd7Xg,363
+mdbq-4.0.4.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+mdbq-4.0.4.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
+mdbq-4.0.4.dist-info/RECORD,,
{mdbq-4.0.3.dist-info → mdbq-4.0.4.dist-info}/WHEEL
File without changes
{mdbq-4.0.3.dist-info → mdbq-4.0.4.dist-info}/top_level.txt
File without changes