mdbq 2.9.2__py3-none-any.whl → 2.9.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mdbq/aggregation/aggregation.py +65 -28
- mdbq/aggregation/query_data.py +96 -22
- mdbq/config/products.py +25 -11
- mdbq/mysql/mysql.py +163 -40
- {mdbq-2.9.2.dist-info → mdbq-2.9.4.dist-info}/METADATA +1 -1
- {mdbq-2.9.2.dist-info → mdbq-2.9.4.dist-info}/RECORD +8 -8
- {mdbq-2.9.2.dist-info → mdbq-2.9.4.dist-info}/WHEEL +1 -1
- {mdbq-2.9.2.dist-info → mdbq-2.9.4.dist-info}/top_level.txt +0 -0
mdbq/aggregation/aggregation.py
CHANGED
@@ -24,7 +24,7 @@ import time
 import re
 import shutil
 import getpass
-
+from sqlalchemy import create_engine
 warnings.filterwarnings('ignore')
 """
 
@@ -1167,7 +1167,24 @@ def upload_dir(path, db_name, collection_name, json_path=None):
     intersection_keys = dtypes.keys() & old_dt.keys()  # 获取两个字典键的交集
     dtypes = {k: dtypes[k] for k in intersection_keys}  # 使用交集的键创建新字典
     df = df.astype(dtypes)  # 再次更新 df 的数据类型
+    df.fillna(0, inplace=True)
+
+    # for col in df.columns.tolist():
+    #     df[col] = df[col].apply(lambda x: 0 if str(x) == '' else x)
+    # print(f'{i}/{count}')
+    # sql_engine = create_engine(
+    #     f"mysql+pymysql://{username}:{password}@{host}:{port}/{db_name}")  # 创建数据库引擎
+    # df.to_sql(
+    #     name=collection_name,
+    #     con=sql_engine,
+    #     if_exists='append',
+    #     index=False,
+    #     chunksize=1000
+    # )
+
+
 
+    #
     m.df_to_mysql(df=df, db_name=db_name, table_name=collection_name,
                   move_insert=False, # 先删除,再插入
                   df_sql = True,
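The only functional change in `upload_dir` here is the new `df.fillna(0, inplace=True)` before the upload; the `create_engine`/`to_sql` path stays commented out. A minimal sketch of what the added step does (toy frame, not the package's data):

```python
import pandas as pd

# Zero-fill missing cells before handing the frame to m.df_to_mysql(),
# as upload_dir now does; empty CSV cells would otherwise arrive as NaN.
df = pd.DataFrame({'花费': [1.5, None], '点击量': [10, None]})
df.fillna(0, inplace=True)
print(df)  # NaN cells become 0.0
```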
@@ -1201,33 +1218,50 @@ def one_file_to_mysql(file, db_name, table_name):
 
 
 def test():
-    path =
+    path = r'/Users/xigua/Downloads/DMP报表'
 
    results = []
     for root, dirs, files in os.walk(path, topdown=False):
         for name in files:
             if name.endswith('.csv') and 'baidu' not in name and '~' not in name:
-                # print(name)
                 # df = pd.read_excel(os.path.join(root, name), header=0)
                 df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
+                results.append(df)
                 # print(name)
-
-
-
-
-
-
-
+                if len(df) == 0:
+                    continue
+                if '达摩盘消耗占比' in df.columns.tolist():
+                    print(name)
+                    df.pop('达摩盘消耗占比')
+                # df.insert(loc=1, column='店铺名称', value='万里马官方旗舰店')
+                # df['更新时间'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+                df.to_csv(os.path.join(root, name), encoding='utf-8_sig', index=False, header=True)
+                # for col in ['更新时间']:
+                #     if col not in df.columns.tolist():
+                #         print(name)
+                #         df[col] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+                #         df.to_csv(os.path.join(root, name), encoding='utf-8_sig', index=False, header=True)
                 # pattern = re.findall(r'\d{4}-\d{2}-\d{2}_\d{4}-\d{2}-\d{2}', name)[0]
                 # new_name = f'py_xg_店铺销售指标_万里马官方旗舰店_{pattern}.csv'
                 # df.to_csv(os.path.join(root, name), encoding='utf-8_sig', index=False, header=True)
                 # os.remove(os.path.join(root, name))
-                results.append(df)
-    df = pd.concat(results)
-
-
-
-
+    # results.append(df)
+    # df = pd.concat(results)
+    # df.drop_duplicates(
+    #     subset=[
+    #         '日期',
+    #         '店铺名称',
+    #         '报表类型',
+    #         '消耗',
+    #         '展现量',
+    #         '点击量',
+    #     ], keep='last', inplace=True, ignore_index=True)
+    # df.fillna(0, inplace=True)
+    # for col in df.columns.tolist():
+    #     df[col] = df[col].apply(lambda x: 0 if str(x) == '' else x)
+    # path = '/Users/xigua/Downloads'
+    # filename = '品销宝_2024年_合并.csv'
+    # df.to_csv(os.path.join(path, filename), encoding='utf-8_sig', index=False, header=True)
 
 
 if __name__ == '__main__':
@@ -1238,20 +1272,23 @@ if __name__ == '__main__':
     #
     # # 上传 1 个文件到数据库
     # one_file_to_mysql(
-    #     file=r'/Users/
-    #     db_name='
-    #     table_name='
+    #     file=r'/Users/xigua/Downloads/DMP报表_2024-10-23_2024-10-29.csv',
+    #     db_name='达摩盘3',
+    #     table_name='dmp人群报表',
     # )
 
-    #
-
-
-
-
-
-
+    # test()
+
+    col = 1
+    if col:
+        # 上传一个目录到指定数据库
+        db_name = '达摩盘3'
+        table_name = 'dmp人群报表'
+        upload_dir(
+            path=r'/Users/xigua/Downloads/DMP报表',
+            db_name=db_name,
+            collection_name=table_name,
+        )
 
 
-    test()
 
mdbq/aggregation/query_data.py
CHANGED
@@ -6,6 +6,7 @@ from mdbq.mysql import mysql
 from mdbq.mysql import s_query
 from mdbq.aggregation import optimize_data
 from mdbq.config import myconfig
+from mdbq.config import products
 import datetime
 from dateutil.relativedelta import relativedelta
 import pandas as pd
@@ -15,6 +16,7 @@ import platform
 import getpass
 import json
 import os
+import time
 
 """
 程序用于下载数据库(调用 s_query.py 下载并清洗), 并对数据进行聚合清洗, 不会更新数据库信息;
@@ -204,7 +206,7 @@ class MysqlDatasQuery:
             '场景名字': 1,
             '宝贝id': 1,
             '词类型': 1,
-            '
+            '词名字_词包名字': 1,
             '花费': 1,
             '展现量': 1,
             '点击量': 1,
@@ -246,7 +248,7 @@ class MysqlDatasQuery:
         }
         df = self.download.data_to_df(
             db_name='推广数据2',
-            table_name='
+            table_name='超级直播报表_人群',
             start_date=start_date,
             end_date=end_date,
             projection=projection,
@@ -685,7 +687,7 @@ class MysqlDatasQuery:
         }
         df_tm_living = self.download.data_to_df(
             db_name='推广数据2',
-            table_name='
+            table_name='超级直播报表_人群',
             start_date=start_date,
             end_date=pd.to_datetime('2024-04-16'),  # 只可以取此日期之前的数据
             projection=projection,
@@ -855,8 +857,7 @@ class MysqlDatasQuery:
         df_crowd.sort_values('日期', ascending=True, ignore_index=True, inplace=True)
         df_crowd.drop_duplicates(subset=['人群id',], keep='last', inplace=True, ignore_index=True)
         df_crowd.pop('日期')
-
-
+        df_crowd = df_crowd.astype({'人群id': 'int64'}, errors='ignore')
         projection = {}
         df_dmp = self.download.data_to_df(
             db_name='达摩盘3',
@@ -865,14 +866,16 @@ class MysqlDatasQuery:
             end_date=end_date,
             projection=projection,
         )
+        df_dmp = df_dmp.astype({'人群id': 'int64'}, errors='ignore')
         df_dmp.sort_values('日期', ascending=True, ignore_index=True, inplace=True)
-        df_dmp.drop_duplicates(subset=['日期', '人群id', '消耗'], keep='last', inplace=True, ignore_index=True)
+        df_dmp.drop_duplicates(subset=['日期', '人群id', '消耗_元'], keep='last', inplace=True, ignore_index=True)
         df = pd.merge(df_dmp, df_crowd, left_on=['人群id'], right_on=['人群id'], how='left')
         # 清除一些不必要的字符
         df['用户年龄'] = df['用户年龄'].apply(lambda x: '~'.join(re.findall(r'^(\d+).*-(\d+)岁$', str(x))[0]) if '岁' in str(x) else x)
         df['消费能力等级'] = df['消费能力等级'].apply(lambda x: f'L{''.join(re.findall(r'(\d)', str(x)))}' if '购买力' in str(x) else x)
         # df.to_csv('/Users/xigua/Downloads/test3.csv', index=False, header=True, encoding='utf-8_sig')
         # breakpoint()
+        df.rename(columns={'消耗_元': '消耗'}, inplace=True)
         return df
 
 
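Worth noting in this hunk: `人群id` is now cast to `int64` on both frames before `pd.merge`. pandas refuses to merge when one key column is `object` (strings) and the other is `int64`, so aligning the dtypes keeps the join working whichever form the id arrives in. A sketch with invented rows:

```python
import pandas as pd

# One source delivers 人群id as text, the other as integers; align both
# to int64 so the merge keys are comparable (mirrors the added astype calls).
df_dmp = pd.DataFrame({'人群id': ['101', '102'], '消耗_元': [5.0, 8.0]})
df_crowd = pd.DataFrame({'人群id': [101, 102], '用户年龄': ['18~24', '25~29']})
df_dmp = df_dmp.astype({'人群id': 'int64'}, errors='ignore')
df_crowd = df_crowd.astype({'人群id': 'int64'}, errors='ignore')
print(pd.merge(df_dmp, df_crowd, on='人群id', how='left'))
```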
@@ -999,7 +1002,8 @@ class GroupBy:
         df_pic_lin = df[df['店铺名称'] == '万里马官方旗舰店']
         df_pic = df_pic_lin.groupby(['日期', '商品id'], as_index=False).agg({'花费': 'sum'})
         df_pic = df_pic[~df_pic['商品id'].isin([''])]  # 指定列中删除包含空值的行
-
+        date_obj = datetime.datetime.strptime(f'{year_my}-{last_month.month}-01', '%Y-%m-%d').date()
+        df_pic = df_pic[(df_pic['日期'] >= date_obj)]
         df_pic = df_pic.groupby(['商品id'], as_index=False).agg({'花费': 'sum'})
         df_pic.sort_values('花费', ascending=False, ignore_index=True, inplace=True)
         df_pic.reset_index(inplace=True)
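The two added lines restrict `df_pic` to spend since the first day of last month before the per-`商品id` aggregation. In the method, `year_my` and `last_month` come from the surrounding date logic; the sketch below fixes the cutoff to a literal:

```python
import datetime
import pandas as pd

# Keep only rows on or after the month-start cutoff (cutoff hard-coded here).
date_obj = datetime.datetime.strptime('2024-10-01', '%Y-%m-%d').date()
df_pic = pd.DataFrame({
    '日期': [datetime.date(2024, 9, 30), datetime.date(2024, 10, 2)],
    '花费': [1.0, 2.0],
})
df_pic = df_pic[(df_pic['日期'] >= date_obj)]
print(df_pic)  # only the 2024-10-02 row remains
```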
@@ -1166,7 +1170,7 @@ class GroupBy:
                 '直接成交金额': float,
             }, errors='raise')
         if is_maximize:
-            df = df.groupby(['日期', '店铺名称', '营销场景', '商品id', '词类型', '
+            df = df.groupby(['日期', '店铺名称', '营销场景', '商品id', '词类型', '词名字_词包名字', '花费', '展现量', '点击量'], as_index=False).agg(
                 **{'加购量': ('加购量', np.max),
                    '成交笔数': ('成交笔数', np.max),
                    '成交金额': ('成交金额', np.max),
@@ -1175,7 +1179,7 @@ class GroupBy:
                    }
             )
         else:
-            df = df.groupby(['日期', '店铺名称', '营销场景', '商品id', '词类型', '
+            df = df.groupby(['日期', '店铺名称', '营销场景', '商品id', '词类型', '词名字_词包名字', '花费', '展现量', '点击量'], as_index=False).agg(
                 **{
                    '加购量': ('加购量', np.min),
                    '成交笔数': ('成交笔数', np.min),
@@ -1185,7 +1189,7 @@ class GroupBy:
                    }
             )
         df.insert(loc=1, column='推广渠道', value='万相台无界版')  # df中插入新列
-        df['是否品牌词'] = df['
+        df['是否品牌词'] = df['词名字_词包名字'].str.contains('万里马|wanlima', regex=True)
         df['是否品牌词'] = df['是否品牌词'].apply(lambda x: '品牌词' if x else '')
         dir_file = f'\\\\192.168.1.198\\时尚事业部\\01.运营部\\0-电商周报-每周五更新\\分类配置文件.xlsx'
         dir_file2 = '/Volumes/时尚事业部/01.运营部/0-电商周报-每周五更新/分类配置文件.xlsx'
@@ -1196,17 +1200,17 @@ class GroupBy:
         # df_fl.rename(columns={'分类1': '词分类'}, inplace=True)
         df_fl = df_fl[['关键词', '词分类']]
         # 合并并获取词分类信息
-        df = pd.merge(df, df_fl, left_on=['
+        df = pd.merge(df, df_fl, left_on=['词名字_词包名字'], right_on=['关键词'], how='left')
         df.pop('关键词')
         df['词分类'].fillna('', inplace=True)
         if '词分类' in df.columns.tolist():
             # 这行决定了,从文件中读取的词分类信息优先级高于 ret_keyword 函数的词分类
             df['词分类'] = df.apply(
-                lambda x: self.ret_keyword(keyword=str(x['
+                lambda x: self.ret_keyword(keyword=str(x['词名字_词包名字']), as_file=False) if x['词分类'] == ''
                 else x['词分类'], axis=1
             )
         else:
-            df['词分类'] = df['
+            df['词分类'] = df['词名字_词包名字'].apply(lambda x: self.ret_keyword(keyword=str(x), as_file=False))
         # df.to_csv('/Users/xigua/Downloads/test.csv', index=False, header=True, encoding='utf-8_sig')
         # breakpoint()
         return df
@@ -1546,6 +1550,7 @@ class GroupBy:
             df.drop_duplicates(subset=['场次id'], keep='first', inplace=True, ignore_index=True)
             return df
         elif '多店推广场景_按日聚合' in table_name:
+            df['日期'] = pd.to_datetime(df['日期'], format='%Y-%m-%d', errors='ignore')  # 转换日期列
             df = df.groupby(
                 ['日期', '店铺名称', '营销场景'],
                 as_index=False).agg(
@@ -1956,7 +1961,7 @@ class GroupBy:
         )
         return df
 
-    @try_except
+    # @try_except
     def performance_jd(self, jd_tg=True):
         jdtg, sku_sales = self.data_jdtg['京东_京准通'], self.data_jdtg['京东_sku_商品明细']
         jdtg = jdtg.groupby(['日期', '跟单sku_id'],
@@ -1970,6 +1975,7 @@ class GroupBy:
         df = df[['日期', '商品id', '货号', '成交单量', '成交金额', '成本价']]
         df['商品id'] = df['商品id'].astype(str)
         jdtg['跟单sku_id'] = jdtg['跟单sku_id'].astype(str)
+        jdtg = jdtg.astype({'日期': 'datetime64[ns]'}, errors='raise')
         if jd_tg is True:
             # 完整的数据表,包含全店所有推广、销售数据
             df = pd.merge(df, jdtg, how='left', left_on=['日期', '商品id'], right_on=['日期', '跟单sku_id'])  # df 合并推广表
@@ -2073,8 +2079,49 @@ class GroupBy:
     df.to_excel(os.path.join(path, filename + '.xlsx'), index=index, header=header, engine=engine, freeze_panes=freeze_panes)
 
 
-def 
-
+def date_table():
+    """
+    生成 pbix 使用的日期表
+    """
+    start_date = '2022-01-01'  # 日期表的起始日期
+    yesterday = time.strftime('%Y-%m-%d', time.localtime(time.time() - 86400))
+    dic = pd.date_range(start=start_date, end=yesterday)
+    df = pd.DataFrame(dic, columns=['日期'])
+    df.sort_values('日期', ascending=True, ignore_index=True, inplace=True)
+    df.reset_index(inplace=True)
+    # inplace 添加索引到 df
+    p = df.pop('index')
+    df['月2'] = df['日期']
+    df['月2'] = df['月2'].dt.month
+    df['日期'] = df['日期'].dt.date  # 日期格式保留年月日,去掉时分秒
+    df['年'] = df['日期'].apply(lambda x: str(x).split('-')[0] + '年')
+    df['月'] = df['月2'].apply(lambda x: str(x) + '月')
+    # df.drop('月2', axis=1, inplace=True)
+    mon = df.pop('月2')
+    df['日'] = df['日期'].apply(lambda x: str(x).split('-')[2])
+    df['年月'] = df.apply(lambda x: x['年'] + x['月'], axis=1)
+    df['月日'] = df.apply(lambda x: x['月'] + x['日'] + '日', axis=1)
+    df['第n周'] = df['日期'].apply(lambda x: x.strftime('第%W周'))
+    df['索引'] = p
+    df['月索引'] = mon
+    df.sort_values('日期', ascending=False, ignore_index=True, inplace=True)
+
+    m = mysql.MysqlUpload(
+        username=username,
+        password=password,
+        host=host,
+        port=port,
+    )
+    m.df_to_mysql(
+        df=df,
+        db_name='聚合数据',
+        table_name='日期表',
+        move_insert=True,  # 先删除,再插入
+        df_sql=False,  # 值为 True 时使用 df.to_sql 函数上传整个表, 不会排重
+        drop_duplicates=False,  # 值为 True 时检查重复数据再插入,反之直接上传,会比较慢
+        filename=None,  # 用来追踪处理进度
+        service_database=service_database,  # 用来追踪处理进度
+    )
 
 
 def data_aggregation(months=1, is_juhe=True, less_dict=[]):
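The new `date_table()` generates the date dimension that the pbix report consumes, then uploads it with `move_insert=True` (delete, then insert). Its core derivation, stripped of the MySQL upload, looks like this (column names as in the diff):

```python
import time
import pandas as pd

# Date dimension from a fixed start date through yesterday, as in date_table().
start_date = '2022-01-01'
yesterday = time.strftime('%Y-%m-%d', time.localtime(time.time() - 86400))
df = pd.DataFrame(pd.date_range(start=start_date, end=yesterday), columns=['日期'])
df['年'] = df['日期'].apply(lambda x: str(x.date()).split('-')[0] + '年')
df['月'] = df['日期'].dt.month.apply(lambda x: str(x) + '月')
df['第n周'] = df['日期'].apply(lambda x: x.strftime('第%W周'))
print(df.tail(3))
```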
@@ -2169,7 +2216,7 @@ def data_aggregation(months=1, is_juhe=True, less_dict=[]):
         {
             '数据库名': '聚合数据',
             '集合名': '天猫_关键词报表',
-            '唯一主键': ['日期', '推广渠道', '营销场景', '商品id', '花费', '词类型', '
+            '唯一主键': ['日期', '推广渠道', '营销场景', '商品id', '花费', '词类型', '词名字_词包名字',],
             '数据主体': sdq.tg_gjc(),
         },
         {
@@ -2310,13 +2357,40 @@ def data_aggregation(months=1, is_juhe=True, less_dict=[]):
     )
 
 
-def main():
-
+def main(days=100, months=3):
+    # 更新日期表
+    date_table()
+    # 更新货品年份基准表, 属性设置 3 - 货品年份基准
+    p = products.Products()
+    p.to_mysql()
 
-
-if __name__ == '__main__':
+    # 数据聚合
     data_aggregation(
-        months=
+        months=months,
         is_juhe=True,  # 生成聚合表
         # less_dict=['天猫_品销宝账户报表'],  # 单独聚合某一个数据库
     )
+    time.sleep(60)
+
+    system = platform.system()  # 本机系统
+    host_name = socket.gethostname()  # 本机名
+    conf = myconfig.main()
+    db_list = conf[system][host_name]['mysql']['数据库集']
+    # 清理所有库
+    optimize_data.op_data(
+        db_name_lists=db_list,
+        days=days,
+        is_mongo=False,
+        is_mysql=True,
+    )
+
+
+if __name__ == '__main__':
+    main(days=100, months=3)
+
+    # data_aggregation(
+    #     months=3,
+    #     is_juhe=True,  # 生成聚合表
+    #     # less_dict=['天猫_品销宝账户报表'],  # 单独聚合某一个数据库
+    #     )
+
mdbq/config/products.py
CHANGED
@@ -124,25 +124,39 @@ class Products:
         ]
         self.datas += my_datas
 
+
     def to_mysql(self):
         self.update_my_datas()
         df = pd.DataFrame(self.datas)
-
+        m_engine = mysql.MysqlUpload(
            username=username,
            password=password,
            host=host,
            port=port,
        )
-
-
-
-
-
-
-
-
-
+        for dict_data in df.to_dict(orient='records'):
+            m_engine.dict_to_mysql(
+                db_name='属性设置3',
+                table_name='货品年份基准',
+                dict_data=dict_data,
+                # icm_update=['日期', '店铺名称', '宝贝id'],  # 唯一组合键
+                unique_main_key=['商品id'],
+                set_type={
+                    '商品id': 'mediumtext',
+                    '平台': 'mediumtext',
+                    '上市年份': 'mediumtext',
+                },
+            )
+        # m.df_to_mysql(
+        #     df=df,
+        #     db_name='属性设置3',
+        #     table_name='货品年份基准',
+        #     move_insert = False,
+        #     df_sql=False,  # 值为 True 时使用 df.to_sql 函数上传整个表, 不会排重
+        #     drop_duplicates=True,  # 值为 True 时检查重复数据再插入,反之直接上传,会比较慢
+        #     icm_update=[],
+        #     service_database=service_database,  # 用来追踪处理进度
+        #     )
 
     def market_date(self, product_id: int):
         try:
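`Products.to_mysql` now uploads row by row instead of pushing the whole frame: `df.to_dict(orient='records')` yields one `{列名: 值}` dict per row, which is the unit `dict_to_mysql` upserts (keyed here on the unique `商品id`). The shape, with made-up rows:

```python
import pandas as pd

# to_dict(orient='records') turns each row into the dict dict_to_mysql() expects.
df = pd.DataFrame({'商品id': [101, 102], '平台': ['平台A', '平台B'], '上市年份': ['2023年', '2024年']})
for dict_data in df.to_dict(orient='records'):
    print(dict_data)
# {'商品id': 101, '平台': '平台A', '上市年份': '2023年'}
# {'商品id': 102, '平台': '平台B', '上市年份': '2024年'}
```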
mdbq/mysql/mysql.py
CHANGED
@@ -6,9 +6,12 @@ import re
 import time
 from functools import wraps
 import warnings
+from unittest.mock import inplace
+
 import pymysql
 import numpy as np
 import pandas as pd
+from markdown_it.rules_inline.backticks import regex
 from sqlalchemy import create_engine
 import os
 import calendar
@@ -42,7 +45,11 @@ warnings.filterwarnings('ignore')
 
 
 def is_valid_date(date_string):
-    """
+    """
+    判断是否是日期格式, 且允许没有前导零, 且允许带时间
+    纯日期格式: 返回 1
+    日期+时间: 返回 2
+    """
     date_pattern = r"^(\d{4})-(0?[1-9]|1[0-2])-(0?[1-9]|[12]\d|3[01])$"
     match = re.match(date_pattern, str(date_string))  # 判断纯日期格式:2024-11-09
     if match is None:
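The expanded docstring pins down the contract: return `1` for a bare date, `2` for date plus time, with leading zeros optional. A simplified stand-in (same date regex as the diff; the time branch here is an assumption about the unshown remainder of the function):

```python
import re

def is_valid_date(date_string):
    # Pure date, leading zeros optional: 2024-11-9 or 2024-11-09 -> 1
    date_pattern = r"^(\d{4})-(0?[1-9]|1[0-2])-(0?[1-9]|[12]\d|3[01])$"
    if re.match(date_pattern, str(date_string)):
        return 1
    # Date plus time -> 2 (sketched; the real body is not shown in the diff)
    datetime_pattern = date_pattern[:-1] + r" (\d{1,2}):(\d{1,2}):(\d{1,2})$"
    if re.match(datetime_pattern, str(date_string)):
        return 2
    return None

print(is_valid_date('2024-11-9'))          # 1
print(is_valid_date('2024-11-09 8:5:3'))   # 2
print(is_valid_date('not a date'))         # None
```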
@@ -125,15 +132,17 @@ class MysqlUpload:
 
         return wrapper
 
-
+    @try_except
+    def dict_to_mysql(self, db_name, table_name, dict_data, icm_update=None, main_key=None, unique_main_key=None, index_length=100, set_type=None):
         """
         插入字典数据
         dict_data: 字典
-        main_key: 
+        main_key: 指定索引列, 通常用日期列,默认会设置日期为索引
         unique_main_key: 指定唯一索引列
         index_length: 索引长度
+        icm_update: 增量更正,指定后 main_key 只用于检查/创建列,不能更新数据
+        set_type: {}
         """
-
         if not main_key:
             main_key = []
         if not unique_main_key:
@@ -167,7 +176,10 @@ class MysqlUpload:
                 print(f'创建 mysql 表: {table_name}')
 
             # 根据 dict_data 的值添加指定的数据类型
-            dtypes = self.cover_dict_dtypes(dict_data=dict_data)  # {'店铺名称': 'mediumtext',...}
+            dtypes, dict_data = self.cover_dict_dtypes(dict_data=dict_data)  # {'店铺名称': 'mediumtext',...}
+            if set_type:
+                dtypes.update(set_type)  # 自定义的数据类型
+
             # 检查列
             sql = "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s;"
             cursor.execute(sql, (db_name, table_name))
@@ -181,59 +193,164 @@ class MysqlUpload:
                     cursor.execute(sql)
                     print(f"添加列: {col}({dtypes[col]})")  # 添加列并指定数据类型
 
-                    if col in main_key or col
-                    sql = f"
-
-
-
-
-
-
-
-
+                    if col in main_key or col == '日期':
+                        sql = f"CREATE INDEX index_name ON `{table_name}`(`{col}`);"
+                        print(f"设置为索引: {col}({dtypes[col]})")
+                        cursor.execute(sql)
+                    if col in unique_main_key:
+                        if dtypes[col] == 'mediumtext':
+                            sql = f"ALTER TABLE {table_name} ADD UNIQUE (`{col}`({index_length}))"
+                        else:
+                            sql = f"ALTER TABLE {table_name} ADD UNIQUE (`{col}`)"
+                        cursor.execute(sql)
+                    # if col in main_key or col in unique_main_key:
+                    #     sql = f"SHOW INDEXES FROM `{table_name}` WHERE `Column_name` = %s"
+                    #     cursor.execute(sql, (col))
+                    #     result = cursor.fetchone()  # 检查索引是否存在
+                    #     if not result:
+                    #         if col in main_key:
+                    #             sql = f"CREATE INDEX index_name ON `{table_name}`(`{col}`);"
+                    #             print(f"设置为索引: {col}({dtypes[col]})")
+                    #             cursor.execute(sql)
+                    #         elif col in unique_main_key:
+                    #             if dtypes[col] == 'mediumtext':
+                    #                 sql = f"CREATE INDEX UNIQUE index_name ON `{table_name}` (`{col}`({index_length}));"
+                    #             else:
+                    #                 sql = f"CREATE INDEX UNIQUE index_name ON `{table_name}` (`{col}`);"
+                    #             print(f"设置唯一索引: {col}({dtypes[col]})")
+                    #             print(sql)
+                    #             cursor.execute(sql)
             connection.commit()  # 提交事务
+            """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+            """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+            # 处理插入的数据
+            if icm_update:
+                """ 使用增量更新: 需确保 icm_update['主键'] 传进来的列组合是数据表中唯一,值不会发生变化且不会重复,否则可能产生覆盖 """
+                sql = 'SELECT COLUMN_NAME FROM information_schema.columns WHERE table_schema = %s AND table_name = %s'
+                cursor.execute(sql, (db_name, {table_name}))
+                columns = cursor.fetchall()
+                cols_exist = [col['COLUMN_NAME'] for col in columns]  # 数据表的所有列, 返回 list
+                update_col = [item for item in cols_exist if item not in icm_update and item != 'id']  # 除了主键外的其他列
+
+                # unique_keys 示例: `日期`, `余额`
+                unique_keys = ', '.join(f"`{item}`" for item in update_col)  # 列名需要转义
+                condition = []
+                for up_col in icm_update:
+                    condition += [f'`{up_col}` = "{dict_data[up_col]}"']
+                condition = ' AND '.join(condition)  # condition值示例: `品销宝余额` = '2930.73' AND `短信剩余` = '67471'
+                sql = f"SELECT {unique_keys} FROM `{table_name}` WHERE {condition}"
+                # print(sql)
+                # sql = f"SELECT {unique_keys} FROM `{table_name}` WHERE `创建时间` = '2014-09-19 14:32:33'"
+                cursor.execute(sql)
+                results = cursor.fetchall()  # results 是数据库取出的数据
+                if results:  # 有数据返回,再进行增量检查
+                    for result in results:  # results 是数据库数据, dict_data 是传进来的数据
+                        change_col = []  # 发生变化的列名
+                        change_values = []  # 发生变化的数据
+                        for col in update_col:
+                            # 因为 mysql 里面有 decimal 数据类型,要移除末尾的 0 再做比较(df 默认将 5.00 小数截断为 5.0)
+                            df_value = str(dict_data[col])
+                            mysql_value = str(result[col])
+                            if '.' in df_value:
+                                df_value = re.sub(r'0+$', '', df_value)
+                                df_value = re.sub(r'\.$', '', df_value)
+                            if '.' in mysql_value:
+                                mysql_value = re.sub(r'0+$', '', mysql_value)
+                                mysql_value = re.sub(r'\.$', '', mysql_value)
+                            if df_value != mysql_value:  # 传进来的数据和数据库比较, 有变化
+                                # print(f'{dict_data['日期']}{dict_data['商品id']}{col} 列的值有变化,{str(dict_data[col])} != {str(result[col])}')
+                                change_values += [f"`{col}` = \"{str(dict_data[col])}\""]
+                                change_col.append(col)
+                        not_change_col = [item for item in update_col if item not in change_col]
+                        # change_values 是 df 传进来且和数据库对比后,发生了变化的数据,值示例: [`品销宝余额` = '9999.0', `短信剩余` = '888']
+                        if change_values:  # change_values 有数据返回,表示值需要更新
+                            if not_change_col:
+                                not_change_values = [f'`{col}` = "{str(dict_data[col])}"' for col in not_change_col]
+                                not_change_values = ' AND '.join(
+                                    not_change_values)  # 示例: `短信剩余` = '888' AND `test1` = '93'
+                                # print(change_values, not_change_values)
+                                condition += f' AND {not_change_values}'  # 重新构建完整的查询条件,将未发生变化的列加进查询条件
+                            change_values = ', '.join(f"{item}" for item in change_values)  # 注意这里 item 外面没有反引号
+                            sql = "UPDATE `%s` SET %s WHERE %s" % (table_name, change_values, condition)
+                            # print(sql)
+                            cursor.execute(sql)
+                else:  # 没有数据返回,则直接插入数据
+                    cols = ', '.join(f"`{item}`" for item in dict_data.keys())  # 列名需要转义
+                    # data.update({item: f"{data[item]}" for item in data.keys()})  # 全部值转字符, 不是必须的
+                    values = ', '.join([f'"{item}"' for item in dict_data.values()])  # 值要加引号
+                    sql = f"INSERT INTO `{table_name}` ({cols}) VALUES ({values});"
+                    cursor.execute(sql)
+                connection.commit()  # 提交数据库
+                connection.close()
+                return
 
+            # 构建 keys
             keys_data = ', '.join([f'`{str(item)}`' for item in dict_data.keys()])
+            # 构建 values
             values_data = ', '.join(f'"{str(item)}"' for item in dict_data.values())
-
-
-
+            # 构建其他键值,重复时要更新的其他键
+            if main_key:
+                for col in main_key:
+                    del dict_data[col]
+            if unique_main_key:
+                for col in unique_main_key:
+                    del dict_data[col]
+            # 涉及列名务必使用反引号
+            update_datas = ', '.join([f'`{k}` = VALUES(`{k}`)' for k, v in dict_data.items()])
+
+            # 构建 sql
             sql = f"INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s" % (table_name, keys_data, values_data, update_datas)
             # print(sql)
             cursor.execute(sql)
-            connection.commit()  #
+            connection.commit()  # 提交数据库
             connection.close()
 
-
     def cover_dict_dtypes(self, dict_data):
         if not dict_data:
             print(f'mysql.py -> MysqlUpload -> cover_dict_dtypes -> 传入的字典不能为空')
             return
         __res_dict = {}
+        new_dict_data = {}
         for k, v in dict_data.items():
-
-
-
-
-
+            k = str(k).lower()
+            k = re.sub(r'[()\-,,$&~^、 ()\"\'“”=·/。》《><!!`]', '_', k, re.IGNORECASE)
+            k = k.replace(')', '')
+            k = re.sub(r'_{2,}', '_', k)
+            k = re.sub(r'_+$', '', k)
+            if str(v) == '':
+                v = 0
+            v = str(v)
+            v = re.sub('^-$|^--$|^nan$|^null$', '0', v, re.I)
+            v = re.sub(',|="|"', '', v, re.I)
+            if re.findall(r'^[-+]?\d+\.?\d*%$', v):
+                v = str(float(v.rstrip("%")) / 100)
+
+            result1 = re.findall(r'编码|_?id|货号|款号|文件大小', k, re.IGNORECASE)
+            result2 = re.findall(r'占比$|投产$|产出$|同比$|环比$|roi$|率$', k, re.IGNORECASE)
+            date_type = is_valid_date(v)  # 判断日期时间
+            int_num = is_integer(v)  # 判断整数
+            count_int, count_float = count_decimal_places(v)  # 判断小数,返回小数位数
             if result1:  # 京东sku/spu商品信息
                 __res_dict.update({k: 'mediumtext'})
-            elif
-            __res_dict.update({k: '
-            elif
-
-
+            elif k == '日期':
+                __res_dict.update({k: 'DATE'})
+            elif k == '更新时间':
+                __res_dict.update({k: 'TIMESTAMP'})
+            elif str(v) == '':
                 __res_dict.update({k: 'mediumtext'})
-            elif
+            elif result2:  # 小数
+                __res_dict.update({k: 'decimal(10,4)'})
+            elif date_type == 1:  # 纯日期
                 __res_dict.update({k: 'DATE'})
-            elif date_type == 2:
+            elif date_type == 2:  # 日期+时间
                 __res_dict.update({k: 'DATETIME'})
             elif int_num:
                 __res_dict.update({k: 'INT'})
             elif count_float > 0:
                 if count_int + count_float > 10:
-
+                    if count_float > 5:
+                        v = round(float(v), 4)
+                    __res_dict.update({k: 'decimal(12,4)'})
                 elif count_float >= 6:
                     __res_dict.update({k: 'decimal(12,6)'})
                 elif count_float >= 4:
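Outside the `icm_update` branch, `dict_to_mysql` falls back to a plain MySQL upsert: the dict's keys become the column list, its values the row, and, once the `main_key`/`unique_main_key` columns are deleted from the dict, whatever remains becomes the `ON DUPLICATE KEY UPDATE` assignments. Assembled standalone (table name and data invented):

```python
# How the upsert statement is put together in dict_to_mysql().
dict_data = {'日期': '2024-11-08', '商品id': 101, '花费': 5.0}
table_name = '示例表'

keys_data = ', '.join([f'`{str(item)}`' for item in dict_data.keys()])
values_data = ', '.join(f'"{str(item)}"' for item in dict_data.values())
# after main_key / unique_main_key columns are removed from dict_data,
# the leftover columns become the update assignments:
update_datas = ', '.join([f'`{k}` = VALUES(`{k}`)' for k in dict_data.keys()])
sql = "INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s" % (
    table_name, keys_data, values_data, update_datas)
print(sql)
```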
@@ -242,9 +359,8 @@ class MysqlUpload:
                     __res_dict.update({k: 'decimal(10,2)'})
             else:
                 __res_dict.update({k: 'mediumtext'})
-
-
-
+            new_dict_data.update({k: v})
+        return __res_dict, new_dict_data
 
     @try_except
     def df_to_mysql(self, df, table_name, db_name='远程数据源', icm_update=[], service_database={'xigua_lx': 'mysql'}, move_insert=False, df_sql=False, drop_duplicates=False, filename=None, count=None, json_path=None, reset_id=False):
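`cover_dict_dtypes` now returns a pair: the inferred MySQL type per column and the cleaned data itself (keys lower-cased and stripped of punctuation, empty strings zeroed, percent strings converted to fractions), which is why the caller unpacks `dtypes, dict_data`. The percent rule in isolation:

```python
import re

# '12.5%' -> '0.125': values are normalized before a decimal type is inferred.
v = '12.5%'
if re.findall(r'^[-+]?\d+\.?\d*%$', v):
    v = str(float(v.rstrip('%')) / 100)
print(v)  # 0.125
```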
@@ -309,6 +425,11 @@ class MysqlUpload:
         for service_name, database in service_database.items():
             # 2. 列数据类型转换,将 df 数据类型转换为 mysql 的数据类型
             dtypes, cl, db_n, tb_n = self.convert_dtypes(df=df, db_name=db_name, table_name=table_name, path=json_path, service_name=service_name)
+            for dy in dtypes.keys():
+                if '日期' == dy:
+                    dtypes.update({'日期': 'DATE'})
+                if '更新时间' == dy:
+                    dtypes.update({'更新时间': 'TIMESTAMP'})
 
             # 有特殊字符不需转义
             sql = "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s;"
@@ -425,9 +546,7 @@ class MysqlUpload:
         # data 是传进来待处理的数据, 不是数据库数据
         # data 示例: {'日期': Timestamp('2024-08-27 00:00:00'), '推广费余额': 33299, '品销宝余额': 2930.73, '短信剩余': 67471}
         try:
-
-            # data.update({item: f"{data[item]}" for item in data.keys()})  # 全部值转字符, 不是必须的
-            values = ', '.join([f'"{item}"' for item in data.values()])  # 值要加引号
+
             condition = []
             for k, v in data.items():
                 condition += [f'`{k}` = "{v}"']
@@ -1055,3 +1174,7 @@ if __name__ == '__main__':
     data = conf['Windows']['xigua_lx']['mysql']['local']
     username, password, host, port = data['username'], data['password'], data['host'], data['port']
     print(username, password, host, port)
+
+    ss = '2024-11-08'
+    ss= re.sub(r'\\N', '0', ss)
+    print(ss, '111')
{mdbq-2.9.2.dist-info → mdbq-2.9.4.dist-info}/RECORD
CHANGED
@@ -1,11 +1,11 @@
 mdbq/__init__.py,sha256=Il5Q9ATdX8yXqVxtP_nYqUhExzxPC_qk_WXQ_4h0exg,16
 mdbq/__version__.py,sha256=y9Mp_8x0BCZSHsdLT_q5tX9wZwd5QgqrSIENLrb6vXA,62
 mdbq/aggregation/__init__.py,sha256=EeDqX2Aml6SPx8363J-v1lz0EcZtgwIBYyCJV6CcEDU,40
-mdbq/aggregation/aggregation.py,sha256=
+mdbq/aggregation/aggregation.py,sha256=IJS5ILEmYlrepj2oX6TDuMjab5rYEOpQuYyTpgfRbR0,73747
 mdbq/aggregation/df_types.py,sha256=U9i3q2eRPTDY8qAPTw7irzu-Tlg4CIySW9uYro81wdk,8125
 mdbq/aggregation/mysql_types.py,sha256=YTGyrF9vcRgfkQbpT-e-JdJ7c7VF1dDHgyx9YZRES8w,10934
 mdbq/aggregation/optimize_data.py,sha256=79uwiM2WqNNFxGpE2wKz742PRq-ZGgFjdOV0vgptHdY,3513
-mdbq/aggregation/query_data.py,sha256=
+mdbq/aggregation/query_data.py,sha256=w_p013oMdF6YovQwP6RY6wiPTKuuTfSn53Wo1RC_CD0,103372
 mdbq/bdup/__init__.py,sha256=AkhsGk81SkG1c8FqDH5tRq-8MZmFobVbN60DTyukYTY,28
 mdbq/bdup/bdup.py,sha256=LAV0TgnQpc-LB-YuJthxb0U42_VkPidzQzAagan46lU,4234
 mdbq/clean/__init__.py,sha256=A1d6x3L27j4NtLgiFV5TANwEkLuaDfPHDQNrPBbNWtU,41
@@ -18,7 +18,7 @@ mdbq/company/home_sh.py,sha256=42CZ2tZIXHLl2mOl2gk2fZnjH2IHh1VJ1s3qHABjonY,18021
 mdbq/config/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
 mdbq/config/get_myconf.py,sha256=cmNvsyoNa0RbZ9FOTjSd3jyyGwkxjUo0phvdHbGlrms,6010
 mdbq/config/myconfig.py,sha256=EGymTlAimtHIDJ9egCtOehBEPOj6rea504kvsEZu64o,854
-mdbq/config/products.py,sha256=
+mdbq/config/products.py,sha256=sC4ctAiHR7ydkEXuIlvwvTPDLJXwengkG0hFWSQRFz0,6808
 mdbq/config/set_support.py,sha256=xkZCX6y9Bq1ppBpJAofld4B2YtchA7fl0eT3dx3CrSI,777
 mdbq/config/update_conf.py,sha256=taL3ZqKgiVWwUrDFuaYhim9a72Hm4BHRhhDscJTziR8,4535
 mdbq/dataframe/__init__.py,sha256=2HtCN8AdRj53teXDqzysC1h8aPL-mMFy561ESmhehGQ,22
@@ -28,7 +28,7 @@ mdbq/log/mylogger.py,sha256=oaT7Bp-Hb9jZt52seP3ISUuxVcI19s4UiqTeouScBO0,3258
 mdbq/mongo/__init__.py,sha256=SILt7xMtQIQl_m-ik9WLtJSXIVf424iYgCfE_tnQFbw,13
 mdbq/mongo/mongo.py,sha256=v9qvrp6p1ZRWuPpbSilqveiE0FEcZF7U5xUPI0RN4xs,31880
 mdbq/mysql/__init__.py,sha256=A_DPJyAoEvTSFojiI2e94zP0FKtCkkwKP1kYUCSyQzo,11
-mdbq/mysql/mysql.py,sha256=
+mdbq/mysql/mysql.py,sha256=5l5wFS6AFjXuZtb54JSnoqh_DlB3-tQd9LNac0CSYjQ,62538
 mdbq/mysql/recheck_mysql.py,sha256=jHQSlQy0PlQ_EYICQv_2nairUX3t6OIwPtSELKIpjkY,8702
 mdbq/mysql/s_query.py,sha256=bgNNIqYLDCHjD5KTFcm6x4u74selpAGs5ouJYuqX86k,8447
 mdbq/mysql/year_month_day.py,sha256=VgewoE2pJxK7ErjfviL_SMTN77ki8GVbTUcao3vFUCE,1523
@@ -45,7 +45,7 @@ mdbq/req_post/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
 mdbq/req_post/req_tb.py,sha256=PexWSCPJNM6Tv0ol4lAWIhlOwsAr_frnjtcdSHCFiek,36179
 mdbq/spider/__init__.py,sha256=RBMFXGy_jd1HXZhngB2T2XTvJqki8P_Fr-pBcwijnew,18
 mdbq/spider/aikucun.py,sha256=jHrdGWBJQaSywx7V-U4YuM6vWkwC5SR5tTOOdB3YU_c,17306
-mdbq-2.9.
-mdbq-2.9.
-mdbq-2.9.
-mdbq-2.9.
+mdbq-2.9.4.dist-info/METADATA,sha256=1xFHayCMA4H3FNPMIEdut-xWBAWiwooZZrD6p7blLQU,243
+mdbq-2.9.4.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+mdbq-2.9.4.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
+mdbq-2.9.4.dist-info/RECORD,,
{mdbq-2.9.2.dist-info → mdbq-2.9.4.dist-info}/top_level.txt
File without changes