mdbq 1.9.1__py3-none-any.whl → 1.9.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mdbq/aggregation/aggregation.py +45 -9
- mdbq/aggregation/query_data.py +55 -3
- mdbq/clean/data_clean.py +38 -3
- mdbq/mysql/mysql.py +15 -0
- {mdbq-1.9.1.dist-info → mdbq-1.9.3.dist-info}/METADATA +1 -1
- {mdbq-1.9.1.dist-info → mdbq-1.9.3.dist-info}/RECORD +8 -8
- {mdbq-1.9.1.dist-info → mdbq-1.9.3.dist-info}/WHEEL +1 -1
- {mdbq-1.9.1.dist-info → mdbq-1.9.3.dist-info}/top_level.txt +0 -0
mdbq/aggregation/aggregation.py
CHANGED
```diff
@@ -61,7 +61,6 @@ class DatabaseUpdate:
         for name in files:
             if '~$' in name or '.DS' in name or '.localized' in name or '.ini' in name or '$RECYCLE.BIN' in name or 'Icon' in name:
                 continue
-
             db_name = None  # initialize/reset the variables so they don't carry over into the next iteration
             collection_name = None
             for data in datas:  # match db_name and collection_name from the report-title mapping table
```
```diff
@@ -189,6 +188,22 @@ class DatabaseUpdate:
                     collection_name = '店铺来源_日数据_旧版'
             elif name.endswith('.csv') and '客户运营平台_客户列表' in name:
                 df = pd.read_csv(os.path.join(root, name), encoding=encoding, header=0, na_filter=False)
+            elif name.endswith('.xlsx') and '直播分场次效果' in name:
+                pattern = re.findall(r'(\d{4}-\d{2}-\d{2})_(\d{4}-\d{2}-\d{2})', name)
+                if pattern:
+                    continue
+                df = pd.read_excel(os.path.join(root, name), header=0)
+                if len(df) == 0:
+                    print(f'{name} 报表数据为空')
+                    continue
+                df.replace(to_replace=['--'], value='', regex=False, inplace=True)
+                df.replace(to_replace=[','], value='', regex=True, inplace=True)
+                df['直播开播时间'] = pd.to_datetime(df['直播开播时间'], format='%Y-%m-%d %H:%M:%S', errors='ignore')
+                df.insert(loc=0, column='日期', value=df['直播开播时间'])
+                df['日期'] = df['日期'].apply(
+                    lambda x: pd.to_datetime(str(x).split(' ')[0], format='%Y-%m-%d', errors='ignore') if x else x)
+                df.insert(loc=1, column='店铺', value='万里马官方旗舰店')
+
             elif name.endswith('.xls') and '生意参谋' in name and '无线店铺三级流量来源详情' in name:
                 # shop traffic source: mobile Taobao search, keywords
                 pattern = re.findall(r'(\d{4}-\d{2}-\d{2})_(\d{4}-\d{2}-\d{2})', name)
```
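The new branch normalizes the live-session ('直播分场次效果') Excel export before anything else touches it: filenames that already carry a `YYYY-MM-DD_YYYY-MM-DD` range (which the rename step in data_clean.py below produces) are skipped as already processed, placeholder dashes and thousands separators are stripped, the stream start timestamp is parsed, and `日期`/`店铺` columns are prepended. A minimal standalone sketch of the same cleaning pattern on toy data; `errors='coerce'` and `.dt.date` are simplifications here, not the package's exact calls:

```python
import pandas as pd

# Toy stand-in for one '直播分场次效果' export.
df = pd.DataFrame({
    '直播开播时间': ['2024-05-01 20:00:00', '2024-05-02 19:30:00'],
    '观看人数': ['1,024', '--'],
})
df.replace(to_replace=['--'], value='', regex=False, inplace=True)  # drop placeholder dashes
df.replace(to_replace=[','], value='', regex=True, inplace=True)    # strip thousands separators
df['直播开播时间'] = pd.to_datetime(df['直播开播时间'], format='%Y-%m-%d %H:%M:%S', errors='coerce')
df.insert(loc=0, column='日期', value=df['直播开播时间'].dt.date)    # calendar date of the stream
df.insert(loc=1, column='店铺', value='万里马官方旗舰店')
print(df)
```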
```diff
@@ -459,10 +474,20 @@ class DatabaseUpdate:
             elif name.endswith('.xlsx') and '搜索分析-排名定位-商品词下排名' in name:
                 # JD: product rankings under search terms
                 try:
+                    pattern = re.findall(r'(\d{4}-\d{2}-\d{2})-(\d{4}-\d{2}-\d{2})', name)
+                    if not pattern:
+                        continue
+                    if pattern[0][0] != pattern[0][1]:
+                        print(f'{name}: 检测到数据周期异常,仅支持7天数据')
+                        continue
                     df = pd.read_excel(os.path.join(root, name), header=0, engine='openpyxl')
                     if len(df) == 0:
                         print(f'{name} 报表数据为空')
                         continue
+                    if len(df.columns.tolist()) < 20:
+                        print(f'{name}: 报表可能缺失诊断数据')
+                        os.remove(os.path.join(root, name))
+                        continue
                     df.rename(columns={'商品的ID': 'skuid'}, inplace=True)
                     for col in ['词人气', '搜索点击率']:
                         if col in df.columns.tolist():
```
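Both new guards are cheap sanity checks: the report period is pulled out of the filename and rejected when its two endpoints differ, and files with fewer than 20 columns (missing diagnostic columns) are deleted. A sketch of the filename check with a hypothetical helper name:

```python
import re

# Hypothetical helper mirroring the guard above: pull the report period
# out of a JD ranking filename ('...YYYY-MM-DD-YYYY-MM-DD...').
def report_period(name: str):
    found = re.findall(r'(\d{4}-\d{2}-\d{2})-(\d{4}-\d{2}-\d{2})', name)
    return found[0] if found else None

period = report_period('搜索分析-排名定位-商品词下排名-2024-05-01-2024-05-07.xlsx')
if period is None or period[0] != period[1]:
    print('unexpected data period')  # the diff rejects ranges whose endpoints differ
```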
```diff
@@ -1115,12 +1140,23 @@ if __name__ == '__main__':
     # database='mysql'
     # )

-    db_name = '生意经2'
-    table_name = '省份城市分析'
-    upload_dir(
-        path='/Users/xigua/数据中心/原始文件2/生意经/地域分布',
-        db_name=db_name,
-        collection_name=table_name,
-        dbs={'mysql': True, 'mongodb': False},
-    )
+    # db_name = '生意经2'
+    # table_name = '省份城市分析'
+    # upload_dir(
+    #     path='/Users/xigua/数据中心/原始文件2/生意经/地域分布',
+    #     db_name=db_name,
+    #     collection_name=table_name,
+    #     dbs={'mysql': True, 'mongodb': False},
+    # )
+    #

+    # new-style data classification
+    dp = DatabaseUpdate(path='/Users/xigua/Downloads')
+    dp.new_unzip(is_move=True)
+    dp.cleaning(is_move=False)  # clean the data into self.datas; files are not removed yet and stay with the raw files
+    # push self.datas to the databases
+    dp.upload_df(service_databases=[
+        # {'home_lx': 'mongodb'},
+        {'company': 'mysql'},
+        # {'nas': 'mysql'},
+    ])
```
mdbq/aggregation/query_data.py
CHANGED
```diff
@@ -454,6 +454,50 @@ class MysqlDatasQuery:
         )
         return df

+    def zb_ccfx(self):
+        start_date, end_date = self.months_data(num=self.months)
+        projection = {
+            '日期': 1,
+            '店铺': 1,
+            '场次信息': 1,
+            '场次id': 1,
+            '直播开播时间': 1,
+            '开播时长': 1,
+            '封面图点击率': 1,
+            '观看人数': 1,
+            '观看次数': 1,
+            '新增粉丝数': 1,
+            '流量券消耗': 1,
+            '观看总时长(秒)': 1,
+            '人均观看时长(秒)': 1,
+            '次均观看时长(秒)': 1,
+            '商品点击人数': 1,
+            '商品点击次数': 1,
+            '商品点击率': 1,
+            '加购人数': 1,
+            '加购件数': 1,
+            '加购次数': 1,
+            '成交金额(元)': 1,
+            '成交人数': 1,
+            '成交件数': 1,
+            '成交笔数': 1,
+            '成交转化率': 1,
+            '退款人数': 1,
+            '退款笔数': 1,
+            '退款件数': 1,
+            '退款金额(元)': 1,
+            '预售定金支付金额(元)': 1,
+            '预售预估总金额(元)': 1,
+        }
+        df = self.download.data_to_df(
+            db_name='生意参谋2',
+            table_name='直播场次分析',
+            start_date=start_date,
+            end_date=end_date,
+            projection=projection,
+        )
+        return df
+
 class GroupBy:
     """
     Data aggregation and export
```
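The new `zb_ccfx` method fetches the last N months of the 直播场次分析 table with an explicit projection, so only the listed columns come back from MySQL. Assuming `data_to_df` follows the MongoDB-style projection convention its argument name suggests (an assumption, the diff does not show its implementation), the column selection reduces to something like:

```python
import pandas as pd

# Hypothetical reduction of Mongo-style projection semantics to pandas:
# keep exactly the columns whose flag is 1.
def apply_projection(df: pd.DataFrame, projection: dict) -> pd.DataFrame:
    keep = [col for col, flag in projection.items() if flag == 1 and col in df.columns]
    return df[keep]

frame = pd.DataFrame({'日期': ['2024-05-01'], '观看人数': [100], '内部id': [42]})
print(apply_projection(frame, {'日期': 1, '观看人数': 1}))  # hypothetical '内部id' column is dropped
```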
```diff
@@ -1016,6 +1060,9 @@ class GroupBy:
             }
         )
         return df
+        elif '直播场次分析' in table_name:
+            df.drop_duplicates(subset=['日期', '直播开播时间', '观看人数'], keep='first', inplace=True, ignore_index=True)
+            return df
         else:
             print(f'<{table_name}>: Groupby 类尚未配置,数据为空')
             return pd.DataFrame({})
```
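Deduplication keys on date, stream start time, and viewer count, so a report that was downloaded twice only contributes its first row, even if other metric columns differ between the two exports. A toy illustration with hypothetical numbers:

```python
import pandas as pd

df = pd.DataFrame({
    '日期': ['2024-05-01', '2024-05-01'],
    '直播开播时间': ['2024-05-01 20:00:00', '2024-05-01 20:00:00'],
    '观看人数': [100, 100],
    '成交金额(元)': [5000, 4800],  # a re-export with a revised figure
})
df.drop_duplicates(subset=['日期', '直播开播时间', '观看人数'],
                   keep='first', inplace=True, ignore_index=True)
print(len(df))  # 1 -- the first download wins
```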
```diff
@@ -1056,7 +1103,6 @@ class GroupBy:
         df['毛利率'] = df.apply(lambda x: round((x['销售额'] - x['商品成本']) / x['销售额'], 4) if x['销售额'] > 0 else 0, axis=1)
         df['盈亏'] = df.apply(lambda x: x['商品毛利'] - x['花费'], axis=1)
         return df
-
     def performance_concat(self, bb_tg=True):
         tg, zb, pxb = self.data_tgyj['天猫汇总表调用'], self.data_tgyj['天猫_超级直播'], self.data_tgyj['天猫_品销宝账户报表']
         zb.rename(columns={
```
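The unchanged context lines carry the margin arithmetic worth a gloss: 毛利率 (gross margin) is (销售额 - 商品成本) / 销售额 rounded to four places with a zero-sales guard, and 盈亏 (profit/loss) is 商品毛利 - 花费. With hypothetical numbers:

```python
# Hypothetical one-row version of the margin / profit-and-loss arithmetic.
sales, cost, gross_profit, spend = 1000.0, 600.0, 400.0, 150.0
margin = round((sales - cost) / sales, 4) if sales > 0 else 0  # 0.4
pnl = gross_profit - spend                                     # 250.0
print(margin, pnl)
```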
```diff
@@ -1385,6 +1431,12 @@ def data_aggregation(service_databases=[{}], months=1):
             '唯一主键': ['日期', '关键词', '访客数'],
             '数据主体': sdq.tm_search(),
         },
+        {
+            '数据库名': '聚合数据',
+            '集合名': '生意参谋_直播场次分析',
+            '唯一主键': ['日期', '直播开播时间'],
+            '数据主体': sdq.zb_ccfx(),
+        },
     ]
     for items in data_dict:  # iterate over the returned results
         db_name, table_name, unique_key_list, df = items['数据库名'], items['集合名'], items['唯一主键'], items['数据主体']
```
```diff
@@ -1402,7 +1454,7 @@ def data_aggregation(service_databases=[{}], months=1):
             service_database=service_database,
         )
         g.sp_index_datas = pd.DataFrame()  # reset, otherwise the next iteration keeps flushing into the database
-        # g.as_csv(df=df, filename=table_name + '.csv')  # export csv
+        # # g.as_csv(df=df, filename=table_name + '.csv')  # export csv
         if '日期' in df.columns.tolist():
             m.df_to_mysql(
                 df=df,
```
```diff
@@ -1483,7 +1535,7 @@ def main():


 if __name__ == '__main__':
-    data_aggregation(service_databases=[{'company': 'mysql'}], months=
+    data_aggregation(service_databases=[{'company': 'mysql'}], months=1)  # the normal path: aggregate all data
     # data_aggregation_one(service_databases=[{'company': 'mysql'}], months=1)  # aggregate one database only; edit the function to choose which
     # optimize_data.op_data(service_databases=[{'company': 'mysql'}], days=3650)  # immediately run cleanup on the aggregated data

```
mdbq/clean/data_clean.py
CHANGED
```diff
@@ -443,7 +443,26 @@ class DataClean:
             if self.set_up_to_mysql:
                 m.df_to_mysql(df=df, db_name='生意参谋2', tabel_name='生意参谋_自助取数_店铺流量_月数据')
             os.remove(os.path.join(root, name))
-
+        elif name.endswith('.xlsx') and '直播分场次效果' in name:
+            pattern = re.findall(r'(\d{4}-\d{2}-\d{2})_(\d{4}-\d{2}-\d{2})', name)
+            if pattern:
+                continue
+            df = pd.read_excel(os.path.join(root, name), header=0)
+            if len(df) == 0:
+                print(f'{name} 报表数据为空')
+                continue
+            df.replace(to_replace=['--'], value='0', regex=False, inplace=True)
+            df.replace(to_replace=[','], value='', regex=True, inplace=True)
+            df['直播开播时间'] = pd.to_datetime(df['直播开播时间'], format='%Y-%m-%d %H:%M:%S', errors='ignore')
+            df.insert(loc=0, column='日期', value=df['直播开播时间'])
+            df['日期'] = df['日期'].apply(lambda x: pd.to_datetime(str(x).split(' ')[0], format='%Y-%m-%d', errors='ignore') if x else x)
+            df.insert(loc=1, column='店铺', value='万里马官方旗舰店')
+            min_clm = str(df.min()['直播开播时间']).split(' ')[0]
+            max_clm = str(df.max()['直播开播时间']).split(' ')[0]
+            new_name = f'{os.path.splitext(name)[0]}_{min_clm}_{max_clm}.csv'
+            new_name = re.sub(r' ?(\(\d+\))', '', new_name)
+            self.save_to_csv(df, root, new_name)  # mysql may rename df columns, so save the csv before uploading to mysql
+            os.remove(os.path.join(root, name))
         elif name.endswith('.csv') and 'baobei' in name:
             # 生意经 per-item daily metrics
             # print(name)
```
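After cleaning, the report is renamed so the covered date range is embedded in the filename (which is exactly what the `if pattern: continue` guard at the top of the branch detects on a later pass), and browser duplicate markers like ` (1)` are stripped. A sketch of the renaming with a hypothetical filename:

```python
import os
import re

name = '直播分场次效果 (1).xlsx'  # hypothetical duplicate download
min_clm, max_clm = '2024-05-01', '2024-05-07'  # would come from df['直播开播时间']
new_name = f'{os.path.splitext(name)[0]}_{min_clm}_{max_clm}.csv'
new_name = re.sub(r' ?(\(\d+\))', '', new_name)  # drop the ' (1)' copy marker
print(new_name)  # 直播分场次效果_2024-05-01_2024-05-07.csv
```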
```diff
@@ -808,6 +827,12 @@ class DataClean:
             os.remove(os.path.join(root, name))
         elif name.endswith('.xlsx') and '搜索分析-排名定位-商品词下排名' in name:
             # JD: product rankings under search terms
+            pattern = re.findall(r'(\d{4}-\d{2}-\d{2})-(\d{4}-\d{2}-\d{2})', name)
+            if not pattern:
+                continue
+            if pattern[0][0] != pattern[0][1]:
+                print(f'{name}: 检测到数据周期异常,仅支持7天数据')
+                continue
             new_name = os.path.splitext(name)[0] + '.csv'
             # print(name)
             df = pd.read_excel(os.path.join(root, name), header=0, engine='openpyxl')
```
```diff
@@ -815,6 +840,10 @@ class DataClean:
                 print(f'{name} 报表数据为空')
                 os.remove(os.path.join(root, name))
                 continue
+            if len(df.columns.tolist()) < 20:
+                print(f'{name}: 报表可能缺失诊断数据')
+                os.remove(os.path.join(root, name))
+                continue
             df.rename(columns={'商品的ID': 'skuid'}, inplace=True)
             df['skuid'] = df['skuid'].apply(lambda x: f'="{x}"' if x and '=' not in str(x) else x)
             self.save_to_csv(df, root, new_name)
```
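A note on the unchanged `skuid` line in the context above: wrapping the ID as `="..."` is the spreadsheet trick that stops Excel from collapsing long numeric IDs into scientific notation when the CSV is reopened. In isolation, with a hypothetical helper name:

```python
# Hypothetical helper mirroring the skuid lambda above.
def protect_id(x):
    """Wrap a value as ="..." so spreadsheets keep it as literal text."""
    return f'="{x}"' if x and '=' not in str(x) else x

print(protect_id('100012345678'))    # ="100012345678"
print(protect_id('="100012345678"')) # already wrapped; returned unchanged
```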
```diff
@@ -1192,6 +1221,12 @@ class DataClean:
         elif name.endswith('.csv') and '客户运营平台_客户列表' in name:
             t_path = str(pathlib.Path(self.source_path, '生意参谋/客户运营平台'))
             bib(t_path, _as_month=True)
+        elif name.endswith('.csv') and '直播分场次效果' in name:
+            pattern = re.findall(r'(\d{4}-\d{2}-\d{2})_(\d{4}-\d{2}-\d{2})', name)
+            if not pattern:
+                continue
+            t_path = str(pathlib.Path(self.source_path, '生意参谋/直播场次分析'))
+            bib(t_path, _as_month=True)
         # JD section ------- start marker
         # JD section divider
         elif name.endswith('.csv') and '全部渠道_商品明细' in name:
```
```diff
@@ -1435,11 +1470,11 @@ def main():
     c.set_up_to_mysql = False
     c.new_unzip(is_move=True)  # unzip the files
     c.change_and_sort()
-
+    c.move_all()  # move the files into the raw-files folder
     # c.attribute()  # rename and categorize product media assets


 if __name__ == '__main__':
-
+    main()
     username, password, host, port = get_myconf.select_config_values(target_service='aliyun', database='mongodb')
     print(username, password, host, port)
```
mdbq/mysql/mysql.py
CHANGED
```diff
@@ -57,6 +57,18 @@ class MysqlUpload:
         }
         self.filename = None

+    @staticmethod
+    def try_except(func):  # an exception-handling decorator defined inside the class
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            try:
+                return func(*args, **kwargs)
+            except Exception as e:
+                print(f'{func.__name__}, {e}')  # report the exception instead of raising
+
+        return wrapper
+
+    @try_except
     def df_to_mysql(self, df, table_name, db_name='远程数据源', icm_update=[], service_database={'home_lx': 'mysql'}, move_insert=False, df_sql=False, drop_duplicates=False, filename=None, count=None, json_path=None):
         """
         Write df to the database
```
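Two things are worth flagging about this pattern. First, a wrapped call that raises now prints the error and returns None, so callers of `df_to_mysql` can no longer distinguish failure from a normal return. Second, applying a `@staticmethod`-wrapped function as a decorator inside the same class body only works on Python 3.10+, where staticmethod objects became directly callable; the snippet also presumes `functools.wraps` is imported elsewhere in the module. A standalone sketch of the same pattern:

```python
from functools import wraps

def try_except(func):
    """Log any exception from func and swallow it (the call then returns None)."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            print(f'{func.__name__}, {e}')
    return wrapper

@try_except
def divide(a, b):
    return a / b

print(divide(1, 0))  # prints 'divide, division by zero', then None
```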
```diff
@@ -81,6 +93,9 @@ class MysqlUpload:
         else:
             print(f'{db_name}: {table_name} 传入的 df 不是有效的 dataframe 结构, {self.filename}')
             return
+        if not db_name or db_name == 'None':
+            print(f'{db_name} 不能为 None')
+            return

         cv = converter.DataFrameConverter()
         df = cv.convert_df_cols(df=df)  # clean invalid values out of the dataframe
```
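The new guard rejects not just a falsy `db_name` but also the literal string `'None'`, which is what a missing name turns into once it has been interpolated into text (`f'{None}'`) somewhere upstream. A toy probe of both failure shapes:

```python
# Hypothetical probe of the two cases the guard catches.
for candidate in (None, '', f'{None}', '聚合数据'):
    if not candidate or candidate == 'None':
        print(f'{candidate!r} rejected')
    else:
        print(f'{candidate!r} accepted')
```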
{mdbq-1.9.1.dist-info → mdbq-1.9.3.dist-info}/RECORD
CHANGED
```diff
@@ -1,15 +1,15 @@
 mdbq/__init__.py,sha256=Il5Q9ATdX8yXqVxtP_nYqUhExzxPC_qk_WXQ_4h0exg,16
 mdbq/__version__.py,sha256=y9Mp_8x0BCZSHsdLT_q5tX9wZwd5QgqrSIENLrb6vXA,62
 mdbq/aggregation/__init__.py,sha256=EeDqX2Aml6SPx8363J-v1lz0EcZtgwIBYyCJV6CcEDU,40
-mdbq/aggregation/aggregation.py,sha256=
+mdbq/aggregation/aggregation.py,sha256=A_Zy6FtlmrVmjjXaioUecRYVVDpKQHi3Ase-z8nSV14,67166
 mdbq/aggregation/df_types.py,sha256=oQJS2IBU3_IO6GMgbssHuC2yCjNnbta0QPGrFOwNLnU,7591
 mdbq/aggregation/mysql_types.py,sha256=DQYROALDiwjJzjhaJfIIdnsrNs11i5BORlj_v6bp67Y,11062
 mdbq/aggregation/optimize_data.py,sha256=u2Kl_MFtZueXJ57ycy4H2OhXD431RctUYJYCl637uT0,4176
-mdbq/aggregation/query_data.py,sha256=
+mdbq/aggregation/query_data.py,sha256=8Cc3fj4-lLkQyazOtb-_CePiiCog3omeiaJ577dfZXU,72435
 mdbq/bdup/__init__.py,sha256=AkhsGk81SkG1c8FqDH5tRq-8MZmFobVbN60DTyukYTY,28
 mdbq/bdup/bdup.py,sha256=LAV0TgnQpc-LB-YuJthxb0U42_VkPidzQzAagan46lU,4234
 mdbq/clean/__init__.py,sha256=A1d6x3L27j4NtLgiFV5TANwEkLuaDfPHDQNrPBbNWtU,41
-mdbq/clean/data_clean.py,sha256=
+mdbq/clean/data_clean.py,sha256=LrnliUyiI_9zwbI4GprDuYS-1XoZmSpI_DK6hn-vg9I,100661
 mdbq/company/__init__.py,sha256=qz8F_GsP_pMB5PblgJAUAMjasuZbOEp3qQOCB39E8f0,21
 mdbq/company/copysh.py,sha256=4PGjvmPzvrmstOaAwHQGFXIGCWqqNXZEOYf1QdUvMlI,17762
 mdbq/config/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
```
```diff
@@ -24,7 +24,7 @@ mdbq/log/mylogger.py,sha256=oaT7Bp-Hb9jZt52seP3ISUuxVcI19s4UiqTeouScBO0,3258
 mdbq/mongo/__init__.py,sha256=SILt7xMtQIQl_m-ik9WLtJSXIVf424iYgCfE_tnQFbw,13
 mdbq/mongo/mongo.py,sha256=v9qvrp6p1ZRWuPpbSilqveiE0FEcZF7U5xUPI0RN4xs,31880
 mdbq/mysql/__init__.py,sha256=A_DPJyAoEvTSFojiI2e94zP0FKtCkkwKP1kYUCSyQzo,11
-mdbq/mysql/mysql.py,sha256=
+mdbq/mysql/mysql.py,sha256=LJJja2S5OWc-3lOHDmsVFJieFM3U69pbyWBEYQVn7P4,44541
 mdbq/mysql/s_query.py,sha256=fIQvQKPyV7rvSUuxVWXv9S5FmCnIM4GHKconE1Zn5BA,8378
 mdbq/mysql/year_month_day.py,sha256=VgewoE2pJxK7ErjfviL_SMTN77ki8GVbTUcao3vFUCE,1523
 mdbq/other/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
```
```diff
@@ -36,7 +36,7 @@ mdbq/pbix/__init__.py,sha256=Trtfaynu9RjoTyLLYBN2xdRxTvm_zhCniUkVTAYwcjo,24
 mdbq/pbix/pbix_refresh.py,sha256=JUjKW3bNEyoMVfVfo77UhguvS5AWkixvVhDbw4_MHco,2396
 mdbq/pbix/refresh_all.py,sha256=0uAnBKCd5cx5FLTkawN1GV9yi87rfyMgYal5LABtumQ,7186
 mdbq/spider/__init__.py,sha256=RBMFXGy_jd1HXZhngB2T2XTvJqki8P_Fr-pBcwijnew,18
-mdbq-1.9.
-mdbq-1.9.
-mdbq-1.9.
-mdbq-1.9.
+mdbq-1.9.3.dist-info/METADATA,sha256=gEFZjDAiQ8_FgTxoXwPOdv5UEMcL_so2cCd3yHr3W4A,245
+mdbq-1.9.3.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
+mdbq-1.9.3.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
+mdbq-1.9.3.dist-info/RECORD,,
```
{mdbq-1.9.1.dist-info → mdbq-1.9.3.dist-info}/top_level.txt
File without changes