mdbq 1.7.7__py3-none-any.whl → 1.7.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mdbq/aggregation/aggregation.py +8 -8
- mdbq/aggregation/query_data.py +91 -21
- mdbq/company/copysh.py +2 -0
- mdbq/mysql/mysql.py +28 -11
- {mdbq-1.7.7.dist-info → mdbq-1.7.9.dist-info}/METADATA +1 -1
- {mdbq-1.7.7.dist-info → mdbq-1.7.9.dist-info}/RECORD +8 -8
- {mdbq-1.7.7.dist-info → mdbq-1.7.9.dist-info}/WHEEL +0 -0
- {mdbq-1.7.7.dist-info → mdbq-1.7.9.dist-info}/top_level.txt +0 -0
mdbq/aggregation/aggregation.py
CHANGED
@@ -1100,12 +1100,12 @@ if __name__ == '__main__':
     # database='mysql'
     # )
 
-
-
-
-
-
-
-
-
+    db_name = '推广数据2'
+    table_name = '营销场景报表'
+    upload_dir(
+        path='/Users/xigua/数据中心/原始文件2/推广报表/营销场景报表',
+        db_name=db_name,
+        collection_name=table_name,
+        dbs={'mysql': True, 'mongodb': False},
+    )
 
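The re-enabled `__main__` block uploads one report folder per run. A usage sketch for a different report type, assuming `upload_dir` walks every file under `path` and writes it to the named database/collection; the folder and table names below are placeholders, only the keyword arguments appear in the diff:

    from mdbq.aggregation.aggregation import upload_dir

    # dbs={'mysql': True, 'mongodb': False} routes the upload to MySQL only,
    # as in the diff; path/collection_name here are placeholders.
    upload_dir(
        path='/Users/xigua/数据中心/原始文件2/推广报表/某报表目录',  # placeholder
        db_name='推广数据2',
        collection_name='某报表目录',  # placeholder
        dbs={'mysql': True, 'mongodb': False},
    )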
mdbq/aggregation/query_data.py
CHANGED
@@ -431,6 +431,28 @@ class MysqlDatasQuery:
         start_date = f'{start_date.year}-{start_date.month}-01'  # 替换为 n 月以前的第一天
         return pd.to_datetime(start_date), pd.to_datetime(end_date)
 
+    def tm_search(self):
+        start_date, end_date = self.months_data(num=self.months)
+        projection = {
+            '日期': 1,
+            '关键词': 1,
+            '访客数': 1,
+            '支付转化率': 1,
+            '支付金额': 1,
+            '下单金额': 1,
+            '支付买家数': 1,
+            '下单买家数': 1,
+            '加购人数': 1,
+            '新访客': 1,
+        }
+        df = self.download.data_to_df(
+            db_name='生意参谋2',
+            table_name='店铺来源_手淘搜索',
+            start_date=start_date,
+            end_date=end_date,
+            projection=projection,
+        )
+        return df
 
 class GroupBy:
     """
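The new `tm_search` method follows the pattern of the other `MysqlDatasQuery` readers: select a fixed projection from 生意参谋2/店铺来源_手淘搜索 over the trailing `months` window and return the raw DataFrame. A sketch of how it feeds the grouping branch added below; the constructor arguments and the `groupby` call shape are assumptions from surrounding code, not confirmed API:

    sdq = MysqlDatasQuery(target_service='company')  # assumed constructor
    sdq.months = 1                                   # current month plus one back
    df = sdq.tm_search()                             # rows: 日期, 关键词, 访客数, ...
    g = GroupBy()                                    # assumed constructor
    df = g.groupby(df=df, table_name='天猫店铺来源_手淘搜索')  # dedupe per 日期+关键词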
@@ -978,6 +1000,22 @@ class GroupBy:
             df['s_是否品牌词'] = df['搜索词'].str.contains('万里马|wanlima', regex=True)
             df['s_是否品牌词'] = df['s_是否品牌词'].apply(lambda x: '品牌词' if x else '')
             return df
+        elif '天猫店铺来源_手淘搜索' in table_name:
+            df = df.groupby(
+                ['日期', '关键词'],
+                as_index=False).agg(
+                **{
+                    '访客数': ('访客数', np.max),
+                    '支付转化率': ('支付转化率', np.max),
+                    '支付金额': ('支付金额', np.max),
+                    '下单金额': ('下单金额', np.max),
+                    '支付买家数': ('支付买家数', np.max),
+                    '下单买家数': ('下单买家数', np.max),
+                    '加购人数': ('加购人数', np.max),
+                    '新访客': ('新访客', np.max),
+                }
+            )
+            return df
         else:
             print(f'<{table_name}>: Groupby 类尚未配置,数据为空')
             return pd.DataFrame({})
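This branch collapses repeated crawls of the same 日期+关键词 pair by taking the per-group maximum through pandas named aggregation. A minimal self-contained demonstration of the same pattern:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({
        '日期': ['2024-01-01', '2024-01-01', '2024-01-02'],
        '关键词': ['女包', '女包', '女包'],
        '访客数': [100, 120, 90],          # duplicate snapshots of the same day
        '支付金额': [300.0, 350.0, 280.0],
    })
    # Named aggregation: output column -> (source column, reducer).
    out = df.groupby(['日期', '关键词'], as_index=False).agg(
        **{'访客数': ('访客数', np.max), '支付金额': ('支付金额', np.max)})
    print(out)  # one row per 日期+关键词, keeping the largest observed values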
@@ -1048,10 +1086,20 @@ class GroupBy:
                 '直接成交金额': float,
                 '自然流量曝光量': int,
             }, errors='raise')
+            # tg = tg.groupby(['日期', '推广渠道', '营销场景', '商品id', '花费', '展现量', '点击量'], as_index=False).agg(
+            #     **{'加购量': ('加购量', np.max),
+            #        '成交笔数': ('成交笔数', np.max),
+            #        '成交金额': ('成交金额', np.max),
+            #        '自然流量曝光量': ('自然流量曝光量', np.max),
+            #        '直接成交笔数': ('直接成交笔数', np.max),
+            #        '直接成交金额': ('直接成交金额', np.max)
+            #        }
+            # )
             df = pd.concat([tg, zb, pxb], axis=0, ignore_index=True)
             df.fillna(0, inplace=True)  # concat 之后要填充空值
             df = df.astype(
                 {
+                    '商品id': str,
                     '自然流量曝光量': int,
                 }
             )
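The added `'商品id': str` cast keeps product IDs out of float territory: after `concat`, frames that lack the column introduce NaN, which promotes an integer ID column to float64, and `fillna(0)` does not undo that. A short demonstration of the failure mode; the diff itself casts with `astype({'商品id': str})` directly, and the `int64` intermediate below is only illustrative, to avoid a trailing `.0`:

    import pandas as pd

    a = pd.DataFrame({'商品id': [652864024484], '花费': [1.5]})
    b = pd.DataFrame({'花费': [2.0]})  # e.g. rows with no 商品id column

    df = pd.concat([a, b], axis=0, ignore_index=True)
    print(df['商品id'].dtype)          # float64: the NaN forced the promotion

    df.fillna(0, inplace=True)
    df['商品id'] = df['商品id'].astype('int64').astype(str)
    print(df['商品id'].tolist())       # ['652864024484', '0'], no 6.5e+11, no '.0'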
@@ -1249,13 +1297,13 @@ def data_aggregation(service_databases=[{}], months=1):
         {
             '数据库名': '聚合数据',
             '集合名': '天猫生意经_宝贝指标',
-            '唯一主键': ['日期', '宝贝id'],
+            '唯一主键': ['日期', '宝贝id'],  # 不能加其他字段做主键,比如销售额,是变动的,不是唯一的
             '数据主体': sdq.syj(),
         },
         {
             '数据库名': '聚合数据',
             '集合名': '天猫_店铺来源_日数据',
-            '唯一主键': ['日期', '一级来源', '二级来源', '三级来源'],
+            '唯一主键': ['日期', '一级来源', '二级来源', '三级来源', '访客数'],
             '数据主体': sdq.dplyd(),
         },
         {
@@ -1327,9 +1375,15 @@ def data_aggregation(service_databases=[{}], months=1):
         {
             '数据库名': '聚合数据',
             '集合名': '天猫_品销宝账户报表',
-            '唯一主键': ['日期', '报表类型'],
+            '唯一主键': ['日期', '报表类型', '推广渠道', '营销场景', '花费'],
             '数据主体': sdq.pxb_zh(),
         },
+        {
+            '数据库名': '聚合数据',
+            '集合名': '天猫店铺来源_手淘搜索',
+            '唯一主键': ['日期', '关键词', '访客数'],
+            '数据主体': sdq.tm_search(),
+        },
     ]
     for items in data_dict:  # 遍历返回结果
         db_name, table_name, unique_key_list, df = items['数据库名'], items['集合名'], items['唯一主键'], items['数据主体']
@@ -1347,23 +1401,36 @@ def data_aggregation(service_databases=[{}], months=1):
         )
         g.sp_index_datas = pd.DataFrame()  # 重置,不然下个循环会继续刷入数据库
         # g.as_csv(df=df, filename=table_name + '.csv')  # 导出 csv
-
-
-
-
-
-
-
-
-
+        if '日期' in df.columns.tolist():
+            m.df_to_mysql(
+                df=df,
+                db_name=db_name,
+                table_name=table_name,
+                move_insert=True,  # 先删除,再插入
+                # df_sql=True,
+                # drop_duplicates=False,
+                # icm_update=unique_key_list,
+                service_database=service_database,
+            )  # 3. 回传数据库
+        else:  # 没有日期列的就用主键排重
+            m.df_to_mysql(
+                df=df,
+                db_name=db_name,
+                table_name=table_name,
+                # df_sql=True,
+                drop_duplicates=False,
+                icm_update=unique_key_list,
+                service_database=service_database,
+            )  # 3. 回传数据库
     res = g.performance(bb_tg=True)  # 盈亏表,依赖其他表,单独做
     m.df_to_mysql(
         df=res,
         db_name='聚合数据',
         table_name='_全店商品销售',
+        move_insert=True,  # 先删除,再插入
         # df_sql=True,
-        drop_duplicates=False,
-        icm_update=['日期', '商品id'],  # 设置唯一主键
+        # drop_duplicates=False,
+        # icm_update=['日期', '商品id'],  # 设置唯一主键
         service_database=service_database,
     )
     res = g.performance(bb_tg=False)  # 盈亏表,依赖其他表,单独做
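The write-back loop now picks one of two mutually exclusive modes per table: frames with a 日期 column are replayed with `move_insert=True` (delete the covered date range, then bulk append), while frames without one keep the old incremental path keyed on the configured unique keys. The dispatch, factored into a sketch for clarity; this helper is not part of mdbq, only the `df_to_mysql` keywords come from the diff:

    def write_back(m, df, db_name, table_name, unique_key_list, service_database):
        # Sketch of the branching added in data_aggregation, not library code.
        if '日期' in df.columns.tolist():
            # Date-ranged tables: wipe the covered dates, then append everything.
            m.df_to_mysql(df=df, db_name=db_name, table_name=table_name,
                          move_insert=True, service_database=service_database)
        else:
            # No date column: incremental update on the configured unique keys.
            m.df_to_mysql(df=df, db_name=db_name, table_name=table_name,
                          drop_duplicates=False, icm_update=unique_key_list,
                          service_database=service_database)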
@@ -1371,9 +1438,10 @@ def data_aggregation(service_databases=[{}], months=1):
         df=res,
         db_name='聚合数据',
         table_name='_推广商品销售',
+        move_insert=True,  # 先删除,再插入
         # df_sql=True,
-        drop_duplicates=False,
-        icm_update=['日期', '商品id'],  # 设置唯一主键
+        # drop_duplicates=False,
+        # icm_update=['日期', '商品id'],  # 设置唯一主键
         service_database=service_database,
     )
 
@@ -1382,9 +1450,10 @@ def data_aggregation(service_databases=[{}], months=1):
         df=res,
         db_name='聚合数据',
         table_name='天猫_推广汇总',
+        move_insert=True,  # 先删除,再插入
         # df_sql=True,
-        drop_duplicates=False,
-        icm_update=['日期', '商品id'],  # 设置唯一主键
+        # drop_duplicates=False,
+        # icm_update=['日期', '推广渠道', '营销场景', '商品id', '花费', '展现量', '点击量'],  # 设置唯一主键
         service_database=service_database,
     )
 
@@ -1394,9 +1463,10 @@ def data_aggregation(service_databases=[{}], months=1):
         df=res,
         db_name='聚合数据',
         table_name='_京东_推广商品销售',
+        move_insert=True,  # 先删除,再插入
         # df_sql=True,
-        drop_duplicates=False,
-        icm_update=['日期', '跟单sku id', '货号', '花费'],  # 设置唯一主键
+        # drop_duplicates=False,
+        # icm_update=['日期', '跟单sku id', '货号', '花费'],  # 设置唯一主键
         service_database=service_database,
     )
 
@@ -1410,7 +1480,7 @@ def main():
 
 
 if __name__ == '__main__':
-    data_aggregation(service_databases=[{'
+    data_aggregation(service_databases=[{'company': 'mysql'}], months=1)  # 正常的聚合所有数据
     # data_aggregation_one(service_databases=[{'company': 'mysql'}], months=1)  # 单独聚合某一个数据库,具体库进函数编辑
     # optimize_data.op_data(service_databases=[{'company': 'mysql'}], days=3650)  # 立即启动对聚合数据的清理工作
 
mdbq/company/copysh.py
CHANGED
@@ -20,6 +20,7 @@ from mdbq.config import get_myconf
 from mdbq.config import set_support
 from mdbq.config import products
 from mdbq.mysql import mysql
+from mdbq.pbix import refresh_all
 warnings.filterwarnings('ignore')
 
 
@@ -252,6 +253,7 @@ class TbFiles:
 
         excel_path = os.path.join(self.share_path, 'EXCEL报表')
         files = os.listdir(excel_path)
+        r = refresh_all.RefreshAll()
         for file in files:
             if file.endswith('.xlsx'):
                 now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
mdbq/mysql/mysql.py
CHANGED
@@ -57,11 +57,14 @@ class MysqlUpload:
         }
         self.filename = None
 
-    def df_to_mysql(self, df, table_name, db_name='远程数据源', icm_update=[], service_database={'home_lx': 'mysql'}, df_sql=False, drop_duplicates=False, filename=None, count=None, json_path=None):
+    def df_to_mysql(self, df, table_name, db_name='远程数据源', icm_update=[], service_database={'home_lx': 'mysql'}, move_insert=False, df_sql=False, drop_duplicates=False, filename=None, count=None, json_path=None):
         """
         将 df 写入数据库
         db_name: 数据库名称
         table_name: 集合/表名称
+        move_insert: 根据df 的日期,先移除数据库数据,再插入, df_sql, drop_duplicates, icm_update 都要设置为 False
+                     原则上只限于聚合数据使用,原始数据插入时不要设置
+
         df_sql: 这是一个临时参数, 值为 True 时使用 df.to_sql 函数上传整个表, 不会排重,初创表大量上传数据的时候使用
         drop_duplicates: 值为 True 时检查重复数据再插入,反之直接上传,数据量大时会比较慢
         icm_update: 增量更新, 在聚合数据中使用,原始文件不要使用,设置此参数时需将 drop_duplicates 改为 False
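Per the docstring, `move_insert` supersedes the other write modes and is meant only for aggregated tables. A hedged call sketch; the `MysqlUpload` constructor arguments are assumptions, only the `df_to_mysql` keywords come from this diff:

    import pandas as pd
    from mdbq.mysql import mysql

    df = pd.DataFrame({'日期': ['2024-01-01', '2024-01-02'], '花费': [10.0, 12.5]})
    m = mysql.MysqlUpload(username='user', password='pw',
                          host='127.0.0.1', port=3306)  # assumed constructor
    m.df_to_mysql(
        df=df,
        db_name='聚合数据',
        table_name='天猫_推广汇总',
        move_insert=True,  # delete rows in df's date range, then append df
        service_database={'company': 'mysql'},
    )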
@@ -162,20 +165,34 @@ class MysqlUpload:
         elif cl:
             mysql_types.mysql_all_dtypes(service_database=service_database)  # 更新所有数据库所有数据表的 dtypes 信息到本地 json
 
-        #
-        # if drop_duplicates and '日期' in df.columns.tolist():
-        #     dates = df['日期'].values.tolist()
-        #     start_date = pd.to_datetime(min(dates)).strftime('%Y-%m-%d')
-        #     end_date = (pd.to_datetime(max(dates)) + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
-        #     sql = f"DELETE FROM `{table_name}` WHERE {'日期'} BETWEEN '%s' AND '%s'" % (start_date, end_date)
-        #     cursor.execute(sql)
-        #     connection.commit()
-
-        # 5. 更新插入数据
+        # 4. 更新插入数据
         now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S ")
         for service_name, database in service_database.items():
             print(f'{now}正在更新 mysql ({self.host}:{self.port}) {db_name}/{table_name}, {count}, {service_name}, {self.filename}')
 
+        # 5. 移除指定日期范围内的数据,原则上只限于聚合数据使用,原始数据插入时不要设置
+        if move_insert and '日期' in df.columns.tolist():
+            # 移除数据
+            dates = df['日期'].values.tolist()
+            start_date = pd.to_datetime(min(dates)).strftime('%Y-%m-%d')
+            end_date = (pd.to_datetime(max(dates)) + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
+            sql = f"DELETE FROM `{table_name}` WHERE {'日期'} BETWEEN '%s' AND '%s'" % (start_date, end_date)
+            cursor.execute(sql)
+            connection.commit()
+
+            # 插入数据
+            engine = create_engine(
+                f"mysql+pymysql://{self.username}:{self.password}@{self.host}:{self.port}/{db_name}")  # 创建数据库引擎
+            df.to_sql(
+                name=table_name,
+                con=engine,
+                if_exists='append',
+                index=False,
+                chunksize=1000
+            )
+            connection.close()
+            return
+
         datas = df.to_dict(orient='records')
         for data in datas:
             # data 是传进来待处理的数据, 不是数据库数据
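Two details in the new branch are worth flagging: the DELETE statement mixes an f-string with %-formatting (the `{'日期'}` interpolation is just the literal column name), and the date endpoints are spliced into the SQL as pre-quoted strings. A standalone sketch of the same delete-then-append pattern with parameterized endpoints, assuming a plain pymysql connection plus a SQLAlchemy engine rather than mdbq's internal connection handling:

    import datetime
    import pandas as pd
    import pymysql
    from sqlalchemy import create_engine

    def move_insert(df, table_name, db_name, user, pw, host, port):
        # Delete the rows covering df's date range, then bulk-append df.
        dates = pd.to_datetime(df['日期'])
        start = dates.min().strftime('%Y-%m-%d')
        end = (dates.max() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

        conn = pymysql.connect(user=user, password=pw, host=host,
                               port=port, database=db_name)
        try:
            with conn.cursor() as cursor:
                # Endpoints go in as parameters; only the identifier is formatted.
                cursor.execute(
                    f"DELETE FROM `{table_name}` WHERE 日期 BETWEEN %s AND %s",
                    (start, end))
            conn.commit()
        finally:
            conn.close()

        engine = create_engine(f"mysql+pymysql://{user}:{pw}@{host}:{port}/{db_name}")
        df.to_sql(name=table_name, con=engine, if_exists='append',
                  index=False, chunksize=1000)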
{mdbq-1.7.7.dist-info → mdbq-1.7.9.dist-info}/RECORD
CHANGED
@@ -1,17 +1,17 @@
 mdbq/__init__.py,sha256=Il5Q9ATdX8yXqVxtP_nYqUhExzxPC_qk_WXQ_4h0exg,16
 mdbq/__version__.py,sha256=y9Mp_8x0BCZSHsdLT_q5tX9wZwd5QgqrSIENLrb6vXA,62
 mdbq/aggregation/__init__.py,sha256=EeDqX2Aml6SPx8363J-v1lz0EcZtgwIBYyCJV6CcEDU,40
-mdbq/aggregation/aggregation.py,sha256=
+mdbq/aggregation/aggregation.py,sha256=sgsetJHK4fOcXvqQCVgJoSIwZQLMznVG3I-MqHlW_fM,64116
 mdbq/aggregation/df_types.py,sha256=oQJS2IBU3_IO6GMgbssHuC2yCjNnbta0QPGrFOwNLnU,7591
 mdbq/aggregation/mysql_types.py,sha256=DQYROALDiwjJzjhaJfIIdnsrNs11i5BORlj_v6bp67Y,11062
 mdbq/aggregation/optimize_data.py,sha256=u2Kl_MFtZueXJ57ycy4H2OhXD431RctUYJYCl637uT0,4176
-mdbq/aggregation/query_data.py,sha256=
+mdbq/aggregation/query_data.py,sha256=dzS1XvoJ0oEckrvIF-_uUALnPIRG4mOwG5ktr3LWsKY,70243
 mdbq/bdup/__init__.py,sha256=AkhsGk81SkG1c8FqDH5tRq-8MZmFobVbN60DTyukYTY,28
 mdbq/bdup/bdup.py,sha256=LAV0TgnQpc-LB-YuJthxb0U42_VkPidzQzAagan46lU,4234
 mdbq/clean/__init__.py,sha256=A1d6x3L27j4NtLgiFV5TANwEkLuaDfPHDQNrPBbNWtU,41
 mdbq/clean/data_clean.py,sha256=T0WYOKFwNZTNk3temKOw1K2H54kxu9QBJjlTbkMtxNk,94217
 mdbq/company/__init__.py,sha256=qz8F_GsP_pMB5PblgJAUAMjasuZbOEp3qQOCB39E8f0,21
-mdbq/company/copysh.py,sha256=
+mdbq/company/copysh.py,sha256=z1jql2UABdKGGPYF6VRhXcBwCYaCBFR91kZwthBlOdU,17754
 mdbq/config/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
 mdbq/config/get_myconf.py,sha256=-CFEW0dQh4OIwVgwK-cL0eVp1LN3PjJgN89d4P5TB9I,6011
 mdbq/config/products.py,sha256=vIK8DJ-F3XXwvNPK-4OJq2tZITNlL6Sub8QBdoOng8U,5676
@@ -24,7 +24,7 @@ mdbq/log/mylogger.py,sha256=oaT7Bp-Hb9jZt52seP3ISUuxVcI19s4UiqTeouScBO0,3258
 mdbq/mongo/__init__.py,sha256=SILt7xMtQIQl_m-ik9WLtJSXIVf424iYgCfE_tnQFbw,13
 mdbq/mongo/mongo.py,sha256=v9qvrp6p1ZRWuPpbSilqveiE0FEcZF7U5xUPI0RN4xs,31880
 mdbq/mysql/__init__.py,sha256=A_DPJyAoEvTSFojiI2e94zP0FKtCkkwKP1kYUCSyQzo,11
-mdbq/mysql/mysql.py,sha256=
+mdbq/mysql/mysql.py,sha256=UKnBmywqTzc0VJfZGlC-9KzV7I--9P7H-jspUp_IvtU,44071
 mdbq/mysql/s_query.py,sha256=fIQvQKPyV7rvSUuxVWXv9S5FmCnIM4GHKconE1Zn5BA,8378
 mdbq/mysql/year_month_day.py,sha256=VgewoE2pJxK7ErjfviL_SMTN77ki8GVbTUcao3vFUCE,1523
 mdbq/other/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
@@ -35,7 +35,7 @@ mdbq/pbix/__init__.py,sha256=Trtfaynu9RjoTyLLYBN2xdRxTvm_zhCniUkVTAYwcjo,24
 mdbq/pbix/pbix_refresh.py,sha256=JUjKW3bNEyoMVfVfo77UhguvS5AWkixvVhDbw4_MHco,2396
 mdbq/pbix/refresh_all.py,sha256=sBZ61LKvm-raa9ROnC-AAvPYLU7dbudmuxy__5QCB2A,7176
 mdbq/spider/__init__.py,sha256=RBMFXGy_jd1HXZhngB2T2XTvJqki8P_Fr-pBcwijnew,18
-mdbq-1.7.
-mdbq-1.7.
-mdbq-1.7.
-mdbq-1.7.
+mdbq-1.7.9.dist-info/METADATA,sha256=UsQE_0nklf8Vqmz_cQsepvi79ifPTWx9WhNsN6PWJOc,245
+mdbq-1.7.9.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+mdbq-1.7.9.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
+mdbq-1.7.9.dist-info/RECORD,,
{mdbq-1.7.7.dist-info → mdbq-1.7.9.dist-info}/WHEEL
File without changes

{mdbq-1.7.7.dist-info → mdbq-1.7.9.dist-info}/top_level.txt
File without changes