mdbq 2.8.7__py3-none-any.whl → 2.8.9__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

mdbq/aggregation/aggregation.py CHANGED
@@ -7,7 +7,7 @@ import numpy as np
 import chardet
 import zipfile
 import socket
-from pandas.tseries.holiday import next_monday
+
 from pyzipper import PyZipFile
 import os
 import platform
@@ -1201,8 +1201,9 @@ def one_file_to_mysql(file, db_name, table_name):
 
 
 def test():
-    path = os.path.relpath(r'/Users/xigua/Downloads/未命名文件夹')
+    path = os.path.relpath(r'/Users/xigua/Downloads/手淘搜索_本店引流词/2024-05')
 
+    results = []
     for root, dirs, files in os.walk(path, topdown=False):
         for name in files:
             if name.endswith('.csv') and 'baidu' not in name and '~' not in name:
@@ -1210,17 +1211,23 @@ def test():
                 # df = pd.read_excel(os.path.join(root, name), header=0)
                 df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
                 # print(name)
-                if len(df) == 0:
-                    continue
-                # df.insert(loc=1, column='店铺名称', value='万里马官方旗舰店')
-                if '颜色编码' in df.columns.tolist():
-                    print(name)
-                    df.pop('颜色编码')
-                    df.to_csv(os.path.join(root, name), encoding='utf-8_sig', index=False, header=True)
+                # if len(df) == 0:
+                #     continue
+                # # df.insert(loc=1, column='店铺名称', value='万里马官方旗舰店')
+                # if '店铺名称' not in df.columns.tolist():
+                #     print(name)
+                #     df.insert(loc=1, column='店铺名称', value='京东箱包旗舰店')
+                #     df.to_csv(os.path.join(root, name), encoding='utf-8_sig', index=False, header=True)
                 # pattern = re.findall(r'\d{4}-\d{2}-\d{2}_\d{4}-\d{2}-\d{2}', name)[0]
                 # new_name = f'py_xg_店铺销售指标_万里马官方旗舰店_{pattern}.csv'
                 # df.to_csv(os.path.join(root, name), encoding='utf-8_sig', index=False, header=True)
                 # os.remove(os.path.join(root, name))
+                results.append(df)
+    df = pd.concat(results)
+    path = '/Users/xigua/Downloads/手淘搜索_本店引流词'
+    filename = 'py_xg_手淘搜索_本店引流词_万里马官方旗舰店_2024-05_合并.csv'
+    df.to_csv(os.path.join(path, filename), encoding='utf-8_sig', index=False, header=True)
+
 
 
 if __name__ == '__main__':
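The rewritten test() switches from rewriting each file in place to a collect-then-concat merge: every qualifying CSV is appended to results and concatenated once before a single merged file is written. A minimal, self-contained sketch of that pattern (the function name and arguments are illustrative, not part of mdbq):

    import os
    import pandas as pd

    def merge_csv_tree(path, out_file):
        # Gather every qualifying CSV into a list, then concatenate once at
        # the end; one pd.concat over a list is far cheaper than repeatedly
        # growing a DataFrame inside the loop.
        results = []
        for root, dirs, files in os.walk(path, topdown=False):
            for name in files:
                if name.endswith('.csv') and 'baidu' not in name and '~' not in name:
                    results.append(pd.read_csv(os.path.join(root, name),
                                               encoding='utf-8_sig', header=0, na_filter=False))
        merged = pd.concat(results)  # raises ValueError if no file matched
        merged.to_csv(out_file, encoding='utf-8_sig', index=False, header=True)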
@@ -1237,14 +1244,14 @@ if __name__ == '__main__':
     # )
 
     # # 上传一个目录到指定数据库
-    # db_name = '生意经3'
-    # table_name = '宝贝指标'
+    # db_name = '生意参谋3'
+    # table_name = '手淘搜索_本店引流词'
     # upload_dir(
-    #     path=os.path.relpath(r'/Users/xigua/数据中心/原始文件3/生意经/宝贝指标sdff'),
+    #     path=os.path.relpath(r'/Users/xigua/Downloads/手淘搜索_本店引流词'),
     #     db_name=db_name,
     #     collection_name=table_name,
     # )
 
 
-    # test()
+    test()
 
mdbq/aggregation/query_data.py CHANGED
@@ -2318,5 +2318,5 @@ if __name__ == '__main__':
     data_aggregation(
         months=3,
         is_juhe=True,  # 生成聚合表
-        # less_dict=['多店推广场景_按日聚合'],  # 单独聚合某一个数据库
+        # less_dict=['天猫_品销宝账户报表'],  # 单独聚合某一个数据库
     )
mdbq/clean/clean_upload.py CHANGED
@@ -133,8 +133,6 @@ class DataClean:
             for name in files:
                 if '~$' in name or '.DS' in name or '.localized' in name or '.jpg' in name or '.png' in name:
                     continue
-                if 'py_xg' in name:
-                    continue
                 is_continue = False
                 if is_except:
                     for item in is_except:
@@ -156,46 +154,17 @@ class DataClean:
                         is_continue = True
                 if not is_continue:
                     continue
-                if name.endswith('.xls') and '商品排行_' in name:
-                    df = pd.read_excel(os.path.join(root, name), header=4)
-                    if len(df) == 0:
-                        print(f'{name} 报表数据不能为空')
-                        os.remove(os.path.join(root, name))
-                        continue
-                    df.replace(to_replace=['-'], value=0, regex=False, inplace=True)
-                    df.replace(to_replace=[','], value='', regex=True, inplace=True)
-                    df.rename(columns={'统计日期': '日期', '商品ID': '商品id'}, inplace=True)
-                    shop_name = re.findall(r'_([\u4e00-\u9fffA-Za-z]+店)', name)[0]
-                    if '店铺名称' not in df.columns.tolist():
-                        df.insert(loc=1, column='店铺名称', value=shop_name)
-                    new_name = f'py_xg_{os.path.splitext(name)[0]}.csv'
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
-                elif name.endswith('.xls') and '手淘搜索_本店引流词_' in name:
-                    df = pd.read_excel(os.path.join(root, name), header=5, engine='xlrd')
-                    if len(df) == 0:
-                        print(f'{name} 报表数据不能为空')
-                        os.remove(os.path.join(root, name))
-                        continue
-                    df.replace(to_replace=['-'], value=0, regex=False, inplace=True)
-                    df.replace(to_replace=[','], value='', regex=True, inplace=True)
-                    df.rename(columns={'统计日期': '日期'}, inplace=True)
-                    shop_name = re.findall(r'本店.*_([\u4e00-\u9fffA-Za-z]+店)_', name)[0]
-                    kw_type = re.findall('手淘搜索_本店引流词_([\u4e00-\u9fff]+)_', name)[0]
-                    df.insert(loc=2, column='词类型', value=kw_type)
-                    if '店铺名称' in df.columns.tolist():
-                        df['店铺名称'] = shop_name
-                    else:
-                        df.insert(loc=1, column='店铺名称', value=shop_name)
-                    new_name = f'py_xg_{os.path.splitext(name)[0]}.csv'
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
+                if name.endswith('.csv') and '商品排行_' in name:
+                    df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
+                    # df = pd.read_excel(os.path.join(root, name), header=4)
+
+                elif name.endswith('.csv') and '手淘搜索_本店引流词_' in name:
+                    df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
+                    # df = pd.read_excel(os.path.join(root, name), header=5, engine='xlrd')
 
                 elif name.endswith('.csv') and '_来源构成_' in name:
                     df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
-                    new_name = f'py_xg_{os.path.splitext(name)[0]}.csv'
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
+
                 elif name.endswith('.csv') and '爱库存_商品榜单_' in name:
                     df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
                     if '店铺名称' not in df.columns.tolist():
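From here on the same pattern repeats through DataClean: 2.8.9 strips out the Excel-era cleaning (header offsets, shop-name regexes, py_xg_ renames and deletes) and reads an already-converted utf-8_sig CSV directly, leaving the old enrichment behind as commented-out blocks. The shop-name extraction the deleted branches relied on is worth seeing in isolation; a tiny demo of the same regex, with a made-up file name:

    import re

    # Same pattern the removed branches used to pull the shop name out of a
    # report file name; the sample name below is hypothetical.
    name = '商品排行_万里马官方旗舰店_2024-05-01_2024-05-31.xls'
    shop_name = re.findall(r'_([\u4e00-\u9fffA-Za-z]+店)', name)[0]
    print(shop_name)  # -> 万里马官方旗舰店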
@@ -205,12 +174,12 @@ class DataClean:
                     os.remove(os.path.join(root, name))
                 elif name.endswith('.csv') and '直播分场次效果' in name:
                     df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
-                    shop_name = re.findall(r'_([\u4e00-\u9fffA-Za-z]+店)_', name)[0]
-                    if '店铺名称' not in df.columns.tolist():
-                        df.insert(loc=1, column='店铺名称', value=shop_name)
-                    new_name = f'py_xg_{os.path.splitext(name)[0]}.csv'
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
+                    # shop_name = re.findall(r'_([\u4e00-\u9fffA-Za-z]+店)_', name)[0]
+                    # if '店铺名称' not in df.columns.tolist():
+                    #     df.insert(loc=1, column='店铺名称', value=shop_name)
+                    # new_name = f'py_xg_{os.path.splitext(name)[0]}.csv'
+                    # self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
+                    # os.remove(os.path.join(root, name))
 
                 # 将数据传入 self.datas 等待更新进数据库
                 if not db_name or not collection_name:
@@ -245,8 +214,6 @@ class DataClean:
             for name in files:
                 if '~$' in name or '.DS' in name or '.localized' in name or '.jpg' in name or '.png' in name:
                     continue
-                if 'py_xg' in name:
-                    continue
                 is_continue = False
                 if is_except:
                     for item in is_except:
@@ -270,30 +237,8 @@ class DataClean:
                     continue
                 if name.endswith('.csv') and '人群属性_万里马官方旗舰店' in name:  # 推广类报表
                     df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
-                    if len(df) == 0:
-                        print(f'{name} 报表数据为空')
-                        os.remove(os.path.join(root, name))
-                        continue
-                    new_name = f'py_xg_{os.path.splitext(name)[0]}.csv'
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
                 elif name.endswith('.csv') and 'dmp人群报表_' in name:
                     df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
-                    df = df[df['日期'] != '']
-                    if len(df) == 0:
-                        print(f'{name} 报表数据为空')
-                        os.remove(os.path.join(root, name))
-                        continue
-                    for col in df.columns.tolist():
-                        if '(' in col or ')' in col:
-                            new_col = re.sub(r'\(.*\)', '', col)
-                            df.rename(columns={col: new_col}, inplace=True)
-                    shop_name = re.findall(r'_([\u4e00-\u9fffA-Za-z]+店)', name)[0]
-                    if '店铺名称' not in df.columns.tolist():
-                        df.insert(loc=1, column='店铺名称', value=shop_name)
-                    new_name = f'py_xg_{os.path.splitext(name)[0]}.csv'
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
 
                 # 将数据传入 self.datas 等待更新进数据库
                 if not db_name or not collection_name:
@@ -388,8 +333,8 @@ class DataClean:
             for name in files:
                 if '~$' in name or '.DS' in name or '.localized' in name or '.jpg' in name or '.png' in name:
                     continue
-                if 'py_xg' in name:
-                    continue
+                # if 'py_xg' in name:
+                #     continue
                 is_continue = False
                 if is_except:
                     for item in is_except:
@@ -420,21 +365,10 @@ class DataClean:
                         print(f'报表名称错误,不属于天猫/淘宝店:{name}')
                         continue
 
-                if name.endswith('.csv'):  # 推广类报表
-                    if '明星店铺' in name:  # 明星店铺可能会先释放 csv 文件
-                        continue
-                    encoding = self.get_encoding(file_path=os.path.join(root, name))
-                    shop_name = re.findall(r'_([\u4e00-\u9fffA-Za-z]+店)', name)[0]
-                    df = pd.read_csv(os.path.join(root, name), encoding=encoding, header=0, na_filter=False)
-                    df.replace(to_replace=['\\N'], value=0, regex=False, inplace=True)  # 替换掉特殊字符
-                    df.fillna(0, inplace=True)
-                    date_min = df["日期"].values.min()
-                    date_max = df["日期"].values.max()
-                    df['日期'] = pd.to_datetime(df['日期'], format='%Y-%m-%d', errors='ignore')
-                    df.insert(loc=1, column='店铺名称', value=shop_name)
-                    new_name = f'py_xg_{name}'
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
+                if name.endswith('.csv') and '明星店铺' not in name:  # 推广类报表
+                    df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
+                elif name.endswith('.csv') and '品销宝_明星店铺' in name:
+                    df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
                 elif name.endswith('.xlsx') and '品销宝_明星店铺' in name:
                     # 品销宝
                     sheets4 = ['账户', '推广计划', '推广单元', '创意', '品牌流量包', '定向人群']  # 品销宝
@@ -469,8 +403,9 @@ class DataClean:
 
                 # 将数据传入 self.datas 等待更新进数据库
                 if not db_name or not collection_name:
-                    # print(f'db_name/collection_name 不能为空')
+                    print(f'db_name/collection_name 不能为空')
                     continue
+                # print(db_name, collection_name)
                 self.datas.append(
                     {
                         '数据库名': db_name,
@@ -511,8 +446,8 @@ class DataClean:
             for name in files:
                 if '~$' in name or '.DS' in name or '.localized' in name or '.jpg' in name or '.png' in name:
                     continue
-                if 'py_xg' in name:
-                    continue
+                # if 'py_xg' in name:
+                #     continue
                 is_continue = False
                 if is_except:
                     for item in is_except:
@@ -536,27 +471,12 @@ class DataClean:
                     continue
 
                 if name.endswith('.csv') and 'baobei' in name:
-                    encoding = self.get_encoding(file_path=os.path.join(root, name))
-                    df = pd.read_csv(os.path.join(root, name), encoding=encoding, header=0, na_filter=False)
-                    p = df.pop('日期')
-                    df.insert(loc=0, column='日期', value=p)
-                    df['日期'] = df['日期'].apply(lambda x: '-'.join(re.findall(r'(\d{4})(\d{2})(\d{2})', str(x))[0]) if int(x) > 0 else '')
-                    df.replace(to_replace=['--'], value='', regex=False, inplace=True)
-                    new_name = f'py_xg_{name}'
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
+                    # encoding = self.get_encoding(file_path=os.path.join(root, name))
+                    df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
                 elif name.endswith('.csv') and 'order' in name:
                     """ 如果是手动下载的表格,这里不能使用表格原先的 gb2312, 会报错 """
                     # df = pd.read_csv(os.path.join(root, name), encoding='gb18030', header=0, na_filter=False)
-                    encoding = self.get_encoding(file_path=os.path.join(root, name))
-                    df = pd.read_csv(os.path.join(root, name), encoding=encoding, header=0, na_filter=False)
-                    df.rename(columns={'宝贝标题': '商品标题', '宝贝链接': '商品链接'}, inplace=True)
-                    df['日期'] = df['日期'].apply(lambda x: '-'.join(re.findall(r'(\d{4})(\d{2})(\d{2})', str(x))[0]) if int(x) > 0 else '')
-                    df['商品id'] = df.apply(lambda x: re.sub(r'.*id=', '', x['商品链接']), axis=1)
-                    df = df[df['订单号'] != '']
-                    new_name = f'py_xg_{name}'
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
+                    df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
                 elif name.endswith('.csv') and '省份城市分析' in name:
                     encoding = self.get_encoding(file_path=os.path.join(root, name))
                     df = pd.read_csv(os.path.join(root, name), encoding=encoding, header=0, na_filter=False)
@@ -583,158 +503,7 @@ class DataClean:
                     os.remove(os.path.join(root, name))
                 elif name.endswith('.csv') and '店铺销售指标' in name:
                     # 生意经, 店铺指标,仅限月数据,实际日指标也可以
-                    encoding = self.get_encoding(file_path=os.path.join(root, name))
-                    df = pd.read_csv(os.path.join(root, name), encoding=encoding, header=0, na_filter=False)
-                    if len(df) == 0:
-                        print(f'{name} 报表数据为空')
-                        os.remove(os.path.join(root, name))
-                        continue
-                    df['日期'] = df['日期'].apply(lambda x: '-'.join(re.findall(r'(\d{4})(\d{2})(\d{2})', str(x))[0]) if int(x) > 0 else '')
-                    df.replace(to_replace=['--'], value='', regex=False, inplace=True)
-                    new_name = f'py_xg_{name}'
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
-
-                # 将数据传入 self.datas 等待更新进数据库
-                if not db_name or not collection_name:
-                    # print(f'db_name/collection_name 不能为空')
-                    continue
-                self.datas.append(
-                    {
-                        '数据库名': db_name,
-                        '集合名称': collection_name,
-                        '数据主体': df,
-                        '文件名': name,
-                    }
-                )
-
-    def syj_reports_tb(self, path=None, is_except=[]):
-        """ 淘宝店 生意经报表 """
-        if not path:
-            path = self.path
-        report_names = [
-            {
-                '文件简称': 'baobei',
-                '数据库名': '淘宝_生意经3',
-                '集合名称': '宝贝指标',
-            },
-            {
-                '文件简称': 'order',
-                '数据库名': '淘宝_生意经3',
-                '集合名称': '订单数据',
-            },
-            {
-                '文件简称': '省份城市分析',
-                '数据库名': '淘宝_生意经3',
-                '集合名称': '省份城市分析',
-            },
-            {
-                '文件简称': '店铺销售指标',
-                '数据库名': '淘宝_生意经3',
-                '集合名称': '店铺销售指标',
-            },
-        ]
-
-        for root, dirs, files in os.walk(path, topdown=False):
-            for name in files:
-                if '~$' in name or '.DS' in name or '.localized' in name or '.jpg' in name or '.png' in name:
-                    continue
-                if 'py_xg' in name:
-                    continue
-                is_continue = False
-                if is_except:
-                    for item in is_except:
-                        if item in os.path.join(root, name):
-                            # print(name)
-                            is_continue = True
-                            break
-                if is_continue:  # 需要排除不做处理的文件或文件夹
-                    continue
-
-                # 这里排除掉非目标报表
-                is_continue = False
-                db_name = None  # 初始化参数
-                collection_name = None
-                for item in report_names:
-                    if item['文件简称'] in name:
-                        db_name = item['数据库名']
-                        collection_name = item['集合名称']
-                        is_continue = True
-                if not is_continue:
-                    continue
-
-                if name.endswith('.csv') and 'baobei' in name:
-                    encoding = self.get_encoding(file_path=os.path.join(root, name))
-                    df = pd.read_csv(os.path.join(root, name), encoding=encoding, header=0, na_filter=False)
-                    pattern = re.findall(r'-(\d{4})(\d{2})(\d{2})\W', name)[0]
-                    df['日期'] = '-'.join(pattern)
-                    df.replace(to_replace=['--'], value='', regex=False, inplace=True)
-                    new_name = f'py_xg_淘宝_baobeitrains_{'-'.join(pattern)}.csv'
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
-                elif name.endswith('.csv') and 'order' in name:
-                    """ 这里不能使用表格原先的 gb2312, 会报错 """
-                    # encoding = self.get_encoding(file_path=os.path.join(root, name))
-                    df = pd.read_csv(os.path.join(root, name), encoding='gb18030', header=0, na_filter=False)
-                    pattern = re.findall(r'(.*)(\d{4})(\d{2})(\d{2})-(\d{4})(\d{2})(\d{2})', name)[0]
-                    date1 ='-'.join(pattern[1:4])
-                    date2 = '-'.join(pattern[4:7])
-                    df.insert(loc=0, column='日期', value=date1)
-                    df.insert(loc=1, column='数据周期', value=f'{date1}_{date2}')
-                    df.rename(columns={'宝贝标题': '商品标题', '宝贝链接': '商品链接'}, inplace=True)
-                    df['颜色编码'] = df['商家编码'].apply(
-                        lambda x: ''.join(re.findall(r' .*(\d{4})$', str(x))) if x else x)
-                    new_name = f'py_xg_淘宝_order_{date1}_{date2}.csv'
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
-                elif name.endswith('.csv') and '省份城市分析' in name:
-                    encoding = self.get_encoding(file_path=os.path.join(root, name))
-                    df = pd.read_csv(os.path.join(root, name), encoding=encoding, header=0, na_filter=False)
-                    pattern = re.findall(r'(.*[\u4e00-\u9fa5])(\d{4})(\d{2})(\d{2})\.', name)[0]
-                    date = '-'.join(pattern[1:])
-                    new_name = f'py_xg_淘宝_{pattern[0]}-{date}.csv'
-                    df = pd.read_csv(os.path.join(root, name), encoding=encoding, header=0, na_filter=False)
-                    if len(df) == 0:
-                        print(f'{name} 报表数据为空')
-                        os.remove(os.path.join(root, name))
-                        continue
-                    df['省'] = df['省份'].apply(lambda x: x if ' ├─ ' not in x and ' └─ ' not in x else None)
-                    df['城市'] = df[['省份', '省']].apply(lambda x: '汇总' if x['省'] else x['省份'], axis=1)
-                    df['省'].fillna(method='ffill', inplace=True)
-                    df['城市'].replace(to_replace=[' ├─ | └─ '], value='', regex=True, inplace=True)
-                    pov = df.pop('省')
-                    city = df.pop('城市')
-                    df['省+市'] = df['省份']
-                    df['省份'] = pov
-                    df.insert(loc=1, column='城市', value=city)
-                    df.insert(loc=0, column='日期', value=date)
-                    df['日期'] = pd.to_datetime(df['日期'], format='%Y-%m-%d', errors='ignore')
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
-                elif name.endswith('.csv') and '店铺销售指标' in name:
-                    # 生意经, 店铺指标,仅限月数据,实际日指标也可以
-                    name_st = re.findall(r'(.*)\(分日', name)
-                    if not name_st:
-                        print(f'{name} 已转换的表格')
-                        continue
-                    encoding = self.get_encoding(file_path=os.path.join(root, name))
-                    df = pd.read_csv(os.path.join(root, name), encoding=encoding, header=0, na_filter=False)
-                    if len(df) == 0:
-                        print(f'{name} 报表数据为空')
-                        os.remove(os.path.join(root, name))
-                        continue
-                    df['日期'] = df['日期'].astype(str).apply(
-                        lambda x: '-'.join(re.findall(r'(\d{4})(\d{2})(\d{2})', x)[0]) if x else x)
-                    df['日期'] = pd.to_datetime(df['日期'], format='%Y-%m-%d', errors='ignore')  # 转换日期列
-                    # min_clm = str(df.min()['日期']).split(' ')[0]
-                    # max_clm = str(df.max()['日期']).split(' ')[0]
-                    min_clm = str(df['日期'].min()).split(' ')[0]
-                    max_clm = str(df['日期'].max()).split(' ')[0]
-                    new_name = f'py_xg_淘宝_{name_st[0]}-{min_clm}_{max_clm}.csv'  # 保存时将(分日)去掉
-                    df.replace(to_replace=['--'], value='', regex=False, inplace=True)
-                    df['日期'] = pd.to_datetime(df['日期'], format='%Y-%m-%d', errors='ignore')
-                    self.save_to_csv(df, root, new_name, encoding='utf-8_sig')
-                    os.remove(os.path.join(root, name))
+                    df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
 
                 # 将数据传入 self.datas 等待更新进数据库
                 if not db_name or not collection_name:
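The bulk of this hunk deletes the entire syj_reports_tb method (淘宝店 生意经 cleaning) rather than rewriting it; main() further down stops calling it. One trick from the deleted 省份城市分析 branch is worth recording: it flattened an indented province/city outline into two proper columns with a forward fill. A small reproduction with made-up rows (the ffill spelling is updated for pandas 2.x):

    import pandas as pd

    # Made-up rows imitating the 生意经 province/city export: the province row
    # acts as a header, and its cities are indented with ├─ / └─ markers.
    df = pd.DataFrame({'省份': ['广东省', ' ├─ 广州市', ' └─ 深圳市']})
    df['省'] = df['省份'].apply(lambda x: x if ' ├─ ' not in x and ' └─ ' not in x else None)
    df['城市'] = df[['省份', '省']].apply(lambda x: '汇总' if x['省'] else x['省份'], axis=1)
    df['省'] = df['省'].ffill()  # modern spelling of fillna(method='ffill')
    df['城市'] = df['城市'].replace(' ├─ | └─ ', '', regex=True)
    print(df[['省', '城市']])  # 广东省/汇总, 广东省/广州市, 广东省/深圳市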
@@ -770,17 +539,17 @@ class DataClean:
                 '集合名称': '推广数据_关键词报表',
             },
             {
-                '文件简称': '京东商智_sku_商品明细',
+                '文件简称': 'sku_商品明细',
                 '数据库名': '京东数据3',
                 '集合名称': '京东商智_sku_商品明细',
             },
             {
-                '文件简称': '京东商智_spu_商品明细',
+                '文件简称': 'spu_商品明细',
                 '数据库名': '京东数据3',
                 '集合名称': '京东商智_spu_商品明细',
             },
             {
-                '文件简称': '京东商智_店铺来源_三级来源',
+                '文件简称': '店铺来源_三级来源',
                 '数据库名': '京东数据3',
                 '集合名称': '京东商智_店铺来源',
             },
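Shortening 文件简称 from the full 京东商智_ prefix to the bare token loosens the match: the lookup is a plain substring test, so exports whose names lost or changed the prefix upstream still resolve to the right database and collection. A sketch of that lookup, with a hypothetical file name:

    report_names = [
        {'文件简称': 'sku_商品明细', '数据库名': '京东数据3', '集合名称': '京东商智_sku_商品明细'},
    ]
    name = '京东商智_sku_商品明细_2024-05-01.csv'  # hypothetical export name
    db_name = collection_name = None
    for item in report_names:
        if item['文件简称'] in name:  # substring test: any name containing the token matches
            db_name = item['数据库名']
            collection_name = item['集合名称']
    print(db_name, collection_name)  # 京东数据3 京东商智_sku_商品明细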
@@ -790,8 +559,8 @@ class DataClean:
             for name in files:
                 if '~$' in name or '.DS' in name or '.localized' in name or '.jpg' in name or '.png' in name:
                     continue
-                if 'py_xg' in name:
-                    continue
+                # if 'py_xg' in name:
+                #     continue
                 is_continue = False
                 if is_except:
                     for item in is_except:
@@ -817,47 +586,50 @@ class DataClean:
                 if name.endswith('.csv') and '京东推广_' in name:
                     # df = pd.read_excel(os.path.join(root, name), header=0, engine='openpyxl')
                     df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
-                    new_name = f'py_xg_{name}'
-                    if os.path.isfile(os.path.join(root, new_name)):
-                        os.remove(os.path.join(root, new_name))
-                    os.rename(os.path.join(root, name), os.path.join(root, new_name))
-                elif name.endswith('.xlsx') and '京东商智_sku_商品明细' in name:
-                    df = pd.read_excel(os.path.join(root, name), header=0, engine='openpyxl')
-                    df.replace(to_replace=['-'], value='', regex=False, inplace=True)
-                    pattern = re.findall(r'_(\d{4}-\d{2}-\d{2})', name)[0]
-                    df.insert(loc=0, column='日期', value=pattern)
-                    df.insert(loc=1, column='店铺名称', value='京东箱包旗舰店')
-                    df.fillna(0, inplace=True)
-                    new_name = f'py_xg_{os.path.splitext(name)[0]}.csv'
-                    df.to_csv(os.path.join(root, new_name), encoding='utf-8_sig', index=False, header=True)
-                    # df.to_excel(os.path.join(upload_path, new_name),
-                    #             index=False, header=True, engine='openpyxl', freeze_panes=(1, 0))
-                    os.remove(os.path.join(root, name))
-                elif name.endswith('.xlsx') and '京东商智_spu_商品明细' in name:
-                    df = pd.read_excel(os.path.join(root, name), header=0, engine='openpyxl')
-                    df.replace(to_replace=['-'], value='', regex=False, inplace=True)
-                    pattern = re.findall(r'_(\d{4}-\d{2}-\d{2})', name)[0]
-                    df.insert(loc=0, column='日期', value=pattern)
-                    df.insert(loc=1, column='店铺名称', value='京东箱包旗舰店')
-                    df.fillna(0, inplace=True)
-                    new_name = f'py_xg_{os.path.splitext(name)[0]}.csv'
-                    df.to_csv(os.path.join(root, new_name), encoding='utf-8_sig', index=False, header=True)
-                    # df.to_excel(os.path.join(upload_path, new_name),
-                    #             index=False, header=True, engine='openpyxl', freeze_panes=(1, 0))
-                    os.remove(os.path.join(root, name))
-                elif name.endswith('.xlsx') and '京东商智_店铺来源_三级来源' in name:
-                    df = pd.read_excel(os.path.join(root, name), header=0, engine='openpyxl')
-                    df.replace(to_replace=['-'], value='', regex=False, inplace=True)
-                    df.rename(columns={'时间': '日期'}, inplace=True)
-                    for col in df.columns.tolist():
-                        if '环比' in col or '同比' in col:
-                            df.drop(col, axis=1, inplace=True)
-                    df.fillna(0, inplace=True)
-                    new_name = f'py_xg_{os.path.splitext(name)[0]}.csv'
-                    df.to_csv(os.path.join(root, new_name), encoding='utf-8_sig', index=False, header=True)
-                    # df.to_excel(os.path.join(upload_path, new_name),
-                    #             index=False, header=True, engine='openpyxl', freeze_panes=(1, 0))
-                    os.remove(os.path.join(root, name))
+                    # new_name = f'py_xg_{name}'
+                    # if os.path.isfile(os.path.join(root, new_name)):
+                    #     os.remove(os.path.join(root, new_name))
+                    # os.rename(os.path.join(root, name), os.path.join(root, new_name))
+                elif name.endswith('.csv') and 'sku_商品明细' in name:
+                    df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
+                    # df = pd.read_excel(os.path.join(root, name), header=0, engine='openpyxl')
+                    # df.replace(to_replace=['-'], value='', regex=False, inplace=True)
+                    # pattern = re.findall(r'_(\d{4}-\d{2}-\d{2})', name)[0]
+                    # df.insert(loc=0, column='日期', value=pattern)
+                    # df.insert(loc=1, column='店铺名称', value='京东箱包旗舰店')
+                    # df.fillna(0, inplace=True)
+                    # new_name = f'py_xg_{os.path.splitext(name)[0]}.csv'
+                    # df.to_csv(os.path.join(root, new_name), encoding='utf-8_sig', index=False, header=True)
+                    # # df.to_excel(os.path.join(upload_path, new_name),
+                    # #             index=False, header=True, engine='openpyxl', freeze_panes=(1, 0))
+                    # os.remove(os.path.join(root, name))
+                elif name.endswith('.csv') and 'spu_商品明细' in name:
+                    df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
+                    # df = pd.read_excel(os.path.join(root, name), header=0, engine='openpyxl')
+                    # df.replace(to_replace=['-'], value='', regex=False, inplace=True)
+                    # pattern = re.findall(r'_(\d{4}-\d{2}-\d{2})', name)[0]
+                    # df.insert(loc=0, column='日期', value=pattern)
+                    # df.insert(loc=1, column='店铺名称', value='京东箱包旗舰店')
+                    # df.fillna(0, inplace=True)
+                    # new_name = f'py_xg_{os.path.splitext(name)[0]}.csv'
+                    # df.to_csv(os.path.join(root, new_name), encoding='utf-8_sig', index=False, header=True)
+                    # # df.to_excel(os.path.join(upload_path, new_name),
+                    # #             index=False, header=True, engine='openpyxl', freeze_panes=(1, 0))
+                    # os.remove(os.path.join(root, name))
+                elif name.endswith('.csv') and '店铺来源_三级来源' in name:
+                    df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
+                    # df = pd.read_excel(os.path.join(root, name), header=0, engine='openpyxl')
+                    # df.replace(to_replace=['-'], value='', regex=False, inplace=True)
+                    # df.rename(columns={'时间': '日期'}, inplace=True)
+                    # for col in df.columns.tolist():
+                    #     if '环比' in col or '同比' in col:
+                    #         df.drop(col, axis=1, inplace=True)
+                    # df.fillna(0, inplace=True)
+                    # new_name = f'py_xg_{os.path.splitext(name)[0]}.csv'
+                    # df.to_csv(os.path.join(root, new_name), encoding='utf-8_sig', index=False, header=True)
+                    # # df.to_excel(os.path.join(upload_path, new_name),
+                    # #             index=False, header=True, engine='openpyxl', freeze_panes=(1, 0))
+                    # os.remove(os.path.join(root, name))
 
                 # 将数据传入 self.datas 等待更新进数据库
                 if not db_name or not collection_name:
@@ -1125,10 +897,10 @@ class DataClean:
                 if 'py_xg' not in name:  # 排除非目标文件
                     continue
 
-                if name.endswith('.csv') and '京东商智_spu_商品明细' in name:
+                if name.endswith('.csv') and 'spu_商品明细' in name:
                     t_path = os.path.join(self.source_path, '京东报表', '京东商智_spu_商品明细')
                     bib(t_path, _as_month=True)
-                elif name.endswith('.csv') and '京东商智_sku_商品明细' in name:
+                elif name.endswith('.csv') and 'sku_商品明细' in name:
                     t_path = os.path.join(self.source_path, '京东报表', '京东商智_sku_商品明细')
                     bib(t_path, _as_month=True)
                 elif name.endswith('.csv') and '京东推广_搜索词' in name:
@@ -1140,7 +912,7 @@ class DataClean:
                 elif name.endswith('.csv') and '京东推广_关键词点击' in name:
                     t_path = os.path.join(self.source_path, '京东报表', '关键词报表')
                     bib(t_path, _as_month=True)
-                elif name.endswith('.csv') and '京东商智_店铺来源_三级来源' in name:
+                elif name.endswith('.csv') and '店铺来源_三级来源' in name:
                     t_path = os.path.join(self.source_path, '京东报表', '店铺来源_三级来源')
                     bib(t_path, _as_month=True)
 
@@ -1209,25 +981,10 @@ class DataClean:
                     t_path = os.path.join(self.source_path, '天猫推广报表', '超级短视频_主体')
                     bib(t_path, _as_month=True)
 
-                elif name.endswith('.csv') and 'tg_report_品销宝_明星店铺_万里马官方旗舰店' in name:
-                    if '账户' in name:
-                        t_path = os.path.join(self.source_path, '天猫推广报表', '品销宝', '账户报表')
-                        bib(t_path, _as_month=True)
-                    elif '推广计划' in name:
-                        t_path = os.path.join(self.source_path, '天猫推广报表', '品销宝', '推广计划报表')
-                        bib(t_path, _as_month=True)
-                    elif '推广单元' in name:
-                        t_path = os.path.join(self.source_path, '天猫推广报表', '品销宝', '推广单元报表')
-                        bib(t_path, _as_month=True)
-                    elif '创意' in name:
-                        t_path = os.path.join(self.source_path, '天猫推广报表', '品销宝', '创意报表')
-                        bib(t_path, _as_month=True)
-                    elif '品牌流量包' in name:
-                        t_path = os.path.join(self.source_path, '天猫推广报表', '品销宝', '品牌流量包报表')
-                        bib(t_path, _as_month=True)
-                    elif '定向人群' in name:
-                        t_path = os.path.join(self.source_path, '天猫推广报表', '品销宝', '定向人群报表')
-                        bib(t_path, _as_month=True)
+                elif name.endswith('.csv') and 'tg_report_品销宝_明星店铺_' in name:
+                    t_path = os.path.join(self.source_path, '天猫推广报表', '品销宝')
+                    bib(t_path, _as_month=True)
+
                 elif name.endswith('xlsx') and '商品素材_万里马官方旗舰店' in name:
                     t_path = os.path.join(self.source_path, '商品素材')
                     bib(t_path, _as_month=True)
@@ -1452,8 +1209,8 @@ class DataClean:
                     df=df,
                     db_name=db_name,
                     table_name=collection_name,
-                    move_insert=True,  # 先删除,再插入
-                    df_sql=False,  # 值为 True 时使用 df.to_sql 函数上传整个表, 不会排重
+                    move_insert=False,  # 先删除,再插入,新版有多店数据,不可按日期删除
+                    df_sql=True,  # 值为 True 时使用 df.to_sql 函数上传整个表, 不会排重
                     drop_duplicates=False,  # 值为 True 时检查重复数据再插入,反之直接上传,会比较慢
                     filename=rt_filename,  # 用来追踪处理进度
                     service_database=service_database,  # 字典
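This is the most consequential behavioural change of the release: uploads no longer delete the incoming date range before inserting (move_insert) but append the whole frame via df.to_sql (df_sql). The stated reason is that tables now hold rows from several shops, so a delete keyed only on date would wipe other shops' data. The sketch below contrasts the two strategies in plain pandas/SQLAlchemy; it is not mdbq's implementation, and the connection string and table layout are hypothetical:

    import pandas as pd
    from sqlalchemy import create_engine, text

    engine = create_engine('mysql+pymysql://user:pwd@localhost/demo_db')  # hypothetical DSN

    def upload_move_insert(df, table):
        # Old behaviour: clear the incoming date range, then insert. Safe only
        # while one shop owns the table; with multi-shop data the DELETE also
        # removes every other shop's rows for those dates.
        with engine.begin() as conn:
            conn.execute(text(f'DELETE FROM `{table}` WHERE 日期 BETWEEN :d1 AND :d2'),
                         {'d1': str(df['日期'].min()), 'd2': str(df['日期'].max())})
        df.to_sql(table, engine, if_exists='append', index=False)

    def upload_df_sql(df, table):
        # New behaviour: plain append. Nothing is deleted, but nothing
        # deduplicates either, which is why drop_duplicates stays available.
        df.to_sql(table, engine, if_exists='append', index=False)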
@@ -1521,8 +1278,7 @@ def main(is_mysql=False, is_company=False):
     cn.dmp_tm(is_except=['except'])  # 达摩盘
     cn.tg_reports(is_except=['except'])  # 推广报表,天猫淘宝共同清洗
     cn.syj_reports_tm(is_except=['except'])  # 天猫生意经
-    # # 淘宝生意经,不可以和天猫同时运行
-    # cn.syj_reports_tb(is_except=['except'])  # 淘宝生意经,不可以和天猫同时运行
+
     cn.jd_reports(is_except=['except'])  # 清洗京东报表
     cn.sp_scene_clean(is_except=['except'])  # 商品素材
     cn.upload_df()  # 上传数据库
mdbq/company/copysh.py CHANGED
@@ -301,6 +301,30 @@ class TbFiles:
         now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
         print(f'{now} 同步完成!')
 
+    def refresh_excel(self):
+        # 刷新共享位置的指定文件/文件夹
+        if platform.system() == 'Windows' and socket.gethostname() == 'company':
+            excel_path = os.path.join(self.share_path, 'EXCEL报表')
+            files = os.listdir(excel_path)
+            files = [f'{excel_path}\\{item}' for item in files if item.endswith('.xlsx') or item.endswith('.xls')]
+            r = refresh_all.RefreshAll()
+            for file in files:
+                if '~' in file or 'DS_Store' in file or 'baidu' in file or 'xunlei' in file:
+                    continue
+                if file.endswith('.xlsx') or file.endswith('.xls'):
+                    r.refresh_excel(file=file)
+                    time.sleep(5)
+
+        # 临时加的
+        # excel_file = f'\\\\192.168.1.198\\时尚事业部\\01.运营部\\0-电商周报-每周五更新\\0-WLM_运营周报-1012输出.xlsx'
+        dir_files = f'\\\\192.168.1.198\\时尚事业部\\01.运营部\\0-电商周报-每周五更新'
+        files = os.listdir(dir_files)
+        for file in files:
+            if file.endswith('.xlsx') and file.startswith(
+                    '0-WLM_运营周报') and '~' not in file and 'baidu' not in file:
+                excel_file = os.path.join(dir_files, file)
+                r.refresh_excel(file=excel_file)
+
     def check_upload_mysql(self):
         # 每天只更新一次
         today = datetime.date.today()
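Note a hazard in the new method as published: r = refresh_all.RefreshAll() is created only inside the Windows/'company' branch, while the 临时加的 block below uses r (and a hard-coded UNC share) unconditionally, and main() in the next hunk also calls refresh_excel() on 'Mac2.local'. On any host other than the Windows 'company' machine, the trailing block either fails to list the UNC path or hits an unbound r. A guarded sketch of the same idea; the refresh_all import path is an assumption, since it is not shown in this diff:

    import os
    import platform
    import socket
    import time

    # Assumption: refresh_all is the helper module copysh.py already imports;
    # its real import path is not visible in this diff.
    from mdbq.other import refresh_all

    def refresh_excel_safe(share_path):
        # Keep everything, including the ad-hoc weekly-report loop, behind the
        # host check so `r` can never be referenced while unbound.
        if platform.system() != 'Windows' or socket.gethostname() != 'company':
            return
        r = refresh_all.RefreshAll()
        excel_path = os.path.join(share_path, 'EXCEL报表')
        for item in os.listdir(excel_path):
            if '~' in item or 'DS_Store' in item or 'baidu' in item or 'xunlei' in item:
                continue
            if item.endswith(('.xlsx', '.xls')):
                r.refresh_excel(file=os.path.join(excel_path, item))
                time.sleep(5)  # give Excel/COM time to finish each workbook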
@@ -407,12 +431,15 @@ def main():
         )
         # print(conf)
         myconfig.write_back(datas=conf)  # 写回文件生效
+
+        if socket.gethostname() == 'company' or socket.gethostname() == 'Mac2.local':
+            t.refresh_excel()
         now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S ')
         print(f'{now}数据完成!')
 
-        t.sleep_minutes = 5  # 同步前休眠时间
-        if socket.gethostname() == 'company' or socket.gethostname() == 'Mac2.local':
-            t.tb_file()
+        # t.sleep_minutes = 5  # 同步前休眠时间
+        # if socket.gethostname() == 'company' or socket.gethostname() == 'Mac2.local':
+        #     t.tb_file()
         time.sleep(600)  # 检测间隔
 
 
mdbq/config/products.py CHANGED
@@ -34,6 +34,9 @@ class Products:
 
     def update_my_datas(self):
        my_datas = [
+            {
+                '平台': '天猫', '商品id': '848929365673', '上市年份': '2024年11月'
+            },
             {
                 '平台': '天猫', '商品id': '840499705810', '上市年份': '2024年10月'
             },
mdbq/mysql/mysql.py CHANGED
@@ -180,6 +180,14 @@ class MysqlUpload:
                 # except Exception as e:
                 #     print(f'{e}')
                 #     connection.rollback()
+
+        if cl and db_n and tb_n:
+            mysql_types.mysql_all_dtypes(db_name=db_name, table_name=table_name)  # 更新一个表的 dtypes
+        elif cl and db_n:
+            mysql_types.mysql_all_dtypes(db_name=db_name)  # 更新一个数据库的 dtypes
+        elif cl:
+            mysql_types.mysql_all_dtypes()  # 更新所有数据库所有数据表的 dtypes 信息到本地 json
+
         connection.close()
         return
 
mdbq-2.8.7.dist-info/METADATA → mdbq-2.8.9.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mdbq
-Version: 2.8.7
+Version: 2.8.9
 Home-page: https://pypi.org/project/mdbq
 Author: xigua,
 Author-email: 2587125111@qq.com
mdbq-2.8.7.dist-info/RECORD → mdbq-2.8.9.dist-info/RECORD RENAMED
@@ -1,24 +1,24 @@
 mdbq/__init__.py,sha256=Il5Q9ATdX8yXqVxtP_nYqUhExzxPC_qk_WXQ_4h0exg,16
 mdbq/__version__.py,sha256=y9Mp_8x0BCZSHsdLT_q5tX9wZwd5QgqrSIENLrb6vXA,62
 mdbq/aggregation/__init__.py,sha256=EeDqX2Aml6SPx8363J-v1lz0EcZtgwIBYyCJV6CcEDU,40
-mdbq/aggregation/aggregation.py,sha256=fLsYQO7LTUtVFaVOxXry4OZviiIpZMcTLplE-HXW9XY,71964
+mdbq/aggregation/aggregation.py,sha256=VtGP7KhUY-NUBA2CXt50hYuGOZ1bU_NeaBBvAUxYgTg,72338
 mdbq/aggregation/df_types.py,sha256=U9i3q2eRPTDY8qAPTw7irzu-Tlg4CIySW9uYro81wdk,8125
 mdbq/aggregation/mysql_types.py,sha256=YTGyrF9vcRgfkQbpT-e-JdJ7c7VF1dDHgyx9YZRES8w,10934
 mdbq/aggregation/optimize_data.py,sha256=79uwiM2WqNNFxGpE2wKz742PRq-ZGgFjdOV0vgptHdY,3513
-mdbq/aggregation/query_data.py,sha256=rjXFP6dsrbXNdgA840z_xhrR-ka2i-hVdllHD6Yn2O4,100180
+mdbq/aggregation/query_data.py,sha256=iRgPljgOPE7dzhaaVxRXOEOOKQTmWg6sGsDplNLTvQw,100177
 mdbq/bdup/__init__.py,sha256=AkhsGk81SkG1c8FqDH5tRq-8MZmFobVbN60DTyukYTY,28
 mdbq/bdup/bdup.py,sha256=LAV0TgnQpc-LB-YuJthxb0U42_VkPidzQzAagan46lU,4234
 mdbq/clean/__init__.py,sha256=A1d6x3L27j4NtLgiFV5TANwEkLuaDfPHDQNrPBbNWtU,41
-mdbq/clean/clean_upload.py,sha256=LRXcvsLqON5NJ5rj7RbeV0750N2Jrjtmr4J7sNPNPoM,81544
+mdbq/clean/clean_upload.py,sha256=FJxoEX-2QKuFhrF1ecl_LdZ1uFnVPx4HigNcXdErB28,66561
 mdbq/clean/data_clean.py,sha256=ucfslhqXVZoH2QaXHSAWDky0GhIvH9f4GeNaHg4SrFE,104790
 mdbq/company/__init__.py,sha256=qz8F_GsP_pMB5PblgJAUAMjasuZbOEp3qQOCB39E8f0,21
-mdbq/company/copysh.py,sha256=gWaNgRe_rBHBBphch2AHkGAljZRdfqYBn_FC3m_l11A,20061
+mdbq/company/copysh.py,sha256=UD5BLBe9uMfqjdslyY7-TtGXuJI5jsol-w4kIQFDfQk,21577
 mdbq/company/copysh_bak.py,sha256=NvlXCBZBcO2GIT5nLRYYqhOyHWM1-1RE7DHvgbj6jmQ,19723
 mdbq/company/home_sh.py,sha256=42CZ2tZIXHLl2mOl2gk2fZnjH2IHh1VJ1s3qHABjonY,18021
 mdbq/config/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
 mdbq/config/get_myconf.py,sha256=cmNvsyoNa0RbZ9FOTjSd3jyyGwkxjUo0phvdHbGlrms,6010
 mdbq/config/myconfig.py,sha256=EGymTlAimtHIDJ9egCtOehBEPOj6rea504kvsEZu64o,854
-mdbq/config/products.py,sha256=ceCvyH23PSvdappIL3yIyIpo6Cqhw6ZKyVNFoj3XIYc,6087
+mdbq/config/products.py,sha256=Sj4FSb2dZcMKp6ox-FJdIR87QLgMN_TJ7Z6KAWMTWyw,6214
 mdbq/config/set_support.py,sha256=xkZCX6y9Bq1ppBpJAofld4B2YtchA7fl0eT3dx3CrSI,777
 mdbq/config/update_conf.py,sha256=taL3ZqKgiVWwUrDFuaYhim9a72Hm4BHRhhDscJTziR8,4535
 mdbq/dataframe/__init__.py,sha256=2HtCN8AdRj53teXDqzysC1h8aPL-mMFy561ESmhehGQ,22
@@ -28,7 +28,7 @@ mdbq/log/mylogger.py,sha256=oaT7Bp-Hb9jZt52seP3ISUuxVcI19s4UiqTeouScBO0,3258
 mdbq/mongo/__init__.py,sha256=SILt7xMtQIQl_m-ik9WLtJSXIVf424iYgCfE_tnQFbw,13
 mdbq/mongo/mongo.py,sha256=v9qvrp6p1ZRWuPpbSilqveiE0FEcZF7U5xUPI0RN4xs,31880
 mdbq/mysql/__init__.py,sha256=A_DPJyAoEvTSFojiI2e94zP0FKtCkkwKP1kYUCSyQzo,11
-mdbq/mysql/mysql.py,sha256=cpCmGP4qlkZrJBFdQagUiHXPfuS13J_XcSNeQ97NgFY,45709
+mdbq/mysql/mysql.py,sha256=apcj0WDdbrHr7UzO2kjcesDxDUlWxG4KcIpI1mBuwMk,46152
 mdbq/mysql/recheck_mysql.py,sha256=jHQSlQy0PlQ_EYICQv_2nairUX3t6OIwPtSELKIpjkY,8702
 mdbq/mysql/s_query.py,sha256=bgNNIqYLDCHjD5KTFcm6x4u74selpAGs5ouJYuqX86k,8447
 mdbq/mysql/year_month_day.py,sha256=VgewoE2pJxK7ErjfviL_SMTN77ki8GVbTUcao3vFUCE,1523
@@ -45,7 +45,7 @@ mdbq/req_post/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
 mdbq/req_post/req_tb.py,sha256=PexWSCPJNM6Tv0ol4lAWIhlOwsAr_frnjtcdSHCFiek,36179
 mdbq/spider/__init__.py,sha256=RBMFXGy_jd1HXZhngB2T2XTvJqki8P_Fr-pBcwijnew,18
 mdbq/spider/aikucun.py,sha256=jHrdGWBJQaSywx7V-U4YuM6vWkwC5SR5tTOOdB3YU_c,17306
-mdbq-2.8.7.dist-info/METADATA,sha256=cDSdYxyr-otFt0wyENhynpHpw5P800B9oB_GW6bfINs,243
-mdbq-2.8.7.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
-mdbq-2.8.7.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
-mdbq-2.8.7.dist-info/RECORD,,
+mdbq-2.8.9.dist-info/METADATA,sha256=2_jXKvIIyj0iXk8T4uADVdvpKKF_kxFoWlN75PLoQlo,243
+mdbq-2.8.9.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
+mdbq-2.8.9.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
+mdbq-2.8.9.dist-info/RECORD,,
mdbq-2.8.7.dist-info/WHEEL → mdbq-2.8.9.dist-info/WHEEL RENAMED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.44.0)
+Generator: setuptools (70.1.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 