mdbq 2.3.5__tar.gz → 2.3.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. {mdbq-2.3.5 → mdbq-2.3.7}/PKG-INFO +1 -1
  2. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/aggregation/query_data.py +166 -1
  3. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/company/copysh.py +7 -2
  4. mdbq-2.3.7/mdbq/req_post/req_tb.py +621 -0
  5. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq.egg-info/PKG-INFO +1 -1
  6. {mdbq-2.3.5 → mdbq-2.3.7}/setup.py +1 -1
  7. mdbq-2.3.5/mdbq/req_post/req_tb.py +0 -330
  8. {mdbq-2.3.5 → mdbq-2.3.7}/README.txt +0 -0
  9. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/__init__.py +0 -0
  10. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/__version__.py +0 -0
  11. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/aggregation/__init__.py +0 -0
  12. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/aggregation/aggregation.py +0 -0
  13. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/aggregation/df_types.py +0 -0
  14. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/aggregation/mysql_types.py +0 -0
  15. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/aggregation/optimize_data.py +0 -0
  16. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/bdup/__init__.py +0 -0
  17. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/bdup/bdup.py +0 -0
  18. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/clean/__init__.py +0 -0
  19. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/clean/data_clean.py +0 -0
  20. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/company/__init__.py +0 -0
  21. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/company/home_sh.py +0 -0
  22. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/config/__init__.py +0 -0
  23. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/config/get_myconf.py +0 -0
  24. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/config/products.py +0 -0
  25. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/config/set_support.py +0 -0
  26. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/config/update_conf.py +0 -0
  27. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/dataframe/__init__.py +0 -0
  28. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/dataframe/converter.py +0 -0
  29. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/log/__init__.py +0 -0
  30. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/log/mylogger.py +0 -0
  31. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/mongo/__init__.py +0 -0
  32. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/mongo/mongo.py +0 -0
  33. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/mysql/__init__.py +0 -0
  34. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/mysql/mysql.py +0 -0
  35. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/mysql/s_query.py +0 -0
  36. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/mysql/year_month_day.py +0 -0
  37. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/other/__init__.py +0 -0
  38. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/other/porxy.py +0 -0
  39. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/other/pov_city.py +0 -0
  40. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/other/sku_picture.py +0 -0
  41. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/other/ua_sj.py +0 -0
  42. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/pbix/__init__.py +0 -0
  43. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/pbix/pbix_refresh.py +0 -0
  44. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/pbix/refresh_all.py +0 -0
  45. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/pbix/refresh_all_old.py +0 -0
  46. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/req_post/__init__.py +0 -0
  47. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/spider/__init__.py +0 -0
  48. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq/spider/aikucun.py +0 -0
  49. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq.egg-info/SOURCES.txt +0 -0
  50. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq.egg-info/dependency_links.txt +0 -0
  51. {mdbq-2.3.5 → mdbq-2.3.7}/mdbq.egg-info/top_level.txt +0 -0
  52. {mdbq-2.3.5 → mdbq-2.3.7}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: mdbq
3
- Version: 2.3.5
3
+ Version: 2.3.7
4
4
  Home-page: https://pypi.org/project/mdbsql
5
5
  Author: xigua,
6
6
  Author-email: 2587125111@qq.com
@@ -1,5 +1,6 @@
1
1
  # -*- coding: UTF-8 –*-
2
2
  import re
3
+ from unittest.mock import inplace
3
4
 
4
5
  from mdbq.mongo import mongo
5
6
  from mdbq.mysql import mysql
@@ -1003,6 +1004,23 @@ class GroupBy:
1003
1004
  df.insert(loc=1, column='推广渠道', value='万相台无界版') # df中插入新列
1004
1005
  df['是否品牌词'] = df['词名字/词包名字'].str.contains('万里马|wanlima', regex=True)
1005
1006
  df['是否品牌词'] = df['是否品牌词'].apply(lambda x: '品牌词' if x else '')
1007
+ dir_file = f'\\\\192.168.1.198\\时尚事业部\\01.运营部\\0-电商周报-每周五更新\\分类配置文件.xlsx'
1008
+ dir_file2 = '/Volumes/时尚事业部/01.运营部/0-电商周报-每周五更新/分类配置文件.xlsx'
1009
+ if not os.path.isfile(dir_file):
1010
+ dir_file = dir_file2
1011
+ if os.path.isfile(dir_file):
1012
+ df_fl = pd.read_excel(dir_file, sheet_name='关键词分类', header=0)
1013
+ df_fl.rename(columns={'分类1': '词分类'}, inplace=True)
1014
+ df_fl = df_fl[['关键词', '词分类']]
1015
+ df = pd.merge(df, df_fl, left_on=['词名字/词包名字'], right_on=['关键词'], how='left')
1016
+ df.pop('关键词')
1017
+ df['词分类'].fillna('', inplace=True)
1018
+ if '词分类' in df.columns.tolist():
1019
+ df['词分类'] = df.apply(lambda x: self.ret_keyword(keyword=str(x['词名字/词包名字']), as_file=False) if x['词分类'] == '' else x['词分类'], axis=1)
1020
+ else:
1021
+ df['词分类'] = df['词名字/词包名字'].apply(lambda x: self.ret_keyword(keyword=str(x), as_file=False))
1022
+ # df.to_csv('/Users/xigua/Downloads/test.csv', index=False, header=True, encoding='utf-8_sig')
1023
+ # breakpoint()
1006
1024
  return df
1007
1025
  elif '天猫_超级直播' in table_name:
1008
1026
  df.rename(columns={
@@ -1389,6 +1407,153 @@ class GroupBy:
1389
1407
  print(f'<{table_name}>: Groupby 类尚未配置,数据为空')
1390
1408
  return pd.DataFrame({})
1391
1409
 
1410
+ def ret_keyword(self, keyword, as_file=False):
1411
+ datas = [
1412
+ {
1413
+ '类别': '品牌词',
1414
+ '值': [
1415
+ '万里马',
1416
+ 'wanlima',
1417
+ 'fion',
1418
+ '菲安妮',
1419
+ '迪桑娜',
1420
+ 'dissona',
1421
+ 'hr',
1422
+ 'vh',
1423
+ 'songmont',
1424
+ 'vanessahogan',
1425
+ 'dilaks',
1426
+ 'khdesign',
1427
+ 'peco',
1428
+ 'giimmii',
1429
+ 'cassile',
1430
+ 'grotto',
1431
+ 'why',
1432
+ 'roulis',
1433
+ 'lesschic',
1434
+ 'amazing song',
1435
+ 'mytaste',
1436
+ 'bagtree',
1437
+ '红谷',
1438
+ 'hongu',
1439
+ ]
1440
+ },
1441
+ {
1442
+ '类别': '智选',
1443
+ '值': [
1444
+ '智选',
1445
+ ]
1446
+ },
1447
+ {
1448
+ '类别': '年份',
1449
+ '值': [
1450
+ '20',
1451
+ ]
1452
+ },
1453
+ {
1454
+ '类别': '材质',
1455
+ '值': [
1456
+ '皮',
1457
+ '牛仔',
1458
+ '丹宁',
1459
+ '帆布',
1460
+ ]
1461
+ },
1462
+ {
1463
+ '类别': '季节',
1464
+ '值': [
1465
+ '春',
1466
+ '夏',
1467
+ '秋',
1468
+ '冬',
1469
+ ]
1470
+ },
1471
+ {
1472
+ '类别': '款式',
1473
+ '值': [
1474
+ '水桶',
1475
+ '托特',
1476
+ '腋下',
1477
+ '小方',
1478
+ '通用款',
1479
+ '手拿',
1480
+ '马鞍',
1481
+ '链条',
1482
+ '菜篮',
1483
+ 'hobo',
1484
+ '波士顿',
1485
+ '凯莉',
1486
+ '饺子',
1487
+ '盒子',
1488
+ '牛角',
1489
+ '公文',
1490
+ '月牙',
1491
+ '单肩',
1492
+ '枕头',
1493
+ '斜挎',
1494
+ '手提',
1495
+ '手拎',
1496
+ '拎手',
1497
+ '斜肩',
1498
+ '棒球',
1499
+ '饺包',
1500
+ '保龄球',
1501
+ '戴妃',
1502
+ '半月',
1503
+ '弯月',
1504
+ '法棍',
1505
+ '流浪',
1506
+ '拎包',
1507
+ '中式',
1508
+ '手挽',
1509
+ '皮带',
1510
+ '眼镜',
1511
+ '斜跨',
1512
+ '律师',
1513
+ '斜背',
1514
+ ]
1515
+ },
1516
+ {
1517
+ '类别': '品类词',
1518
+ '值': [
1519
+ '老花',
1520
+ '包包',
1521
+ '通勤',
1522
+ '高级感',
1523
+ '轻奢',
1524
+ '包',
1525
+ '新款',
1526
+ '小众',
1527
+ '爆款',
1528
+ '工作',
1529
+ '精致',
1530
+ '奢侈',
1531
+ '袋',
1532
+ ],
1533
+ },
1534
+ ]
1535
+ if as_file:
1536
+ with open(os.path.join(self.output, f'分类配置.json'), 'w') as f:
1537
+ json.dump(datas, f, ensure_ascii=False, sort_keys=False, indent=4)
1538
+ breakpoint()
1539
+ result = ''
1540
+ res = []
1541
+ is_continue = False
1542
+ for data in datas:
1543
+ for item in data['值']:
1544
+ if item == '20':
1545
+ pattern = r'\d\d'
1546
+ res = re.findall(f'{item}{pattern}', str(keyword), re.IGNORECASE)
1547
+ else:
1548
+ res = re.findall(item, str(keyword), re.IGNORECASE)
1549
+ if res:
1550
+ result = data['类别']
1551
+ is_continue = True
1552
+ break
1553
+ if is_continue:
1554
+ break
1555
+ return result
1556
+
1392
1557
  # @try_except
1393
1558
  def performance(self, bb_tg=True):
1394
1559
  # print(self.data_tgyj)
@@ -1875,6 +2040,6 @@ def main():
1875
2040
 
1876
2041
 
1877
2042
  if __name__ == '__main__':
1878
- data_aggregation(service_databases=[{'company': 'mysql'}], months=1) # 正常的聚合所有数据
2043
+ data_aggregation(service_databases=[{'company': 'mysql'}], months=0) # 正常的聚合所有数据
1879
2044
  # data_aggregation_one(service_databases=[{'company': 'mysql'}], months=1) # 单独聚合某一个数据库,具体库进函数编辑
1880
2045
  # optimize_data.op_data(service_databases=[{'company': 'mysql'}], days=3650) # 立即启动对聚合数据的清理工作
@@ -265,8 +265,13 @@ class TbFiles:
265
265
  time.sleep(5)
266
266
 
267
267
  # 临时加的
268
- excel_file = f'\\\\192.168.1.198\\时尚事业部\\01.运营部\\0-电商周报-每周五更新\\WLM 周报-模版V1.xlsx'
269
- r.refresh_excel(file=excel_file)
268
+ # excel_file = f'\\\\192.168.1.198\\时尚事业部\\01.运营部\\0-电商周报-每周五更新\\0-WLM_运营周报-1012输出.xlsx'
269
+ dir_files = f'\\\\192.168.1.198\\时尚事业部\\01.运营部\\0-电商周报-每周五更新'
270
+ files = os.listdir(dir_files)
271
+ for file in files:
272
+ if file.endswith('.xlsx') and '0-WLM_运营周报' in file and '~' not in file and 'baidu' not in file:
273
+ excel_file = os.path.join(dir_files, file)
274
+ r.refresh_excel(file=excel_file)
270
275
 
271
276
  self.before_max_time = self.check_change() # 重置值, 避免重复同步
272
277