mdbq 1.5.8.tar.gz → 1.6.0.tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- {mdbq-1.5.8 → mdbq-1.6.0}/PKG-INFO +1 -1
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/aggregation/aggregation.py +24 -15
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/aggregation/query_data.py +86 -5
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/clean/data_clean.py +16 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/mysql/mysql.py +1 -1
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq.egg-info/PKG-INFO +1 -1
- {mdbq-1.5.8 → mdbq-1.6.0}/setup.py +1 -1
- {mdbq-1.5.8 → mdbq-1.6.0}/README.txt +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/__init__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/__version__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/aggregation/__init__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/aggregation/df_types.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/aggregation/mysql_types.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/aggregation/optimize_data.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/bdup/__init__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/bdup/bdup.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/clean/__init__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/company/__init__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/company/copysh.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/config/__init__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/config/get_myconf.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/config/products.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/config/set_support.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/config/update_conf.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/dataframe/__init__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/dataframe/converter.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/log/__init__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/log/mylogger.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/mongo/__init__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/mongo/mongo.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/mysql/__init__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/mysql/s_query.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/mysql/year_month_day.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/other/__init__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/other/porxy.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/other/pov_city.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/other/ua_sj.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/pbix/__init__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/pbix/pbix_refresh.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/pbix/refresh_all.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq/spider/__init__.py +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq.egg-info/SOURCES.txt +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq.egg-info/dependency_links.txt +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/mdbq.egg-info/top_level.txt +0 -0
- {mdbq-1.5.8 → mdbq-1.6.0}/setup.cfg +0 -0

{mdbq-1.5.8 → mdbq-1.6.0}/mdbq/aggregation/aggregation.py

@@ -42,6 +42,7 @@ class DatabaseUpdate:
     def cleaning(self, is_move=True):
         """
         数据清洗, 返回包含 数据库名, 集合名称, 和 df 主体
+        修改 cleaning 时,要同步 support 下的 标题对照表.csv
         """
         if not os.path.exists(self.path):
             print(f'1.1.0 初始化时传入了不存在的目录: {self.path}')

@@ -108,8 +109,12 @@ class DatabaseUpdate:
                 # df.replace(to_replace=['\\N'], value=0, regex=False, inplace=True)  # 替换掉特殊字符
                 # df.replace(to_replace=[''], value=0, regex=False, inplace=True)
                 # df.fillna(0, inplace=True)
-
-
+                if '省' in df.columns.tolist() and '场景名字' in df.columns.tolist() and '地域报表' in name:
+                    db_name = '推广数据2'
+                    collection_name = f'完整_{tg_name}'
+                else:
+                    db_name = '推广数据2'
+                    collection_name = f'{tg_name}'
             if name.endswith('.csv') and '超级直播' in name:
                 # 超级直播
                 df = pd.read_csv(os.path.join(root, name), encoding=encoding, header=0, na_filter=False)

@@ -516,6 +521,13 @@ class DatabaseUpdate:
                     print(f'{name} 报表数据为空')
                     continue
                 df = df[df['缩略图'] != '合计']
+            elif name.endswith('.csv') and '营销概况_全站营销' in name:
+                df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=1, na_filter=False)
+                df = df[(df['日期'] != '日期') & (df['日期'] != '汇总') & (df['日期'] != '0') & (df['花费'] != '0') & (df['花费'] != '0.00')]
+                df['日期'] = df['日期'].apply(lambda x: f'{str(x)[:4]}-{str(x)[4:6]}-{str(x)[6:8]}')
+                df.drop("'当前时间'", axis=1, inplace=True)
+                df.rename(columns={'全站ROI': '全站roi'}, inplace=True)
+                df.insert(loc=1, column='产品线', value='全站营销')

             # 商品素材,必须保持放在最后处理
             elif name.endswith('xlsx'):
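
Note: both aggregation.py (here) and data_clean.py (further down in this diff) gain this branch for the JD 营销概况_全站营销 export: the CSV is read with header=1, repeated header / summary / zero-spend rows are filtered out, the 8-digit 日期 strings are reshaped to YYYY-MM-DD, and a 产品线 column is inserted. A self-contained sketch of that transformation on a toy frame (the sample rows are invented; column names and steps follow the diff):

```python
import pandas as pd

# Toy rows imitating the raw 营销概况_全站营销 export (values are invented).
df = pd.DataFrame({
    '日期': ['日期', '汇总', '20240819', '20240820'],
    '花费': ['花费', '0.00', '120.5', '98.0'],
    '全站ROI': ['全站ROI', '0', '3.2', '2.8'],
})

# Drop the repeated header row, the 汇总 row, and zero-spend rows.
df = df[(df['日期'] != '日期') & (df['日期'] != '汇总')
        & (df['花费'] != '0') & (df['花费'] != '0.00')]

# 20240819 -> 2024-08-19, normalise the ROI column name, tag the product line.
df['日期'] = df['日期'].apply(lambda x: f'{str(x)[:4]}-{str(x)[4:6]}-{str(x)[6:8]}')
df.rename(columns={'全站ROI': '全站roi'}, inplace=True)
df.insert(loc=1, column='产品线', value='全站营销')
print(df)
```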

@@ -836,10 +848,11 @@ class DatabaseUpdate:
     def other_table(self, service_databases=[{'home_lx': 'mysql'}]):
         """ 上传 support 文件夹下的 主推商品.csv """
         support_file = set_support.SetSupport(dirname='support').dirname
-        filename = '主推商品.
+        filename = '主推商品.xlsx'
         if not os.path.isfile(os.path.join(support_file, filename)):
             return
-        df = pd.read_csv(os.path.join(support_file, filename), encoding='utf-8_sig', header=0, na_filter=False)
+        # df = pd.read_csv(os.path.join(support_file, filename), encoding='utf-8_sig', header=0, na_filter=False)
+        df = pd.read_excel(os.path.join(support_file, filename), header=0)
         for service_database in service_databases:
             for service_name, database in service_database.items():
                 username, password, host, port = get_myconf.select_config_values(
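
other_table() now looks for 主推商品.xlsx and reads it with pd.read_excel instead of pd.read_csv (the old CSV read is kept as a comment). Reading .xlsx through pandas requires an Excel engine such as openpyxl to be installed; a guarded read along the same lines (the support path below is only illustrative):

```python
import os
import pandas as pd

support_file = 'support'     # assumed location of the support directory
filename = '主推商品.xlsx'    # per the new diff line

path = os.path.join(support_file, filename)
if os.path.isfile(path):
    # header=0: the first sheet row holds the column names, as in the diff.
    # pd.read_excel needs openpyxl (or another engine) for .xlsx files.
    df = pd.read_excel(path, header=0)
    print(df.head())
```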

@@ -961,6 +974,7 @@ def one_file_to_mysql(file, db_name, table_name, target_service, database):
     username, password, host, port = get_myconf.select_config_values(target_service=target_service, database=database)
     filename = os.path.basename(file)
     df = pd.read_csv(file, encoding='utf-8_sig', header=0, na_filter=False, float_precision='high')
+    # df.replace(to_replace=[','], value='', regex=True, inplace=True)  # 替换掉特殊字符
     m = mysql.MysqlUpload(username=username, password=password, host=host, port=port)
     m.df_to_mysql(df=df, db_name=db_name, table_name=table_name, filename=filename, df_sql=True, drop_duplicates=False,)


@@ -1044,22 +1058,17 @@ if __name__ == '__main__':
     print(username, password, host, port)
     # file_dir(one_file=False)
     # one_file_to_mysql(
-    #     file='/Users/xigua/数据中心/原始文件2
-    #     db_name='
-    #     table_name='
-    #     target_service='
+    #     file='/Users/xigua/数据中心/原始文件2/京东报表/JD推广_全站营销报表/2024-08/万里马箱包推广1_营销概况_全站营销_2024-08-19_2024-09-02.csv',
+    #     db_name='京东数据2',
+    #     table_name='推广数据_全站营销',
+    #     target_service='company',
     #     database='mysql'
     # )
     # db_name = '生意参谋2'
-    # table_name = '
+    # table_name = '自助取数_店铺流量_月数据'
     # upload_dir(
-    #     path='/Users/xigua/数据中心/原始文件2
+    #     path='/Users/xigua/数据中心/原始文件2/月数据/流量来源-自助取数-月数据',
     #     db_name=db_name,
     #     collection_name=table_name,
     #     dbs={'mysql': True, 'mongodb': False},
     # )
-
-    # test2()
-
-    dp = DatabaseUpdate(path='')
-    dp.other_table(service_databases=[{'company': 'mysql'}])

{mdbq-1.5.8 → mdbq-1.6.0}/mdbq/aggregation/query_data.py

@@ -13,6 +13,9 @@ import platform
 import getpass
 import json
 import os
+
+from sqlalchemy.event import remove
+
 """
 程序用于下载数据库(调用 s_query.py 下载并清洗), 并对数据进行聚合清洗, 不会更新数据库信息;


@@ -207,6 +210,7 @@ class MysqlDatasQuery:
             columns_name=['日期', '商品id', '商品白底图', '方版场景图'],
         )
         df = pd.DataFrame(data=data_values)
+
         return df

     def dplyd(self):

@@ -290,7 +294,28 @@ class MysqlDatasQuery:
             projection=projection,
         )
         return df
-
+    def jdqzyx(self):
+        start_date, end_date = self.months_data(num=self.months)
+        projection = {
+            '日期': 1,
+            '产品线': 1,
+            '花费': 1,
+            '全站roi': 1,
+            '全站交易额': 1,
+            '全站订单行': 1,
+            '全站订单成本': 1,
+            '全站费比': 1,
+            '核心位置展现量': 1,
+            '核心位置点击量': 1,
+        }
+        df = self.download.data_to_df(
+            db_name='京东数据2',
+            table_name='推广数据_全站营销',
+            start_date=start_date,
+            end_date=end_date,
+            projection=projection,
+        )
+        return df
     def sku_sales(self):
         start_date, end_date = self.months_data(num=self.months)
         projection = {
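
The new jdqzyx() method follows the same shape as the other MysqlDatasQuery helpers: compute a date window with self.months_data(num=self.months), declare a projection of the 全站营销 columns, and pull the rows through self.download.data_to_df. The window itself is just a rolling last-N-months range; a generic sketch of that kind of computation (an illustration only, not the package's actual months_data implementation):

```python
import datetime

def months_window(num=1):
    """Return (start_date, end_date) strings covering roughly the last `num` months."""
    end_date = datetime.date.today()
    start_date = end_date - datetime.timedelta(days=30 * num)  # coarse month length
    return start_date.strftime('%Y-%m-%d'), end_date.strftime('%Y-%m-%d')

print(months_window(1))  # e.g. ('2024-08-03', '2024-09-02')
```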

@@ -337,6 +362,7 @@ class GroupBy:
         self.output = os.path.join('数据中心/数据库导出')
         self.data_tgyj = {}  # 推广综合聚合数据表
         self.data_jdtg = {}  # 京东推广数据,聚合数据
+        self.sp_index_datas = pd.DataFrame()  # 商品 id 索引表

     @staticmethod
     def try_except(func):  # 在类内部定义一个异常处理方法

@@ -353,6 +379,7 @@ class GroupBy:
     def groupby(self, df, table_name, is_maximize=True):
         """
         self.is_maximize: 是否最大转化数据
+        table_name: 聚合数据库处的名称,不是原始数据库
         """
         if isinstance(df, pd.DataFrame):
             if len(df) == 0:

@@ -361,6 +388,7 @@ class GroupBy:
         else:
             print(f'query_data.groupby函数中 {table_name} 传入的 df 不是 dataframe 结构')
             return pd.DataFrame()
+        # print(table_name)
         if '宝贝主体报表' in table_name:
             df.rename(columns={
                 '场景名字': '营销场景',

@@ -419,6 +447,27 @@ class GroupBy:
                     table_name: df_new,
                 }
             )
+            # df_pic:商品排序索引表, 给 powerbi 中的主推款排序用的,(从上月1号到今天的总花费进行排序)
+            today = datetime.date.today()
+            last_month = today - datetime.timedelta(days=30)
+            if last_month.month == 12:
+                year_my = today.year - 1
+            else:
+                year_my = today.year
+            # 截取 从上月1日 至 今天的花费数据, 推广款式按此数据从高到低排序(商品图+排序)
+            df_pic = df.groupby(['日期', '商品id'], as_index=False).agg({'花费': 'sum'})
+            df_pic = df_pic[~df_pic['商品id'].isin([''])]  # 指定列中删除包含空值的行
+            df_pic = df_pic[(df_pic['日期'] >= f'{year_my}-{last_month.month}-01')]
+            df_pic = df_pic.groupby(['商品id'], as_index=False).agg({'花费': 'sum'})
+            df_pic.sort_values('花费', ascending=False, ignore_index=True, inplace=True)
+            df_pic.reset_index(inplace=True)
+            df_pic['index'] = df_pic['index'] + 100
+            df_pic.rename(columns={'index': '商品索引'}, inplace=True)
+            df_pic_new = pd.merge(df, df_pic, how='left', on=['商品id'])
+            df_pic_new['商品索引'].fillna(1000, inplace=True)
+            self.sp_index_datas = df_pic_new[['商品id', '商品索引']]
+            return df
+        elif '商品索引表' in table_name:
             return df
         elif '人群报表' in table_name:
             df.rename(columns={
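
The df_pic block added to the 宝贝主体报表 branch builds the ranking that PowerBI uses to order 主推款: spend per 商品id is summed from the 1st of last month onward, sorted high to low, and turned into an index that starts at 100, with products lacking recent spend falling back to 1000 via the later fillna. A standalone sketch of the same ranking on invented data (dates are generated relative to today so the filter always has rows to keep):

```python
import datetime
import pandas as pd

today = datetime.date.today()
recent = (today - datetime.timedelta(days=5)).strftime('%Y-%m-%d')
old = (today - datetime.timedelta(days=90)).strftime('%Y-%m-%d')

# Toy spend records (invented values).
df = pd.DataFrame({
    '日期': pd.to_datetime([recent, recent, recent, old]),
    '商品id': ['101', '102', '101', '103'],
    '花费': [50.0, 90.0, 30.0, 10.0],
})

last_month = today - datetime.timedelta(days=30)
year_my = today.year - 1 if last_month.month == 12 else today.year

# Sum spend per product from the 1st of last month onward, sort high -> low.
df_pic = df.groupby(['日期', '商品id'], as_index=False).agg({'花费': 'sum'})
df_pic = df_pic[df_pic['日期'] >= f'{year_my}-{last_month.month}-01']
df_pic = df_pic.groupby(['商品id'], as_index=False).agg({'花费': 'sum'})
df_pic.sort_values('花费', ascending=False, ignore_index=True, inplace=True)

# Rank 0,1,2... becomes 商品索引 100,101,102...; unmatched products would get 1000.
df_pic.reset_index(inplace=True)
df_pic['index'] = df_pic['index'] + 100
df_pic.rename(columns={'index': '商品索引'}, inplace=True)
print(df_pic)   # 102 -> 100, 101 -> 101; 103 falls outside the window
```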

@@ -639,7 +688,7 @@ class GroupBy:
                 }
             )
             return df
-        elif '京准通' in table_name:
+        elif '京东_京准通' in table_name and '全站营销' not in table_name:
             df = df.groupby(['日期', '产品线', '触发sku id', '跟单sku id', 'spu id', '花费', '展现数', '点击数'], as_index=False).agg(
                 **{'直接订单行': ('直接订单行', np.max),
                    '直接订单金额': ('直接订单金额', np.max),

@@ -656,6 +705,19 @@ class GroupBy:
                 }
             )
             return df
+        elif '京东_京准通_全站营销' in table_name:
+            df = df.groupby(['日期', '产品线', '花费'], as_index=False).agg(
+                **{'全站roi': ('全站roi', np.max),
+                   '全站交易额': ('全站交易额', np.max),
+                   '全站订单行': ('全站订单行', np.max),
+                   '全站订单成本': ('全站订单成本', np.max),
+                   '全站费比': ('全站费比', np.max),
+                   '核心位置展现量': ('核心位置展现量', np.max),
+                   '核心位置点击量': ('核心位置点击量', np.max),
+                   }
+            )
+            df = df[df['花费'] > 0]
+            return df
         elif '京东_sku_商品明细' in table_name:
             df = df[df['商品id'] != '合计']
             df = df.groupby(['日期', '商品id', '货号', '访客数', '成交客户数', '加购商品件数', '加购人数'],
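
The new 京东_京准通_全站营销 branch deduplicates by grouping on ('日期', '产品线', '花费') and taking the max of every metric, then drops zero-spend rows. The **{column: (column, np.max)} construct is pandas named aggregation driven by a dict; a compact illustration on a toy frame (values are invented):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    '日期': ['2024-08-19', '2024-08-19', '2024-08-20'],
    '产品线': ['全站营销', '全站营销', '全站营销'],
    '花费': [120.5, 120.5, 0.0],
    '全站roi': [3.2, 3.1, 0.0],
    '全站交易额': [385.6, 380.0, 0.0],
})

# Collapse duplicate (日期, 产品线, 花费) rows, keeping the largest metric values.
df = df.groupby(['日期', '产品线', '花费'], as_index=False).agg(
    **{'全站roi': ('全站roi', np.max),
       '全站交易额': ('全站交易额', np.max)}
)
df = df[df['花费'] > 0]   # drop rows with no spend
print(df)
```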

@@ -723,6 +785,8 @@ class GroupBy:
         cost = self.data_tgyj['商品成本']
         df = pd.merge(sku_sales, cost, how='left', left_on='货号', right_on='款号')
         df = df[['日期', '商品id', '货号', '成交单量', '成交金额', '成本价']]
+        df['商品id'] = df['商品id'].astype(str)
+        jdtg['跟单sku id'] = jdtg['跟单sku id'].astype(str)
         if jd_tg is True:
             # 完整的数据表,包含全店所有推广、销售数据
             df = pd.merge(df, jdtg, how='left', left_on=['日期', '商品id'], right_on=['日期', '跟单sku id'])  # df 合并推广表
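
The two astype(str) casts align the join-key dtypes before the left merge that follows: if 商品id is parsed as int64 on one side while 跟单sku id is a string on the other, pandas will either refuse the merge or match nothing. A small sketch of the aligned merge on toy values:

```python
import pandas as pd

sales = pd.DataFrame({'日期': ['2024-08-20'], '商品id': [654321], '成交金额': [199.0]})
jdtg = pd.DataFrame({'日期': ['2024-08-20'], '跟单sku id': ['654321'], '花费': [35.0]})

# Cast both key columns to str, as the diff does, so the keys compare equal.
sales['商品id'] = sales['商品id'].astype(str)
jdtg['跟单sku id'] = jdtg['跟单sku id'].astype(str)

df = pd.merge(sales, jdtg, how='left',
              left_on=['日期', '商品id'], right_on=['日期', '跟单sku id'])
print(df[['日期', '商品id', '成交金额', '花费']])
```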

@@ -847,9 +911,9 @@ def data_aggregation_one(service_databases=[{}], months=1):
     data_dict = [
         {
             '数据库名': '聚合数据',
-            '集合名': '京东
-            '唯一主键': ['日期', '
-            '数据主体': sdq.
+            '集合名': '京东_京准通_全站营销',
+            '唯一主键': ['日期', '产品线',],
+            '数据主体': sdq.jdqzyx(),
         },
     ]
     ######################################################

@@ -936,6 +1000,12 @@ def data_aggregation(service_databases=[{}], months=1):
             '唯一主键': ['日期', '产品线', '触发sku id', '跟单sku id', '花费', ],
             '数据主体': sdq.jdjzt(),
         },
+        {
+            '数据库名': '聚合数据',
+            '集合名': '京东_京准通_全站营销',
+            '唯一主键': ['日期', '产品线', '花费'],
+            '数据主体': sdq.jdqzyx(),
+        },
         {
             '数据库名': '聚合数据',
             '集合名': '京东_sku_商品明细',

@@ -958,6 +1028,17 @@ def data_aggregation(service_databases=[{}], months=1):
     for items in data_dict:  # 遍历返回结果
         db_name, table_name, unique_key_list, df = items['数据库名'], items['集合名'], items['唯一主键'], items['数据主体']
         df = g.groupby(df=df, table_name=table_name, is_maximize=True)  # 2. 聚合数据
+        if len(g.sp_index_datas) != 0:
+            # 由推广主体报表,写入一个商品索引表,索引规则:从上月 1 号至今花费从高到低排序
+            m.df_to_mysql(
+                df=g.sp_index_datas,
+                db_name='属性设置2',
+                table_name='商品索引表',
+                drop_duplicates=False,
+                icm_update=['商品id'],
+                service_database=service_database,
+            )
+            g.sp_index_datas = pd.DataFrame()  # 重置,不然下个循环会继续刷入数据库
         # g.as_csv(df=df, filename=table_name + '.csv')  # 导出 csv
         m.df_to_mysql(
             df=df,
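
Inside the data_aggregation loop the 商品索引表 is flushed as a side channel: whenever groupby() has populated g.sp_index_datas, it is written to 属性设置2.商品索引表 and the attribute is reset to an empty frame so later iterations do not re-upload it. The control flow, stripped of the MySQL specifics (the upload function and loop values below are stand-ins, not the package's API):

```python
import pandas as pd

def upload(df, db_name, table_name):
    # Stand-in for mysql.MysqlUpload.df_to_mysql; here it only logs the write.
    print(f'writing {len(df)} rows to {db_name}.{table_name}')

sp_index_datas = pd.DataFrame()              # mirrors GroupBy.sp_index_datas

for table_name in ['宝贝主体报表', '京东_京准通_全站营销']:   # placeholder loop values
    # groupby() only fills sp_index_datas in the 宝贝主体报表 branch.
    if table_name == '宝贝主体报表':
        sp_index_datas = pd.DataFrame({'商品id': ['101'], '商品索引': [100]})

    if len(sp_index_datas) != 0:
        upload(sp_index_datas, db_name='属性设置2', table_name='商品索引表')
        sp_index_datas = pd.DataFrame()      # reset so the next iteration skips the write
```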

{mdbq-1.5.8 → mdbq-1.6.0}/mdbq/clean/data_clean.py

@@ -135,6 +135,9 @@ class DataClean:
                 tm_s_name = pattern[0] + shop_name + date_min + date_max
                 new_root_p = pathlib.Path(self.source_path, '推广报表', tg_name)  # 文件夹,未包括文件名
                 df['日期'] = pd.to_datetime(df['日期'], format='%Y-%m-%d', errors='ignore')
+                if '省' in df.columns.tolist() and '场景名字' in df.columns.tolist():
+                    new_root_p = pathlib.Path(self.source_path, '推广报表', f'完整_{tg_name}')
+                    tm_s_name = f'完整_{tm_s_name}'
                 self.save_to_csv(df, new_root_p, tm_s_name)
                 if self.set_up_to_mogo:
                     d.df_to_mongo(df=df, db_name='天猫数据1', collection_name=f'天猫_推广_{tg_name}')

@@ -662,6 +665,16 @@ class DataClean:
                     m.df_to_mysql(df=df, db_name='天猫数据1', tabel_name='万相台_人群洞察')

             # ----------------------- 京东数据处理分界线 -----------------------
+            elif name.endswith('.csv') and '营销概况_全站营销' in name:
+                df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=1, na_filter=False)
+                df = df[(df['日期'] != '日期') & (df['日期'] != '汇总') & (df['日期'] != '0') & (df['花费'] != '0') & (df['花费'] != '0.00')]
+                df['日期'] = df['日期'].apply(lambda x: f'{str(x)[:4]}-{str(x)[4:6]}-{str(x)[6:8]}')
+                df.drop("'当前时间'", axis=1, inplace=True)
+                df.rename(columns={'全站ROI': '全站roi'}, inplace=True)
+                df.insert(loc=1, column='产品线', value='全站营销')
+                new_name = re.sub('至', '_', name)
+                self.save_to_csv(df, root, new_name)
+                os.remove(os.path.join(root, name))
             elif name.endswith('.xlsx') and '店铺来源_流量来源' in name:
                 # 京东店铺来源
                 if '按天' not in name:
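
After cleaning, this data_clean branch saves the frame under a filename with 至 replaced by an underscore, then deletes the original CSV. The renaming step on its own (the example filename is illustrative, patterned on the path in the __main__ comment earlier in this diff):

```python
import re

name = '万里马箱包推广1_营销概况_全站营销_2024-08-19至2024-09-02.csv'   # illustrative
new_name = re.sub('至', '_', name)
print(new_name)   # ..._2024-08-19_2024-09-02.csv
```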

@@ -1150,6 +1163,9 @@ class DataClean:
             elif name.endswith('.csv') and '付费广告_行业分析_行业大盘' in name:
                 t_path = str(pathlib.Path(self.source_path, '京东报表/行业大盘_流量排行'))
                 bib(t_path, _as_month=False)
+            elif name.endswith('.csv') and '营销概况_全站营销' in name:
+                t_path = str(pathlib.Path(self.source_path, '京东报表/JD推广_全站营销报表'))
+                bib(t_path, _as_month=True)
             # 京东分界线 ------- 结束标记

     def attribute(self, path=None, _str='商品素材导出', ):