mdbq 1.6.9.tar.gz → 1.7.1.tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- {mdbq-1.6.9 → mdbq-1.7.1}/PKG-INFO +1 -1
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/aggregation/query_data.py +79 -6
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq.egg-info/PKG-INFO +1 -1
- {mdbq-1.6.9 → mdbq-1.7.1}/setup.py +1 -1
- {mdbq-1.6.9 → mdbq-1.7.1}/README.txt +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/__init__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/__version__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/aggregation/__init__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/aggregation/aggregation.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/aggregation/df_types.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/aggregation/mysql_types.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/aggregation/optimize_data.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/bdup/__init__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/bdup/bdup.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/clean/__init__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/clean/data_clean.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/company/__init__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/company/copysh.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/config/__init__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/config/get_myconf.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/config/products.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/config/set_support.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/config/update_conf.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/dataframe/__init__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/dataframe/converter.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/log/__init__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/log/mylogger.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/mongo/__init__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/mongo/mongo.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/mysql/__init__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/mysql/mysql.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/mysql/s_query.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/mysql/year_month_day.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/other/__init__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/other/porxy.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/other/pov_city.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/other/ua_sj.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/pbix/__init__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/pbix/pbix_refresh.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/pbix/refresh_all.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq/spider/__init__.py +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq.egg-info/SOURCES.txt +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq.egg-info/dependency_links.txt +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/mdbq.egg-info/top_level.txt +0 -0
- {mdbq-1.6.9 → mdbq-1.7.1}/setup.cfg +0 -0

{mdbq-1.6.9 → mdbq-1.7.1}/mdbq/aggregation/query_data.py

@@ -415,7 +415,7 @@ class GroupBy:
             print(f'query_data.groupby函数中 {table_name} 传入的 df 不是 dataframe 结构')
             return pd.DataFrame()
         # print(table_name)
-        if '主体报表' in table_name:
+        if '天猫_主体报表' in table_name:
             df.rename(columns={
                 '场景名字': '营销场景',
                 '主体id': '商品id',
@@ -473,6 +473,11 @@ class GroupBy:
                 table_name: df_new,
                 }
             )
+            self.data_tgyj.update(
+                {
+                    '天猫汇总表调用': df,
+                }
+            )
             # df_pic:商品排序索引表, 给 powerbi 中的主推款排序用的,(从上月1号到今天的总花费进行排序)
             today = datetime.date.today()
             last_month = today - datetime.timedelta(days=30)
@@ -577,7 +582,7 @@ class GroupBy:
             )
             df.insert(loc=1, column='推广渠道', value='万相台无界版') # df中插入新列
             return df
-        elif '超级直播' in table_name:
+        elif '天猫_超级直播' in table_name:
             df.rename(columns={
                 '观看次数': '观看次数',
                 '总购物车数': '加购量',
@@ -625,6 +630,23 @@ class GroupBy:
             df.insert(loc=1, column='推广渠道', value='万相台无界版') # df中插入新列
             # df.insert(loc=2, column='营销场景', value='超级直播') # df中插入新列
             # df = df.loc[df['日期'].between(start_day, today)]
+            df_new = df.groupby(['日期', '推广渠道', '营销场景'], as_index=False).agg(
+                **{
+                    '花费': ('花费', np.sum),
+                    '展现量': ('展现量', np.sum),
+                    '观看次数': ('观看次数', np.sum),
+                    '加购量': ('加购量', np.sum),
+                    '成交笔数': ('成交笔数', np.sum),
+                    '成交金额': ('成交金额', np.sum),
+                    '直接成交笔数': ('直接成交笔数', np.sum),
+                    '直接成交金额': ('直接成交金额', np.sum),
+                }
+            )
+            self.data_tgyj.update(
+                {
+                    table_name: df_new,
+                }
+            )
             return df
         elif '宝贝指标' in table_name:
             """ 聚合时不可以加商家编码,编码有些是空白,有些是 0 """
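The added lines roll the live-stream report up by 日期/推广渠道/营销场景 with pandas named aggregation (output column → (input column, reducer)) and cache the result in self.data_tgyj for the merge step added further down. Below is a minimal, standalone sketch of that pattern; the English column names are placeholders, and it passes the string 'sum' where the package passes np.sum, which recent pandas releases flag with a FutureWarning.

```python
# Minimal sketch of the named-aggregation pattern used in the added lines.
# Placeholder English column names; the package groups by 日期/推广渠道/营销场景
# and sums the Chinese-named metric columns.
import pandas as pd

df = pd.DataFrame({
    'date': ['2024-01-01', '2024-01-01', '2024-01-02'],
    'channel': ['万相台无界版'] * 3,
    'scene': ['超级直播'] * 3,
    'cost': [10.0, 5.0, 7.5],
    'views': [100, 50, 80],
})

# Output column -> (input column, reducer). 'sum' behaves like np.sum here but
# avoids the FutureWarning that newer pandas emits for numpy callables.
df_new = df.groupby(['date', 'channel', 'scene'], as_index=False).agg(
    **{
        'cost': ('cost', 'sum'),
        'views': ('views', 'sum'),
    }
)
print(df_new)
```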
@@ -818,7 +840,7 @@ class GroupBy:
     def performance(self, bb_tg=True):
         # print(self.data_tgyj)
         tg, syj, idbm, pic, cost = (
-            self.data_tgyj['主体报表'],
+            self.data_tgyj['天猫_主体报表'],
             self.data_tgyj['天猫生意经_宝贝指标'],
             self.data_tgyj['商品id编码表'],
             self.data_tgyj['商品id图片对照表'],
@@ -851,6 +873,44 @@ class GroupBy:
         df['盈亏'] = df.apply(lambda x: x['商品毛利'] - x['花费'], axis=1)
         return df
 
+    def performance_concat(self, bb_tg=True):
+        tg, zb = self.data_tgyj['天猫汇总表调用'], self.data_tgyj['天猫_超级直播']
+        zb.rename(columns={
+            '观看次数': '点击量',
+        }, inplace=True)
+        zb.fillna(0, inplace=True) # astype 之前要填充空值
+        tg.fillna(0, inplace=True)
+        zb = zb.astype({
+            '花费': float,
+            '展现量': int,
+            '点击量': int,
+            '加购量': int,
+            '成交笔数': int,
+            '成交金额': float,
+            '直接成交笔数': int,
+            '直接成交金额': float,
+        }, errors='raise')
+        tg = tg.astype({
+            '商品id': str,
+            '花费': float,
+            '展现量': int,
+            '点击量': int,
+            '加购量': int,
+            '成交笔数': int,
+            '成交金额': float,
+            '直接成交笔数': int,
+            '直接成交金额': float,
+            '自然流量曝光量': int,
+        }, errors='raise')
+        df = pd.concat([tg, zb], axis=0, ignore_index=True)
+        df.fillna(0, inplace=True) # concat 之后要填充空值
+        df = df.astype(
+            {
+                '自然流量曝光量': int,
+            }
+        )
+        return df
+
     def performance_jd(self, jd_tg=True):
         jdtg, sku_sales = self.data_jdtg['京东_京准通'], self.data_jdtg['京东_sku_商品明细']
         jdtg = jdtg.groupby(['日期', '跟单sku id'],
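performance_concat, new in 1.7.1, renames 观看次数 to 点击量 so the live-stream frame lines up with the cached 天猫汇总表调用 frame, fills NaN before each astype, and concatenates the two. The second fillna/astype pass is needed because rows from the 超级直播 frame have no 自然流量曝光量 column and come out of the concat as NaN. A rough standalone sketch of the same fill → cast → concat → fill → cast flow, using hypothetical English column names:

```python
# Rough sketch of the fillna -> astype -> concat -> fillna -> astype flow in
# performance_concat, with hypothetical English column names; the real method
# works on the cached 天猫汇总表调用 and 天猫_超级直播 frames.
import pandas as pd

tg = pd.DataFrame({'item_id': ['101'], 'cost': [12.5], 'clicks': [30], 'organic_impr': [400]})
zb = pd.DataFrame({'cost': [3.0], 'views': [90]})

zb = zb.rename(columns={'views': 'clicks'})  # align column names before concat
zb.fillna(0, inplace=True)                   # fill before casting dtypes
tg.fillna(0, inplace=True)
zb = zb.astype({'cost': float, 'clicks': int}, errors='raise')
tg = tg.astype({'item_id': str, 'cost': float, 'clicks': int, 'organic_impr': int}, errors='raise')

df = pd.concat([tg, zb], axis=0, ignore_index=True)
df.fillna(0, inplace=True)                   # zb rows have no organic_impr, so they arrive as NaN
df = df.astype({'organic_impr': int})        # cast back to int after the fill
print(df)
```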
@@ -1035,7 +1095,7 @@ def data_aggregation(service_databases=[{}], months=1):
     data_dict = [
         {
             '数据库名': '聚合数据',
-            '集合名': '主体报表',
+            '集合名': '天猫_主体报表',
             '唯一主键': ['日期', '推广渠道', '营销场景', '商品id', '花费'],
             '数据主体': sdq.tg_wxt(),
         },
@@ -1153,6 +1213,18 @@ def data_aggregation(service_databases=[{}], months=1):
             icm_update=['日期', '商品id'], # 设置唯一主键
             service_database=service_database,
         )
+
+        res = g.performance_concat(bb_tg=False) # 推广主体合并直播表,依赖其他表,单独做
+        m.df_to_mysql(
+            df=res,
+            db_name='聚合数据',
+            table_name='天猫_推广汇总',
+            drop_duplicates=False,
+            icm_update=['日期', '商品id'], # 设置唯一主键
+            service_database=service_database,
+        )
+
+
         res = g.performance_jd(jd_tg=False) # 盈亏表,依赖其他表,单独做
         m.df_to_mysql(
             df=res,
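The added block plugs the new merge into data_aggregation the same way as the existing per-table writes: build the frame with a GroupBy method, then hand it to mdbq's MySQL writer. The sketch below assumes only the df_to_mysql keyword arguments visible in this diff; FakeGroupBy and FakeUploader are hypothetical stand-ins for the real g and m objects that data_aggregation constructs outside this hunk, included only so the example runs on its own.

```python
# Hypothetical stand-ins: in the package, g is a query_data.GroupBy instance and
# m is mdbq's MySQL uploader; only the df_to_mysql keyword arguments shown in
# this diff are assumed here.
import pandas as pd

class FakeGroupBy:
    def performance_concat(self, bb_tg=True):
        # The real method concatenates the cached 天猫汇总表调用 and 天猫_超级直播 frames.
        return pd.DataFrame({'日期': ['2024-01-01'], '商品id': ['101'], '花费': [10.0]})

class FakeUploader:
    def df_to_mysql(self, df, db_name, table_name, drop_duplicates, icm_update, service_database):
        print(f'would write {len(df)} rows to {db_name}.{table_name}, unique key {icm_update}')

g, m = FakeGroupBy(), FakeUploader()
service_database = {'company': 'mysql'}

res = g.performance_concat(bb_tg=False)  # 推广主体合并直播表,依赖其他表,单独做
m.df_to_mysql(
    df=res,
    db_name='聚合数据',
    table_name='天猫_推广汇总',
    drop_duplicates=False,
    icm_update=['日期', '商品id'],  # 设置唯一主键
    service_database=service_database,
)
```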
@@ -1163,6 +1235,7 @@ def data_aggregation(service_databases=[{}], months=1):
             service_database=service_database,
         )
 
+
         # 这里要注释掉,不然 copysh.py 可能有问题,这里主要修改配置文件,后续触发 home_lx 的 optimize_datas.py(有s)程序进行全局清理
         # optimize_data.op_data(service_databases=service_databases, days=3650) # 立即启动对聚合数据的清理工作
 
@@ -1172,7 +1245,7 @@ def main():
 
 
 if __name__ == '__main__':
-    data_aggregation(service_databases=[{'company': 'mysql'}], months=
-    # data_aggregation_one(service_databases=[{'
+    data_aggregation(service_databases=[{'company': 'mysql'}], months=24) # 正常的聚合所有数据
+    # data_aggregation_one(service_databases=[{'company': 'mysql'}], months=1) # 单独聚合某一个数据库,具体库进函数编辑
     # optimize_data.op_data(service_databases=[{'company': 'mysql'}], days=3650) # 立即启动对聚合数据的清理工作
 