mdbq 2.9.0__py3-none-any.whl → 2.9.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mdbq/aggregation/aggregation.py +43 -21
- mdbq/aggregation/query_data.py +88 -15
- mdbq/mysql/mysql.py +292 -3
- {mdbq-2.9.0.dist-info → mdbq-2.9.3.dist-info}/METADATA +1 -1
- {mdbq-2.9.0.dist-info → mdbq-2.9.3.dist-info}/RECORD +7 -7
- {mdbq-2.9.0.dist-info → mdbq-2.9.3.dist-info}/WHEEL +1 -1
- {mdbq-2.9.0.dist-info → mdbq-2.9.3.dist-info}/top_level.txt +0 -0
mdbq/aggregation/aggregation.py
CHANGED
@@ -24,7 +24,7 @@ import time
 import re
 import shutil
 import getpass
-
+from sqlalchemy import create_engine
 warnings.filterwarnings('ignore')
 """
 
@@ -1167,7 +1167,24 @@ def upload_dir(path, db_name, collection_name, json_path=None):
                 intersection_keys = dtypes.keys() & old_dt.keys()  # intersection of the two dicts' keys
                 dtypes = {k: dtypes[k] for k in intersection_keys}  # build a new dict from the intersecting keys
                 df = df.astype(dtypes)  # update df dtypes again
+                df.fillna(0, inplace=True)
+
+                # for col in df.columns.tolist():
+                #     df[col] = df[col].apply(lambda x: 0 if str(x) == '' else x)
+                # print(f'{i}/{count}')
+                # sql_engine = create_engine(
+                #     f"mysql+pymysql://{username}:{password}@{host}:{port}/{db_name}")  # create the database engine
+                # df.to_sql(
+                #     name=collection_name,
+                #     con=sql_engine,
+                #     if_exists='append',
+                #     index=False,
+                #     chunksize=1000
+                # )
+
+
 
+                #
                 m.df_to_mysql(df=df, db_name=db_name, table_name=collection_name,
                               move_insert=False,  # delete first, then insert
                               df_sql = True,
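The commented-out block in this hunk is the plain pandas/SQLAlchemy upload path that m.df_to_mysql replaces. A minimal, self-contained sketch of that alternative path, assuming a reachable MySQL server; credentials, database, and table names below are placeholders, not values from the package:

    import pandas as pd
    from sqlalchemy import create_engine

    username, password, host, port = 'user', 'password', '127.0.0.1', 3306  # placeholders
    db_name, collection_name = 'demo_db', 'demo_table'  # placeholders

    df = pd.DataFrame({'日期': ['2024-11-08'], '花费': [2930.73]})
    sql_engine = create_engine(
        f"mysql+pymysql://{username}:{password}@{host}:{port}/{db_name}")
    df.to_sql(
        name=collection_name,
        con=sql_engine,
        if_exists='append',   # appends rows; unlike df_to_mysql, no de-duplication
        index=False,
        chunksize=1000,
    )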
@@ -1201,32 +1218,34 @@ def one_file_to_mysql(file, db_name, table_name):
 
 
 def test():
-    path = 
+    path = r'C:\同步空间\BaiduSyncdisk\原始文件3\天猫推广报表\品销宝'
 
     results = []
     for root, dirs, files in os.walk(path, topdown=False):
         for name in files:
             if name.endswith('.csv') and 'baidu' not in name and '~' not in name:
-                # print(name)
                 # df = pd.read_excel(os.path.join(root, name), header=0)
                 df = pd.read_csv(os.path.join(root, name), encoding='utf-8_sig', header=0, na_filter=False)
                 # print(name)
                 # if len(df) == 0:
                 #     continue
                 # # df.insert(loc=1, column='店铺名称', value='万里马官方旗舰店')
-
-
-                #
-                # df.
+                df['更新时间'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+                df.to_csv(os.path.join(root, name), encoding='utf-8_sig', index=False, header=True)
+                # for col in ['更新时间']:
+                #     if col not in df.columns.tolist():
+                #         print(name)
+                #         df[col] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+                #         df.to_csv(os.path.join(root, name), encoding='utf-8_sig', index=False, header=True)
                 # pattern = re.findall(r'\d{4}-\d{2}-\d{2}_\d{4}-\d{2}-\d{2}', name)[0]
                 # new_name = f'py_xg_店铺销售指标_万里马官方旗舰店_{pattern}.csv'
                 # df.to_csv(os.path.join(root, name), encoding='utf-8_sig', index=False, header=True)
                 # os.remove(os.path.join(root, name))
-            results.append(df)
-    df = pd.concat(results)
-    path = '/Users/xigua/Downloads/手淘搜索_本店引流词'
-    filename = 'py_xg_手淘搜索_本店引流词_万里马官方旗舰店_2024-05_合并.csv'
-    df.to_csv(os.path.join(path, filename), encoding='utf-8_sig', index=False, header=True)
+            # results.append(df)
+    # df = pd.concat(results)
+    # path = '/Users/xigua/Downloads/手淘搜索_本店引流词'
+    # filename = 'py_xg_手淘搜索_本店引流词_万里马官方旗舰店_2024-05_合并.csv'
+    # df.to_csv(os.path.join(path, filename), encoding='utf-8_sig', index=False, header=True)
 
 
@@ -1243,15 +1262,18 @@ if __name__ == '__main__':
     # table_name='超级直播',
     # )
 
-    #
-
-
-
-
-
-
-
+    # test()
+
+    col = 1
+    if col:
+        # upload one directory into the specified database
+        db_name = '生意参谋3'
+        table_name = '商品排行'
+        upload_dir(
+            path=r'C:\同步空间\BaiduSyncdisk\原始文件3\生意参谋\商品排行',
+            db_name=db_name,
+            collection_name=table_name,
+        )
 
 
-    test()
 
mdbq/aggregation/query_data.py
CHANGED
@@ -204,7 +204,7 @@ class MysqlDatasQuery:
             '场景名字': 1,
             '宝贝id': 1,
             '词类型': 1,
-            '
+            '词名字_词包名字': 1,
             '花费': 1,
             '展现量': 1,
             '点击量': 1,
@@ -246,7 +246,7 @@ class MysqlDatasQuery:
         }
         df = self.download.data_to_df(
             db_name='推广数据2',
-            table_name='
+            table_name='超级直播报表_人群',
             start_date=start_date,
             end_date=end_date,
             projection=projection,
@@ -685,7 +685,7 @@ class MysqlDatasQuery:
         }
         df_tm_living = self.download.data_to_df(
             db_name='推广数据2',
-            table_name='
+            table_name='超级直播报表_人群',
             start_date=start_date,
             end_date=pd.to_datetime('2024-04-16'),  # only data before this date is available
             projection=projection,
@@ -866,13 +866,14 @@ class MysqlDatasQuery:
             projection=projection,
         )
         df_dmp.sort_values('日期', ascending=True, ignore_index=True, inplace=True)
-        df_dmp.drop_duplicates(subset=['日期', '人群id', '消耗'], keep='last', inplace=True, ignore_index=True)
+        df_dmp.drop_duplicates(subset=['日期', '人群id', '消耗_元'], keep='last', inplace=True, ignore_index=True)
         df = pd.merge(df_dmp, df_crowd, left_on=['人群id'], right_on=['人群id'], how='left')
         # strip some unneeded characters
         df['用户年龄'] = df['用户年龄'].apply(lambda x: '~'.join(re.findall(r'^(\d+).*-(\d+)岁$', str(x))[0]) if '岁' in str(x) else x)
         df['消费能力等级'] = df['消费能力等级'].apply(lambda x: f'L{''.join(re.findall(r'(\d)', str(x)))}' if '购买力' in str(x) else x)
         # df.to_csv('/Users/xigua/Downloads/test3.csv', index=False, header=True, encoding='utf-8_sig')
         # breakpoint()
+        df.rename(columns={'消耗_元': '消耗'}, inplace=True)
         return df
 
 
@@ -999,7 +1000,8 @@ class GroupBy:
         df_pic_lin = df[df['店铺名称'] == '万里马官方旗舰店']
         df_pic = df_pic_lin.groupby(['日期', '商品id'], as_index=False).agg({'花费': 'sum'})
         df_pic = df_pic[~df_pic['商品id'].isin([''])]  # drop rows where the specified column is empty
-
+        date_obj = datetime.datetime.strptime(f'{year_my}-{last_month.month}-01', '%Y-%m-%d').date()
+        df_pic = df_pic[(df_pic['日期'] >= date_obj)]
         df_pic = df_pic.groupby(['商品id'], as_index=False).agg({'花费': 'sum'})
         df_pic.sort_values('花费', ascending=False, ignore_index=True, inplace=True)
         df_pic.reset_index(inplace=True)
@@ -1166,7 +1168,7 @@ class GroupBy:
             '直接成交金额': float,
         }, errors='raise')
         if is_maximize:
-            df = df.groupby(['日期', '店铺名称', '营销场景', '商品id', '词类型', '
+            df = df.groupby(['日期', '店铺名称', '营销场景', '商品id', '词类型', '词名字_词包名字', '花费', '展现量', '点击量'], as_index=False).agg(
                 **{'加购量': ('加购量', np.max),
                    '成交笔数': ('成交笔数', np.max),
                    '成交金额': ('成交金额', np.max),
@@ -1175,7 +1177,7 @@ class GroupBy:
                 }
             )
         else:
-            df = df.groupby(['日期', '店铺名称', '营销场景', '商品id', '词类型', '
+            df = df.groupby(['日期', '店铺名称', '营销场景', '商品id', '词类型', '词名字_词包名字', '花费', '展现量', '点击量'], as_index=False).agg(
                 **{
                     '加购量': ('加购量', np.min),
                     '成交笔数': ('成交笔数', np.min),
@@ -1185,7 +1187,7 @@ class GroupBy:
                 }
             )
         df.insert(loc=1, column='推广渠道', value='万相台无界版')  # insert a new column into df
-        df['是否品牌词'] = df['
+        df['是否品牌词'] = df['词名字_词包名字'].str.contains('万里马|wanlima', regex=True)
         df['是否品牌词'] = df['是否品牌词'].apply(lambda x: '品牌词' if x else '')
         dir_file = f'\\\\192.168.1.198\\时尚事业部\\01.运营部\\0-电商周报-每周五更新\\分类配置文件.xlsx'
         dir_file2 = '/Volumes/时尚事业部/01.运营部/0-电商周报-每周五更新/分类配置文件.xlsx'
@@ -1196,17 +1198,17 @@ class GroupBy:
         # df_fl.rename(columns={'分类1': '词分类'}, inplace=True)
         df_fl = df_fl[['关键词', '词分类']]
         # merge to pick up the 词分类 info
-        df = pd.merge(df, df_fl, left_on=['
+        df = pd.merge(df, df_fl, left_on=['词名字_词包名字'], right_on=['关键词'], how='left')
         df.pop('关键词')
         df['词分类'].fillna('', inplace=True)
         if '词分类' in df.columns.tolist():
             # this line means the 词分类 read from the file takes priority over the one from the ret_keyword function
             df['词分类'] = df.apply(
-                lambda x: self.ret_keyword(keyword=str(x['
+                lambda x: self.ret_keyword(keyword=str(x['词名字_词包名字']), as_file=False) if x['词分类'] == ''
                 else x['词分类'], axis=1
             )
         else:
-            df['词分类'] = df['
+            df['词分类'] = df['词名字_词包名字'].apply(lambda x: self.ret_keyword(keyword=str(x), as_file=False))
         # df.to_csv('/Users/xigua/Downloads/test.csv', index=False, header=True, encoding='utf-8_sig')
         # breakpoint()
         return df
@@ -1546,6 +1548,7 @@ class GroupBy:
             df.drop_duplicates(subset=['场次id'], keep='first', inplace=True, ignore_index=True)
             return df
         elif '多店推广场景_按日聚合' in table_name:
+            df['日期'] = pd.to_datetime(df['日期'], format='%Y-%m-%d', errors='ignore')  # convert the date column
             df = df.groupby(
                 ['日期', '店铺名称', '营销场景'],
                 as_index=False).agg(
@@ -2073,8 +2076,49 @@ class GroupBy:
         df.to_excel(os.path.join(path, filename + '.xlsx'), index=index, header=header, engine=engine, freeze_panes=freeze_panes)
 
 
-def 
-
+def date_table():
+    """
+    Generate the date table used by pbix
+    """
+    start_date = '2022-01-01'  # start date of the date table
+    yesterday = time.strftime('%Y-%m-%d', time.localtime(time.time() - 86400))
+    dic = pd.date_range(start=start_date, end=yesterday)
+    df = pd.DataFrame(dic, columns=['日期'])
+    df.sort_values('日期', ascending=True, ignore_index=True, inplace=True)
+    df.reset_index(inplace=True)
+    # inplace: add the index to df
+    p = df.pop('index')
+    df['月2'] = df['日期']
+    df['月2'] = df['月2'].dt.month
+    df['日期'] = df['日期'].dt.date  # keep year-month-day, drop hours/minutes/seconds
+    df['年'] = df['日期'].apply(lambda x: str(x).split('-')[0] + '年')
+    df['月'] = df['月2'].apply(lambda x: str(x) + '月')
+    # df.drop('月2', axis=1, inplace=True)
+    mon = df.pop('月2')
+    df['日'] = df['日期'].apply(lambda x: str(x).split('-')[2])
+    df['年月'] = df.apply(lambda x: x['年'] + x['月'], axis=1)
+    df['月日'] = df.apply(lambda x: x['月'] + x['日'] + '日', axis=1)
+    df['第n周'] = df['日期'].apply(lambda x: x.strftime('第%W周'))
+    df['索引'] = p
+    df['月索引'] = mon
+    df.sort_values('日期', ascending=False, ignore_index=True, inplace=True)
+
+    m = mysql.MysqlUpload(
+        username=username,
+        password=password,
+        host=host,
+        port=port,
+    )
+    m.df_to_mysql(
+        df=df,
+        db_name='聚合数据',
+        table_name='日期表',
+        move_insert=True,  # delete first, then insert
+        df_sql=False,  # if True, upload the whole table via df.to_sql, without de-duplication
+        drop_duplicates=False,  # if True, check for duplicates before inserting (slower); otherwise upload directly
+        filename=None,  # used to track processing progress
+        service_database=service_database,  # used to track processing progress
+    )
 
 
 def data_aggregation(months=1, is_juhe=True, less_dict=[]):
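The new date_table() builds a pbix-style date dimension before uploading it. A condensed sketch of just the column-derivation part, assuming only pandas (the upload step and progress tracking are omitted):

    import pandas as pd

    df = pd.DataFrame(pd.date_range('2022-01-01', '2022-01-03'), columns=['日期'])
    df['月索引'] = df['日期'].dt.month
    df['日期'] = df['日期'].dt.date           # keep year-month-day, drop the time part
    df['年'] = df['日期'].apply(lambda x: str(x).split('-')[0] + '年')
    df['月'] = df['月索引'].apply(lambda x: str(x) + '月')
    df['日'] = df['日期'].apply(lambda x: str(x).split('-')[2])
    df['年月'] = df['年'] + df['月']          # e.g. 2022年1月
    df['月日'] = df['月'] + df['日'] + '日'   # e.g. 1月01日
    df['第n周'] = df['日期'].apply(lambda x: x.strftime('第%W周'))
    print(df)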
@@ -2169,7 +2213,7 @@ def data_aggregation(months=1, is_juhe=True, less_dict=[]):
             {
                 '数据库名': '聚合数据',
                 '集合名': '天猫_关键词报表',
-                '唯一主键': ['日期', '推广渠道', '营销场景', '商品id', '花费', '词类型', '
+                '唯一主键': ['日期', '推广渠道', '营销场景', '商品id', '花费', '词类型', '词名字_词包名字',],
                 '数据主体': sdq.tg_gjc(),
             },
             {
@@ -2311,7 +2355,36 @@ def data_aggregation(months=1, is_juhe=True, less_dict=[]):
 
 
 def main():
-
+    # refresh the date table
+    date_table()
+    # refresh the product-year baseline table (属性设置 3 - 货品年份基准)
+    p = products.Products()
+    p.to_mysql()
+
+    system = platform.system()  # local OS
+    host_name = socket.gethostname()  # local hostname
+    conf = myconfig.main()
+    db_list = conf[system][host_name]['mysql']['数据库集']
+    db_list = [item for item in db_list if item != '聚合数据']
+    # clean every database except the aggregated one
+    optimize_data.op_data(
+        db_name_lists=db_list,
+        days=5,
+        is_mongo=True,
+        is_mysql=True,
+    )
+
+    # aggregate the data
+    query_data.data_aggregation(months=3)
+    time.sleep(60)
+
+    # clean the aggregated data; mongodb holds none of it, so only mysql needs cleaning
+    optimize_data.op_data(
+        db_name_lists=['聚合数据'],
+        days=100,
+        is_mongo=False,
+        is_mysql=True,
+    )
 
 
 if __name__ == '__main__':
mdbq/mysql/mysql.py
CHANGED
@@ -6,9 +6,12 @@ import re
 import time
 from functools import wraps
 import warnings
+from unittest.mock import inplace
+
 import pymysql
 import numpy as np
 import pandas as pd
+from markdown_it.rules_inline.backticks import regex
 from sqlalchemy import create_engine
 import os
 import calendar
@@ -41,6 +44,67 @@ warnings.filterwarnings('ignore')
 """
 
 
+def is_valid_date(date_string):
+    """
+    Check whether a string looks like a date; leading zeros are optional and a time part is allowed
+    pure date: returns 1
+    date + time: returns 2
+    """
+    date_pattern = r"^(\d{4})-(0?[1-9]|1[0-2])-(0?[1-9]|[12]\d|3[01])$"
+    match = re.match(date_pattern, str(date_string))  # pure date, e.g. 2024-11-09
+    if match is None:
+        date_pattern = r".*\d+:\d+:\d+$"
+        match = re.match(date_pattern, date_string)  # date + time, e.g. 2024-11-09 00:36:45
+        if match is not None:
+            return 2
+    else:
+        return 1
+
+
+def is_integer(int_str):
+    """ Check whether a string is an integer; thousands separators and scientific notation are allowed """
+    # scientific notation
+    match = re.findall(r'^[-+]?(\d+)\.(\d+)[eE][-+]?(\d+)$', str(int_str))
+    if match:
+        if len(match[0]) == 3:
+            if int(match[0][0]) == 0:  # starts with 0
+                if int(match[0][2]) > 10:  # more than 10 integer digits after conversion
+                    return False
+            else:  # does not start with 0
+                if len(match[0][0]) + int(match[0][2]) > 10:  # more than 10 integer digits after conversion
+                    return False
+            if int(match[0][2]) >= len(match[0][1]):
+                return True
+            else:
+                return False
+    # plain number; thousands separators allowed
+    __pattern = r'^[-+]?\d{1,3}(,\d{3}){0,3}$|^[-+]?\d{1,9}$'
+    return re.match(__pattern, str(int_str)) is not None
+
+
+def count_decimal_places(num_str):
+    """ Count decimal places; scientific notation is allowed """
+    match = re.match(r'^[-+]?\d+(\.\d+)?([eE][-+]?\d+)?$', str(num_str))
+    if match:
+        # scientific notation
+        match = re.findall(r'(\d+)\.(\d+)[eE][-+]?(\d+)$', str(num_str))
+        if match:
+            if len(match[0]) == 3:
+                if int(match[0][2]) < len(match[0][1]):
+                    # count_int: strip leading zeros from the integer part, then count integer digits
+                    count_int = len(re.sub('^0+', '', str(match[0][0]))) + int(match[0][2])
+                    # count the decimal places
+                    count_float = len(match[0][1]) - int(match[0][2])
+                    return count_int, count_float
+        # plain decimal
+        match = re.findall(r'(\d+)\.(\d+)$', str(num_str))
+        if match:
+            count_int = len(re.sub('^0+', '', str(match[0][0])))
+            count_float = len(match[0][1])
+            return count_int, count_float  # decimal places
+    return 0, 0
+
+
 class MysqlUpload:
     def __init__(self, username: str, password: str, host: str, port: int, charset: str = 'utf8mb4'):
         self.username = username
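Traced by hand from the helpers above, the dtype-inference primitives classify sample values like this (a sketch; assumes the three functions are in scope):

    print(is_valid_date('2024-11-9'))            # 1: pure date, leading zero optional
    print(is_valid_date('2024-11-09 00:36:45'))  # 2: date + time
    print(is_valid_date('abc'))                  # None: neither pattern matches
    print(is_integer('1,234'))                   # True: thousands separator allowed
    print(is_integer('1.5e3'))                   # True: scientific notation for 1500
    print(is_integer('12.5'))                    # False: has a fractional part
    print(count_decimal_places('2930.73'))       # (4, 2): integer digits, decimal digits
    print(count_decimal_places('67471'))         # (0, 0): no decimal part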
@@ -68,6 +132,229 @@ class MysqlUpload:
 
         return wrapper
 
+    def dict_to_mysql(self, db_name, table_name, dict_data, icm_update=None, main_key=None, unique_main_key=None, index_length=100, set_type=None):
+        """
+        Insert dict data
+        dict_data: the dict to insert
+        main_key: columns to index, usually the date column; 日期 is indexed by default
+        unique_main_key: columns to give a unique index
+        index_length: index length
+        icm_update: incremental update; when set, main_key is only used to check/create columns and cannot update data
+        set_type: {}
+        """
+        if not main_key:
+            main_key = []
+        if not unique_main_key:
+            unique_main_key = []
+        connection = pymysql.connect(**self.config)  # connect to the database
+        with connection.cursor() as cursor:
+            cursor.execute(f"SHOW DATABASES LIKE '{db_name}'")  # check whether the database exists
+            database_exists = cursor.fetchone()
+            if not database_exists:
+                # create the database if it does not exist
+                if '8.138.27' in str(self.host) or platform.system() == "Linux":  # old Aliyun mysql versions do not support 0900
+                    sql = f"CREATE DATABASE `{db_name}` COLLATE utf8mb4_unicode_ci"
+                    self.config.update({'charset': 'utf8mb4_unicode_ci'})
+                if '192.168.1.100' in str(self.host):
+                    sql = f"CREATE DATABASE `{db_name}`"
+                else:
+                    sql = f"CREATE DATABASE `{db_name}` COLLATE utf8mb4_0900_ai_ci"
+                cursor.execute(sql)
+                connection.commit()
+                print(f"创建Database: {db_name}")
+
+        self.config.update({'database': db_name})  # add/update the config field
+        connection = pymysql.connect(**self.config)  # reconnect to the database
+        with connection.cursor() as cursor:
+            # 1. look up the table; create an empty one if it does not exist
+            sql = "SHOW TABLES LIKE %s;"  # special characters need no escaping here
+            cursor.execute(sql, (table_name))
+            if not cursor.fetchone():
+                sql = f"CREATE TABLE IF NOT EXISTS `{table_name}` (id INT AUTO_INCREMENT PRIMARY KEY);"
+                cursor.execute(sql)
+                print(f'创建 mysql 表: {table_name}')
+
+            # derive the data types from the values of dict_data
+            dtypes, dict_data = self.cover_dict_dtypes(dict_data=dict_data)  # {'店铺名称': 'mediumtext',...}
+            if set_type:
+                dtypes.update(set_type)  # user-defined data types
+
+            # check the columns
+            sql = "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s;"
+            cursor.execute(sql, (db_name, table_name))
+            col_exist = [item['COLUMN_NAME'] for item in cursor.fetchall()]  # all existing columns
+            col_not_exist = [col for col in dict_data.keys() if col not in col_exist]  # missing columns
+            # create any missing column
+            if col_not_exist:  # columns not yet in the table
+                for col in col_not_exist:
+                    # create the column; needs escaping
+                    sql = f"ALTER TABLE `{table_name}` ADD COLUMN `{col}` {dtypes[col]} NOT NULL;"
+                    cursor.execute(sql)
+                    print(f"添加列: {col}({dtypes[col]})")  # add the column with its data type
+
+                    if col in main_key or col == '日期':
+                        sql = f"CREATE INDEX index_name ON `{table_name}`(`{col}`);"
+                        print(f"设置为索引: {col}({dtypes[col]})")
+                        cursor.execute(sql)
+                    if col in unique_main_key:
+                        if dtypes[col] == 'mediumtext':
+                            sql = f"ALTER TABLE {table_name} ADD UNIQUE (`{col}`({index_length}))"
+                        else:
+                            sql = f"ALTER TABLE {table_name} ADD UNIQUE (`{col}`)"
+                        cursor.execute(sql)
+                    # if col in main_key or col in unique_main_key:
+                    #     sql = f"SHOW INDEXES FROM `{table_name}` WHERE `Column_name` = %s"
+                    #     cursor.execute(sql, (col))
+                    #     result = cursor.fetchone()  # check whether the index exists
+                    #     if not result:
+                    #         if col in main_key:
+                    #             sql = f"CREATE INDEX index_name ON `{table_name}`(`{col}`);"
+                    #             print(f"设置为索引: {col}({dtypes[col]})")
+                    #             cursor.execute(sql)
+                    #         elif col in unique_main_key:
+                    #             if dtypes[col] == 'mediumtext':
+                    #                 sql = f"CREATE INDEX UNIQUE index_name ON `{table_name}` (`{col}`({index_length}));"
+                    #             else:
+                    #                 sql = f"CREATE INDEX UNIQUE index_name ON `{table_name}` (`{col}`);"
+                    #             print(f"设置唯一索引: {col}({dtypes[col]})")
+                    #         print(sql)
+                    #         cursor.execute(sql)
+            connection.commit()  # commit the transaction
+            """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+            """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+            # process the data to insert
+            if icm_update:
+                """ Incremental update: make sure the column combination passed via icm_update['主键'] is unique in the table and its values never change or repeat, otherwise rows may be overwritten """
+                sql = 'SELECT COLUMN_NAME FROM information_schema.columns WHERE table_schema = %s AND table_name = %s'
+                cursor.execute(sql, (db_name, {table_name}))
+                columns = cursor.fetchall()
+                cols_exist = [col['COLUMN_NAME'] for col in columns]  # all columns of the table, as a list
+                update_col = [item for item in cols_exist if item not in icm_update and item != 'id']  # every column except the keys
+
+                # unique_keys example: `日期`, `余额`
+                unique_keys = ', '.join(f"`{item}`" for item in update_col)  # column names need escaping
+                condition = []
+                for up_col in icm_update:
+                    condition += [f'`{up_col}` = "{dict_data[up_col]}"']
+                condition = ' AND '.join(condition)  # condition example: `品销宝余额` = '2930.73' AND `短信剩余` = '67471'
+                sql = f"SELECT {unique_keys} FROM `{table_name}` WHERE {condition}"
+                # print(sql)
+                # sql = f"SELECT {unique_keys} FROM `{table_name}` WHERE `创建时间` = '2014-09-19 14:32:33'"
+                cursor.execute(sql)
+                results = cursor.fetchall()  # results is the data fetched from the database
+                if results:  # rows came back, so run the incremental check
+                    for result in results:  # results is database data; dict_data is the incoming data
+                        change_col = []  # columns whose values changed
+                        change_values = []  # the changed values
+                        for col in update_col:
+                            # mysql has a decimal type, so strip trailing zeros before comparing (df truncates 5.00 to 5.0 by default)
+                            df_value = str(dict_data[col])
+                            mysql_value = str(result[col])
+                            if '.' in df_value:
+                                df_value = re.sub(r'0+$', '', df_value)
+                                df_value = re.sub(r'\.$', '', df_value)
+                            if '.' in mysql_value:
+                                mysql_value = re.sub(r'0+$', '', mysql_value)
+                                mysql_value = re.sub(r'\.$', '', mysql_value)
+                            if df_value != mysql_value:  # the incoming data differs from the database
+                                # print(f'{dict_data['日期']}{dict_data['商品id']}{col} 列的值有变化,{str(dict_data[col])} != {str(result[col])}')
+                                change_values += [f"`{col}` = \"{str(dict_data[col])}\""]
+                                change_col.append(col)
+                        not_change_col = [item for item in update_col if item not in change_col]
+                        # change_values is the incoming data that differs from the database, e.g. [`品销宝余额` = '9999.0', `短信剩余` = '888']
+                        if change_values:  # change_values is non-empty, so an update is needed
+                            if not_change_col:
+                                not_change_values = [f'`{col}` = "{str(dict_data[col])}"' for col in not_change_col]
+                                not_change_values = ' AND '.join(
+                                    not_change_values)  # e.g. `短信剩余` = '888' AND `test1` = '93'
+                                # print(change_values, not_change_values)
+                                condition += f' AND {not_change_values}'  # rebuild the full query condition, adding the unchanged columns
+                            change_values = ', '.join(f"{item}" for item in change_values)  # note: item carries no backticks here
+                            sql = "UPDATE `%s` SET %s WHERE %s" % (table_name, change_values, condition)
+                            # print(sql)
+                            cursor.execute(sql)
+                else:  # no rows came back, so insert the data directly
+                    cols = ', '.join(f"`{item}`" for item in dict_data.keys())  # column names need escaping
+                    # data.update({item: f"{data[item]}" for item in data.keys()})  # cast all values to str; not required
+                    values = ', '.join([f'"{item}"' for item in dict_data.values()])  # values need quoting
+                    sql = f"INSERT INTO `{table_name}` ({cols}) VALUES ({values});"
+                    cursor.execute(sql)
+                connection.commit()  # commit to the database
+                connection.close()
+                return
+
+            # build the keys
+            keys_data = ', '.join([f'`{str(item)}`' for item in dict_data.keys()])
+            # build the values
+            values_data = ', '.join(f'"{str(item)}"' for item in dict_data.values())
+            # build the remaining key-value pairs, i.e. the columns to update on duplicates
+            if main_key:
+                for col in main_key:
+                    del dict_data[col]
+            if unique_main_key:
+                for col in unique_main_key:
+                    del dict_data[col]
+            update_datas = ', '.join([f'{k} = VALUES({k})' for k, v in dict_data.items()])
+
+            # build the sql
+            sql = f"INSERT INTO %s (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s" % (table_name, keys_data, values_data, update_datas)
+            cursor.execute(sql)
+            connection.commit()  # commit to the database
+            connection.close()
+
+    def cover_dict_dtypes(self, dict_data):
+        if not dict_data:
+            print(f'mysql.py -> MysqlUpload -> cover_dict_dtypes -> 传入的字典不能为空')
+            return
+        __res_dict = {}
+        new_dict_data = {}
+        for k, v in dict_data.items():
+            k = str(k).lower()
+            k = re.sub(r'[()\-,,$&~^、 ()\"\'“”=·/。》《><!!`]', '_', k, re.IGNORECASE)
+            k = k.replace(')', '')
+            k = re.sub(r'_{2,}', '_', k)
+            k = re.sub(r'_+$', '', k)
+            if str(v) == '':
+                v = 0
+            v = str(v)
+            v = re.sub('^-$|^--$|^nan$|^null$', '0', v, re.I)
+            v = re.sub(',|="|"', '', v, re.I)
+            if re.findall(r'^[-+]?\d+\.?\d*%$', v):
+                v = str(float(v.rstrip("%")) / 100)
+
+            result1 = re.findall(r'编码|_?id|货号|款号|文件大小', k, re.IGNORECASE)
+            result2 = re.findall(r'占比$|投产$|产出$|同比$|环比$|roi$|率$', k, re.IGNORECASE)
+            date_type = is_valid_date(v)  # date/time check
+            int_num = is_integer(v)  # integer check
+            count_int, count_float = count_decimal_places(v)  # decimal check; returns the digit counts
+            if result1:  # JD sku/spu product info
+                __res_dict.update({k: 'mediumtext'})
+            elif str(v) == '':
+                __res_dict.update({k: 'mediumtext'})
+            elif result2:  # decimals
+                __res_dict.update({k: 'decimal(10,4)'})
+            elif date_type == 1:  # pure date
+                __res_dict.update({k: 'DATE'})
+            elif date_type == 2:  # date + time
+                __res_dict.update({k: 'DATETIME'})
+            elif int_num:
+                __res_dict.update({k: 'INT'})
+            elif count_float > 0:
+                if count_int + count_float > 10:
+                    if count_float > 5:
+                        v = round(float(v), 4)
+                    __res_dict.update({k: 'decimal(12,4)'})
+                elif count_float >= 6:
+                    __res_dict.update({k: 'decimal(12,6)'})
+                elif count_float >= 4:
+                    __res_dict.update({k: 'decimal(10,4)'})
+                else:
+                    __res_dict.update({k: 'decimal(10,2)'})
+            else:
+                __res_dict.update({k: 'mediumtext'})
+            new_dict_data.update({k: v})
+        return __res_dict, new_dict_data
+
     @try_except
     def df_to_mysql(self, df, table_name, db_name='远程数据源', icm_update=[], service_database={'xigua_lx': 'mysql'}, move_insert=False, df_sql=False, drop_duplicates=False, filename=None, count=None, json_path=None, reset_id=False):
         """
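A hypothetical call of the new dict_to_mysql, mirroring the method's docstring above and the data example quoted in the next hunk; connection details and the table name are placeholders, not values from the package:

    from mdbq.mysql import mysql

    m = mysql.MysqlUpload(username='user', password='password', host='127.0.0.1', port=3306)
    m.dict_to_mysql(
        db_name='聚合数据',   # created on the fly if missing
        table_name='账户余额',  # placeholder table name
        dict_data={'日期': '2024-08-27', '推广费余额': 33299, '品销宝余额': 2930.73, '短信剩余': 67471},
        icm_update=['日期'],  # per the docstring: incremental update keyed on 日期
    )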
@@ -247,9 +534,7 @@ class MysqlUpload:
         # data is the incoming data to be processed, not database data
         # data example: {'日期': Timestamp('2024-08-27 00:00:00'), '推广费余额': 33299, '品销宝余额': 2930.73, '短信剩余': 67471}
         try:
-
-            # data.update({item: f"{data[item]}" for item in data.keys()})  # cast all values to str; not required
-            values = ', '.join([f'"{item}"' for item in data.values()])  # values need quoting
+
             condition = []
             for k, v in data.items():
                 condition += [f'`{k}` = "{v}"']
@@ -877,3 +1162,7 @@ if __name__ == '__main__':
     data = conf['Windows']['xigua_lx']['mysql']['local']
     username, password, host, port = data['username'], data['password'], data['host'], data['port']
     print(username, password, host, port)
+
+    ss = '2024-11-08'
+    ss= re.sub(r'\\N', '0', ss)
+    print(ss, '111')
{mdbq-2.9.0.dist-info → mdbq-2.9.3.dist-info}/RECORD
CHANGED
@@ -1,11 +1,11 @@
 mdbq/__init__.py,sha256=Il5Q9ATdX8yXqVxtP_nYqUhExzxPC_qk_WXQ_4h0exg,16
 mdbq/__version__.py,sha256=y9Mp_8x0BCZSHsdLT_q5tX9wZwd5QgqrSIENLrb6vXA,62
 mdbq/aggregation/__init__.py,sha256=EeDqX2Aml6SPx8363J-v1lz0EcZtgwIBYyCJV6CcEDU,40
-mdbq/aggregation/aggregation.py,sha256=
+mdbq/aggregation/aggregation.py,sha256=IVh9SFO1yp12qDBuEOWTi9SAytYktKBrsPJNPuDetSM,73254
 mdbq/aggregation/df_types.py,sha256=U9i3q2eRPTDY8qAPTw7irzu-Tlg4CIySW9uYro81wdk,8125
 mdbq/aggregation/mysql_types.py,sha256=YTGyrF9vcRgfkQbpT-e-JdJ7c7VF1dDHgyx9YZRES8w,10934
 mdbq/aggregation/optimize_data.py,sha256=79uwiM2WqNNFxGpE2wKz742PRq-ZGgFjdOV0vgptHdY,3513
-mdbq/aggregation/query_data.py,sha256=
+mdbq/aggregation/query_data.py,sha256=zut8WyyAKTULfGWMltyQYqsVsIaBDUU8E3w2_UL4hbA,103248
 mdbq/bdup/__init__.py,sha256=AkhsGk81SkG1c8FqDH5tRq-8MZmFobVbN60DTyukYTY,28
 mdbq/bdup/bdup.py,sha256=LAV0TgnQpc-LB-YuJthxb0U42_VkPidzQzAagan46lU,4234
 mdbq/clean/__init__.py,sha256=A1d6x3L27j4NtLgiFV5TANwEkLuaDfPHDQNrPBbNWtU,41
@@ -28,7 +28,7 @@ mdbq/log/mylogger.py,sha256=oaT7Bp-Hb9jZt52seP3ISUuxVcI19s4UiqTeouScBO0,3258
 mdbq/mongo/__init__.py,sha256=SILt7xMtQIQl_m-ik9WLtJSXIVf424iYgCfE_tnQFbw,13
 mdbq/mongo/mongo.py,sha256=v9qvrp6p1ZRWuPpbSilqveiE0FEcZF7U5xUPI0RN4xs,31880
 mdbq/mysql/__init__.py,sha256=A_DPJyAoEvTSFojiI2e94zP0FKtCkkwKP1kYUCSyQzo,11
-mdbq/mysql/mysql.py,sha256=
+mdbq/mysql/mysql.py,sha256=tKkgjbOvy5uIn7Z-ws_biS-04-UHnr5rKqNvtWr_Yss,62024
 mdbq/mysql/recheck_mysql.py,sha256=jHQSlQy0PlQ_EYICQv_2nairUX3t6OIwPtSELKIpjkY,8702
 mdbq/mysql/s_query.py,sha256=bgNNIqYLDCHjD5KTFcm6x4u74selpAGs5ouJYuqX86k,8447
 mdbq/mysql/year_month_day.py,sha256=VgewoE2pJxK7ErjfviL_SMTN77ki8GVbTUcao3vFUCE,1523
@@ -45,7 +45,7 @@ mdbq/req_post/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
 mdbq/req_post/req_tb.py,sha256=PexWSCPJNM6Tv0ol4lAWIhlOwsAr_frnjtcdSHCFiek,36179
 mdbq/spider/__init__.py,sha256=RBMFXGy_jd1HXZhngB2T2XTvJqki8P_Fr-pBcwijnew,18
 mdbq/spider/aikucun.py,sha256=jHrdGWBJQaSywx7V-U4YuM6vWkwC5SR5tTOOdB3YU_c,17306
-mdbq-2.9.
-mdbq-2.9.
-mdbq-2.9.
-mdbq-2.9.
+mdbq-2.9.3.dist-info/METADATA,sha256=fL1JR-lJNlMr2cIzQIEO460TetP9yzBerJPCJYnBRQ8,243
+mdbq-2.9.3.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
+mdbq-2.9.3.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
+mdbq-2.9.3.dist-info/RECORD,,
{mdbq-2.9.0.dist-info → mdbq-2.9.3.dist-info}/top_level.txt
File without changes