mdbq 1.2.6__py3-none-any.whl → 1.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mdbq/aggregation/aggregation.py CHANGED
@@ -300,6 +300,7 @@ class DatabaseUpdate:
  df.insert(loc=0, column='日期', value=date)
  df['省份'] = pov
  df['省+市'] = df[['省份', '城市']].apply(lambda x: f'{x["省份"]}-{x["城市"]}', axis=1)
+ df.replace('NAN', 0, inplace=True)
  elif name.endswith('csv') and 'order' in name:
  # 生意经: order data, monthly data only
  pattern = re.findall(r'(.*)(\d{4})(\d{2})(\d{2})-(\d{4})(\d{2})(\d{2})', name)
@@ -639,7 +640,13 @@ class DatabaseUpdate:
  collection_name=collection_name,
  is_file_dtype=True, # default: local file dtypes take precedence: True
  )
- m.df_to_mysql(df=df, db_name=db_name, table_name=collection_name)
+ m.df_to_mysql(
+ df=df,
+ db_name=db_name,
+ table_name=collection_name,
+ df_sql=False, # when True, upload the whole table via df.to_sql without deduplication
+ drop_dup=True # when True, check for duplicates before inserting; otherwise upload directly
+ )
  df_to_json.as_json_file() # write the json file, including the data's dtypes info

  def new_unzip(self, path=None, is_move=None):
@@ -984,4 +991,13 @@ if __name__ == '__main__':
  # dbs={'mysql': True, 'mongodb': False},
  # )

- test2()
+ # test2()
+
+ file = '/Users/xigua/Downloads/余额查询.csv'
+ df = pd.read_csv(file, encoding='utf-8_sig', header=0, na_filter=False)
+ username, password, host, port = get_myconf.select_config_values(target_service='company', database='mysql')
+ m = mysql.MysqlUpload(username=username, password=password, host=host, port=port)
+ m.df_to_mysql(df=df, db_name='test', table_name='增量更新测试',
+ drop_dup=False,
+ icm_update=['日期', '推广费余额']
+ )
mdbq/aggregation/mysql_types.py CHANGED
@@ -206,7 +206,7 @@ def mysql_all_dtypes(db_name=None, table_name=None, path=None):
  time.sleep(0.5)

  d = DataTypes()
- d.json_file = os.path.join(path, 'mysql_types.json') # # json save location
+ d.json_file = os.path.join(path, f'mysql_types.json') # # json save location
  for result in results:
  for db_n, table_n in result.items():
  # print(db_n, table_n, db_name, table_name)
mdbq/aggregation/query_data.py CHANGED
@@ -388,19 +388,22 @@ class GroupBy:
  self.data_tgyj['商品id图片对照表'],
  self.data_tgyj['商品成本']) # do not add a comma here
  pic['商品id'] = pic['商品id'].astype(str)
- df = pd.merge(idbm, pic, how='left', left_on='宝贝id', right_on='商品id')
+ df = pd.merge(idbm, pic, how='left', left_on='宝贝id', right_on='商品id') # merge the id/code table with the picture table
  df = df[['宝贝id', '商家编码', '商品图片']]
- df = pd.merge(df, cost, how='left', left_on='商家编码', right_on='款号')
+ df = pd.merge(df, cost, how='left', left_on='商家编码', right_on='款号') # merge df with the product cost table
  df = df[['宝贝id', '商家编码', '商品图片', '成本价']]
- df = pd.merge(tg, df, how='left', left_on='商品id', right_on='宝贝id')
+ df = pd.merge(tg, df, how='left', left_on='商品id', right_on='宝贝id') # merge the promotion table with df
  df.drop(labels='宝贝id', axis=1, inplace=True)
  if bb_tg is True:
  # 生意经 merged onto the promotion table: the complete table, covering all promotion and sales data for the whole store
  df = pd.merge(syj, df, how='left', left_on=['日期', '宝贝id'], right_on=['日期', '商品id'])
+ df.drop(labels='商品id', axis=1, inplace=True) # because the 宝贝id column in 生意经 is the complete one
+ df.rename(columns={'宝贝id': '商品id'}, inplace=True)
+ # df.to_csv('/Users/xigua/Downloads/test.csv', encoding='utf-8_sig', index=False, header=True)
  else:
  # promotion table merged with 生意经: promotion data is the baseline, so sales data is incomplete
  df = pd.merge(df, syj, how='left', left_on=['日期', '商品id'], right_on=['日期', '宝贝id'])
- df.drop(labels='宝贝id', axis=1, inplace=True)
+ df.drop(labels='宝贝id', axis=1, inplace=True)
  df.drop_duplicates(subset=['日期', '商品id', '花费', '销售额'], keep='last', inplace=True, ignore_index=True)
  df['成本价'] = df['成本价'].astype('float64')
  df['商品成本'] = df.apply(lambda x: (x['成本价'] + x['销售额']/x['销售量'] * 0.11 + 6) * x['销售量'] if x['销售量'] > 0 else 0, axis=1)
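
Note: a minimal standalone sketch (toy data, hypothetical English column names) of the merge chain this hunk annotates — the id/code table picks up pictures, then costs, and the result is left-joined onto the promotion table, which stays the base:

    import pandas as pd

    idbm = pd.DataFrame({'item_id': ['1', '2'], 'sku_code': ['A1', 'B2']})       # id/code mapping table
    pic = pd.DataFrame({'prod_id': ['1', '2'], 'img': ['a.jpg', 'b.jpg']})       # picture table
    cost = pd.DataFrame({'style_no': ['A1'], 'cost_price': [9.9]})               # cost table
    tg = pd.DataFrame({'prod_id': ['1', '1', '2'], 'spend': [10, 20, 30]})       # promotion table

    df = pd.merge(idbm, pic, how='left', left_on='item_id', right_on='prod_id')  # ids + pictures
    df = df[['item_id', 'sku_code', 'img']]
    df = pd.merge(df, cost, how='left', left_on='sku_code', right_on='style_no') # + costs
    df = df[['item_id', 'sku_code', 'img', 'cost_price']]
    df = pd.merge(tg, df, how='left', left_on='prod_id', right_on='item_id')     # promotion rows as the base
    df.drop(labels='item_id', axis=1, inplace=True)                              # drop the duplicated join key
    print(df)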
@@ -425,6 +428,8 @@ class GroupBy:
  path = os.path.join(self.output, path)
  if not os.path.exists(path):
  os.makedirs(path)
+ if filename.endswith('.csv'):
+ filename = filename[:-4]
  if st_ascend and ascend:
  try:
  df.sort_values(st_ascend, ascending=ascend, ignore_index=True, inplace=True)
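
Note: `filename.endswith('.csv')` / `filename[:-4]` only strips a lowercase suffix; a hedged alternative sketch (not what the package does) that strips the extension case-insensitively via os.path.splitext:

    import os

    def strip_csv_suffix(filename: str) -> str:
        # split off the extension and drop it only when it is .csv in any casing
        root, ext = os.path.splitext(filename)
        return root if ext.lower() == '.csv' else filename

    print(strip_csv_suffix('报表.csv'))  # -> 报表
    print(strip_csv_suffix('报表.CSV'))  # -> 报表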
@@ -510,47 +515,71 @@ def data_aggregation(service_databases=[{}]):
  {
  '数据库名': '聚合数据',
  '集合名': '宝贝主体报表',
+ '唯一主键': ['日期', '推广渠道', '营销场景', '商品id', '花费'],
  '数据主体': sdq.tg_wxt(),
  },
  {
  '数据库名': '聚合数据',
  '集合名': '天猫生意经_宝贝指标',
+ '唯一主键': ['日期', '宝贝id'],
  '数据主体': sdq.syj(),
  },
  {
  '数据库名': '聚合数据',
  '集合名': '天猫_店铺来源_日数据',
+ '唯一主键': ['日期', '一级来源', '二级来源', '三级来源'],
  '数据主体': sdq.dplyd(),
  },
  {
  '数据库名': '聚合数据',
  '集合名': '商品id编码表',
+ '唯一主键': ['宝贝id'],
  '数据主体': sdq.idbm(),
  },
  {
  '数据库名': '聚合数据',
  '集合名': '商品id图片对照表',
+ '唯一主键': ['商品id'],
  '数据主体': sdq.sp_picture(),
  },
  {
  '数据库名': '聚合数据',
  '集合名': '商品成本',
+ '唯一主键': ['款号'],
  '数据主体': sdq.sp_cost(),
  },
  ]
  for items in data_dict: # iterate over the returned results
- db_name, table_name, df = items['数据库名'], items['集合名'], items['数据主体']
+ db_name, table_name, unique_key_list, df = items['数据库名'], items['集合名'], items['唯一主键'], items['数据主体']
  df = g.groupby(df=df, table_name=table_name, is_maximize=True) # 2. aggregate the data
- # g.as_csv(df=df, filename=table_name + '.csv')
- m.df_to_mysql(df=df, db_name=db_name, table_name=table_name, drop_dup=True) # 3. write back to the database
+ g.as_csv(df=df, filename=table_name + '.csv') # export csv
+ m.df_to_mysql(
+ df=df,
+ db_name=db_name,
+ table_name=table_name,
+ drop_dup=False,
+ icm_update=unique_key_list
+ ) # 3. write back to the database
  res = g.performance(bb_tg=True) # profit/loss table; depends on the other tables, so built separately
- m.df_to_mysql(df=res, db_name='聚合数据', table_name='_全店商品销售', drop_dup=True)
+ m.df_to_mysql(
+ df=res,
+ db_name='聚合数据',
+ table_name='_全店商品销售',
+ drop_dup=False,
+ icm_update=['日期', '商品id'] # set the unique primary key
+ )
  res = g.performance(bb_tg=False) # profit/loss table; depends on the other tables, so built separately
- m.df_to_mysql(df=res, db_name='聚合数据', table_name='_推广商品销售', drop_dup=True)
+ m.df_to_mysql(
+ df=res,
+ db_name='聚合数据',
+ table_name='_推广商品销售',
+ drop_dup=False,
+ icm_update=['日期', '商品id'] # set the unique primary key
+ )

  # optimize_data.op_data(service_databases=service_databases, days=3650) # immediately start cleanup of the aggregated data


  if __name__ == '__main__':
- data_aggregation(service_databases=[{'home_lx': 'mysql'}])
+ data_aggregation(service_databases=[{'company': 'mysql'}])
  # optimize_data.op_data(service_databases=[{'company': 'mysql'}], days=3650) # immediately start cleanup of the aggregated data
mdbq/clean/data_clean.py CHANGED
@@ -1136,15 +1136,15 @@ class DataClean:
  if not path:
  path = self.path

- if self.set_up_to_mogo:
- username, password, host, port = get_myconf.select_config_values(target_service='home_lx',
- database='mongodb')
- d = mongo.UploadMongo(username=username, password=password, host=host, port=port,
- drop_duplicates=False
- )
- if self.set_up_to_mysql:
- username, password, host, port = get_myconf.select_config_values(target_service='home_lx', database='mysql')
- m = mysql.MysqlUpload(username=username, password=password, host=host, port=port)
+ # if self.set_up_to_mogo:
+ # username, password, host, port = get_myconf.select_config_values(target_service='home_lx',
+ # database='mongodb')
+ # d = mongo.UploadMongo(username=username, password=password, host=host, port=port,
+ # drop_duplicates=False
+ # )
+ # if self.set_up_to_mysql:
+ # username, password, host, port = get_myconf.select_config_values(target_service='home_lx', database='mysql')
+ # m = mysql.MysqlUpload(username=username, password=password, host=host, port=port)
  new_save_path = os.path.join(self.source_path, '属性设置', '商品素材')
  for root, dirs, files in os.walk(path, topdown=False):
  for name in files:
@@ -1181,17 +1181,17 @@ class DataClean:
  )
  # mysql may change df column names, so save the csv before uploading to mysql
  self.save_to_csv(df, new_save_path, new_name, encoding='utf-8_sig')
- try:
- if self.set_up_to_mogo:
- d.df_to_mongo(df=df, db_name=db_name, collection_name=collection_name)
- if self.set_up_to_mysql:
- m.df_to_mysql(df=df, db_name=db_name, tabel_name=collection_name)
- except Exception as e:
- print(e)
+ # try:
+ # if self.set_up_to_mogo:
+ # d.df_to_mongo(df=df, db_name=db_name, collection_name=collection_name)
+ # if self.set_up_to_mysql:
+ # m.df_to_mysql(df=df, db_name=db_name, tabel_name=collection_name)
+ # except Exception as e:
+ # print(e)
  os.remove(os.path.join(root, name))
- if self.set_up_to_mogo:
- if d.client:
- d.client.close() # the database connection must be closed manually
+ # if self.set_up_to_mogo:
+ # if d.client:
+ # d.client.close() # the database connection must be closed manually

  # @try_except
  def new_unzip(self, path=None, is_move=None):
mdbq/dataframe/converter.py CHANGED
@@ -61,7 +61,8 @@ class DataFrameConverter(object):
  # convert date-like columns to datetime type
  value = df.loc[0, col]
  if value:
- res = re.match(r'\d{4}-\d{2}-\d{2}|\d{4}-\d{2}-\d{2} |\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}', str(value))
+ res = re.match(r'\d{4}-\d{2}-\d{2}|\d{4}-\d{2}-\d{2} |\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}'
+ r'|\d{4}/\d{1}/\d{1}|\d{4}/\d{1}/\d{2}|\d{4}/\d{2}/\d{1}|\d{4}/\d{2}/\d{2}', str(value))
  if res:
  try:
  df[col] = df[col].apply(lambda x: pd.to_datetime(x))
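
Note: a quick standalone check (sample values invented) of what the widened pattern accepts — the appended alternatives add slash dates such as 2024/1/5 alongside the original dash forms:

    import re

    pattern = (r'\d{4}-\d{2}-\d{2}|\d{4}-\d{2}-\d{2} |\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}'
               r'|\d{4}/\d{1}/\d{1}|\d{4}/\d{1}/\d{2}|\d{4}/\d{2}/\d{1}|\d{4}/\d{2}/\d{2}')

    for value in ['2024-08-27', '2024-08-27 10:30:00', '2024/1/5', '2024/01/15', 'not a date']:
        print(value, '->', bool(re.match(pattern, value)))  # only the last prints False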
mdbq/mysql/mysql.py CHANGED
@@ -10,6 +10,7 @@ import pymysql
  import numpy as np
  import pandas as pd
  import sqlalchemy.types
+ from macholib.mach_o import rpath_command
  from more_itertools.more import iequals
  from pandas.core.dtypes.common import INT64_DTYPE
  from sqlalchemy import create_engine
@@ -60,15 +61,17 @@ class MysqlUpload:
  }
  self.filename = None

- def df_to_mysql(self, df, table_name, db_name='远程数据源', df_sql=False, drop_dup=True, drop_duplicates=False, filename=None, count=None):
+ def df_to_mysql(self, df, table_name, db_name='远程数据源', icm_update=[], icm_up=[], df_sql=False, drop_dup=True, drop_duplicates=False, filename=None, count=None):
  """
  Write the df to the database
  db_name: database name
  table_name: collection/table name
- df_sql: upload the whole table via df.to_sql; does not deduplicate
- drop_duplicates: only for aggregated data; do not set this parameter in other cases
+ df_sql: a temporary parameter; when True, upload the whole table via df.to_sql without deduplication
+ drop_duplicates: when True (only for aggregated data); do not set this parameter in other cases
  drop_dup: when True, check for duplicate rows before inserting; otherwise upload directly
  filename: pass this parameter to make it easier to locate the file that caused an error
+ icm_update: incremental update, used for aggregated data; do not use it on raw files, and set drop_dup to False when setting this parameter
+ using incremental updates: the columns passed in icm_update must be the table's unique primary key, whose values never change and never repeat; otherwise data may be wrongly overwritten
  """
  self.filename = filename
  if isinstance(df, pd.DataFrame):
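
Note: a usage sketch of the two write modes the docstring describes, modelled on the package's own __main__ example (connection values are placeholders):

    import pandas as pd
    from mdbq.mysql import mysql

    m = mysql.MysqlUpload(username='user', password='pass', host='127.0.0.1', port=3306)
    df = pd.DataFrame({'日期': ['2024-08-27'], '推广费余额': [33299]})

    # deduplicated insert: skip rows that already exist verbatim
    m.df_to_mysql(df=df, db_name='test', table_name='增量更新测试', drop_dup=True)

    # incremental update: treat ['日期'] as the unique key and UPDATE changed
    # columns instead of inserting duplicates (drop_dup must be False)
    m.df_to_mysql(df=df, db_name='test', table_name='增量更新测试',
                  drop_dup=False, icm_update=['日期'])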
@@ -168,6 +171,7 @@ class MysqlUpload:

  # print(cl, db_n, tb_n)
  # these results are returned so that the json file is written only after the columns are added, otherwise the types info cannot be read
+ # ⚠️ the mysql_all_dtypes function reads only the home_lx database info by default; it does not read other systems
  if cl and db_n and tb_n:
  mysql_types.mysql_all_dtypes(db_name=db_name, table_name=table_name) # update one table's dtypes
  elif cl and db_n:
@@ -190,8 +194,10 @@

  datas = df.to_dict(orient='records')
  for data in datas:
+ # data is the incoming row to be processed, not data from the database
+ # data example: {'日期': Timestamp('2024-08-27 00:00:00'), '推广费余额': 33299, '品销宝余额': 2930.73, '短信剩余': 67471}
  try:
- cols = ', '.join(f"`{item}`" for item in data.keys()) # escape the column names
+ cols = ', '.join(f"`{item}`" for item in data.keys()) # column names need escaping
  # data.update({item: f"{data[item]}" for item in data.keys()}) # convert all values to strings; not required
  values = ', '.join([f"'{item}'" for item in data.values()]) # values must be wrapped in single quotes ''
  condition = []
@@ -200,7 +206,7 @@
  condition = ' AND '.join(condition) # build the query condition
  # print(condition)

- if drop_dup:
+ if drop_dup: # deduplicated insert
  sql = f"SELECT {cols} FROM `{table_name}` WHERE {condition}"
  # sql = f"SELECT {cols} FROM `{table_name}` WHERE `创建时间` = '2014-09-19 14:32:33'"
  cursor.execute(sql)
@@ -210,6 +216,76 @@
  cursor.execute(sql)
  # else:
  # print(f'重复数据不插入: {condition[:50]}...')
+ elif icm_update: # incremental update
+ """ incremental update: the columns passed via icm_update must be the table's unique primary key, whose values never change and never repeat; otherwise data may be overwritten """
+ sql = 'SELECT COLUMN_NAME FROM information_schema.columns WHERE table_schema = %s AND table_name = %s'
+ cursor.execute(sql, (db_name, {table_name}))
+ columns = cursor.fetchall()
+ cols_exist = [col['COLUMN_NAME'] for col in columns] # all columns of the table, returned as a list
+ update_col = [item for item in cols_exist if item not in icm_update and item != 'id'] # all columns other than the primary key
+
+ # unique_keys example: `日期`, `推广费余额`
+ unique_keys = ', '.join(f"`{item}`" for item in update_col) # column names need escaping
+ condition = []
+ for up_col in icm_update:
+ condition += [f"`{up_col}` = '{data[up_col]}'"]
+ condition = ' AND '.join(condition) # condition example: `品销宝余额` = '2930.73' AND `短信剩余` = '67471'
+ sql = f"SELECT {unique_keys} FROM `{table_name}` WHERE {condition}"
+ # print(sql)
+ # sql = f"SELECT {unique_keys} FROM `{table_name}` WHERE `创建时间` = '2014-09-19 14:32:33'"
+ cursor.execute(sql)
+ results = cursor.fetchall() # results is the data fetched from the database
+ if results: # rows were returned, so run the incremental check
+ for result in results: # result is database data; data is the incoming row
+ not_change_col = []
+ change_values = []
+ for col in update_col:
+ # mysql has decimal types, so strip trailing zeros before comparing (df truncates the decimal 5.00 to 5.0 by default)
+ df_value = str(data[col])
+ mysql_value = str(result[col])
+ if '.' in df_value:
+ df_value = re.sub('0+$', '', df_value)
+ df_value = re.sub('\.$', '', df_value)
+ if '.' in mysql_value:
+ mysql_value = re.sub('0+$', '', mysql_value)
+ mysql_value = re.sub('\.$', '', mysql_value)
+ if df_value != mysql_value: # the incoming value differs from the database
+ # print(f'{data['日期']}{data['商品id']}{col} 列的值有变化,{str(data[col])} != {str(result[col])}')
+ change_values += [f"`{col}` = '{str(data[col])}'"]
+ not_change_col += [item for item in update_col if item != col]
+ # change_values holds the incoming df values that differ from the database, e.g.: [`品销宝余额` = '9999.0', `短信剩余` = '888']
+ if change_values: # non-empty change_values means the row needs updating
+ not_change_values = [f"`{col}` = '{str(data[col])}'" for col in not_change_col]
+ not_change_values = ' AND '.join(not_change_values) # example: `短信剩余` = '888' AND `test1` = '93'
+ # print(change_values, not_change_values)
+ condition += f' AND {not_change_values}' # rebuild the full query condition, adding the unchanged columns to it
+ change_values = ', '.join(f"{item}" for item in change_values) # note: no backticks around item here
+ sql = f"UPDATE {table_name} SET {change_values} WHERE {condition}"
+ # print(sql)
+ cursor.execute(sql)
+ else: # no rows returned, so insert the data directly
+ sql = f"INSERT INTO `{table_name}` ({cols}) VALUES ({values});"
+ cursor.execute(sql)
+ # elif icm_up:
+ # sql = 'SELECT COLUMN_NAME FROM information_schema.columns WHERE table_schema = %s AND table_name = %s'
+ # cursor.execute(sql, (db_name, {table_name}))
+ # columns = cursor.fetchall()
+ # cols_exist = [col['COLUMN_NAME'] for col in columns] # all columns of the table, returned as a list
+ # cols_exist = [item for item in cols_exist if item != 'id']
+ # update_col = [item for item in cols_exist if item not in icm_up] # all columns other than the primary key
+ #
+ # unique_keys = ', '.join([f"`{item}`" for item in cols_exist])
+ # unique_keys_values = ', '.join([f"'{data[item]}'" for item in cols_exist])
+ #
+ # change_values = []
+ # for col in update_col:
+ # change_values += [f"`{col}` = '{str(data[col])}'"]
+ # change_values = ', '.join(f"{item}" for item in change_values) # note: no backticks around item here
+ # # print(change_values)
+ # sql = f"INSERT INTO `{table_name}` ({unique_keys}) VALUES ({unique_keys_values}) ON DUPLICATE KEY UPDATE {change_values};"
+ # print(sql)
+ # # cursor.execute(sql)
+
  else:
  sql = f"INSERT INTO `{table_name}` ({cols}) VALUES ({values});"
  cursor.execute(sql)
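
Note: the comparison above strips trailing zeros so that pandas floats ('5.0') compare equal to MySQL DECIMAL text ('5.00'); a minimal standalone sketch of just that normalization step:

    import re

    def normalize(value) -> str:
        # stringify, then drop trailing zeros and any dangling '.' on decimal-looking values
        s = str(value)
        if '.' in s:
            s = re.sub(r'0+$', '', s)
            s = re.sub(r'\.$', '', s)
        return s

    print(normalize('5.00') == normalize(5.0))        # True: same number, different renderings
    print(normalize('2930.73') == normalize(2930.7))  # False: genuinely different values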
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mdbq
- Version: 1.2.6
+ Version: 1.2.8
  Home-page: https://pypi.org/project/mdbsql
  Author: xigua,
  Author-email: 2587125111@qq.com
@@ -1,15 +1,15 @@
  mdbq/__init__.py,sha256=Il5Q9ATdX8yXqVxtP_nYqUhExzxPC_qk_WXQ_4h0exg,16
  mdbq/__version__.py,sha256=y9Mp_8x0BCZSHsdLT_q5tX9wZwd5QgqrSIENLrb6vXA,62
  mdbq/aggregation/__init__.py,sha256=EeDqX2Aml6SPx8363J-v1lz0EcZtgwIBYyCJV6CcEDU,40
- mdbq/aggregation/aggregation.py,sha256=mBgIY7afloW8H5qoBy56vCabIQRxVvAhrRZgGbZUxFQ,55791
+ mdbq/aggregation/aggregation.py,sha256=ukOtdTJNXoCM0M1Nhrax4J5rJoWLSVYCw55TnrNStVc,56697
  mdbq/aggregation/df_types.py,sha256=rHLIgv82PJSFmDvXkZyOJAffXkFyyMyFO23w9tUt8EQ,7525
- mdbq/aggregation/mysql_types.py,sha256=umVixmbFZM63k-QhVWLvOuhcAde4P_oDKbdo8ry2O9w,10633
+ mdbq/aggregation/mysql_types.py,sha256=_XIqpaX_qmqolFlGywMYfvBn32u8MbPCaX6n7rQOVRQ,10634
  mdbq/aggregation/optimize_data.py,sha256=jLAWtxPUuhpo4XTVrhKtT4xK3grs7r73ePQfLhxlu1I,779
- mdbq/aggregation/query_data.py,sha256=fg_9OdNSwHbo9vhK1pAKOazHFHZfE9_rBxRyQIWJX9U,25694
+ mdbq/aggregation/query_data.py,sha256=kNX9htViFN0EnpF7D_eOQtTWy8BIa5-yJmJiqY7f8ds,27083
  mdbq/bdup/__init__.py,sha256=AkhsGk81SkG1c8FqDH5tRq-8MZmFobVbN60DTyukYTY,28
  mdbq/bdup/bdup.py,sha256=LAV0TgnQpc-LB-YuJthxb0U42_VkPidzQzAagan46lU,4234
  mdbq/clean/__init__.py,sha256=A1d6x3L27j4NtLgiFV5TANwEkLuaDfPHDQNrPBbNWtU,41
- mdbq/clean/data_clean.py,sha256=tcYu4tFkBm5jzSo6KHWyfyLEJ-s6EO_Za-fvakE-Jh0,85610
+ mdbq/clean/data_clean.py,sha256=SjfKGhDUh4hv93J1nbfYTQy_sw-8IuGLSOyuY6Xu8QA,85648
  mdbq/company/__init__.py,sha256=qz8F_GsP_pMB5PblgJAUAMjasuZbOEp3qQOCB39E8f0,21
  mdbq/company/copysh.py,sha256=i8f8YxmUg-EIzQR-ZHTtnC1A5InwsRtY1_sIsCznVp8,16363
  mdbq/config/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
@@ -18,13 +18,13 @@ mdbq/config/products.py,sha256=9gqXJMsw8KKuD4Xs6krNgcF7AuWDvV7clI6wVi3QjcA,4260
  mdbq/config/set_support.py,sha256=xkZCX6y9Bq1ppBpJAofld4B2YtchA7fl0eT3dx3CrSI,777
  mdbq/config/update_conf.py,sha256=taL3ZqKgiVWwUrDFuaYhim9a72Hm4BHRhhDscJTziR8,4535
  mdbq/dataframe/__init__.py,sha256=2HtCN8AdRj53teXDqzysC1h8aPL-mMFy561ESmhehGQ,22
- mdbq/dataframe/converter.py,sha256=BAst61HvtXqN3yWguia47zNY19c-wpby8CsdS48PC6g,3592
+ mdbq/dataframe/converter.py,sha256=w0-gGJnIajGIhOgYGkCvc0JMcmxIHNpgPf_bgWUSOG4,3699
  mdbq/log/__init__.py,sha256=Mpbrav0s0ifLL7lVDAuePEi1hJKiSHhxcv1byBKDl5E,15
  mdbq/log/mylogger.py,sha256=oaT7Bp-Hb9jZt52seP3ISUuxVcI19s4UiqTeouScBO0,3258
  mdbq/mongo/__init__.py,sha256=SILt7xMtQIQl_m-ik9WLtJSXIVf424iYgCfE_tnQFbw,13
  mdbq/mongo/mongo.py,sha256=v9qvrp6p1ZRWuPpbSilqveiE0FEcZF7U5xUPI0RN4xs,31880
  mdbq/mysql/__init__.py,sha256=A_DPJyAoEvTSFojiI2e94zP0FKtCkkwKP1kYUCSyQzo,11
- mdbq/mysql/mysql.py,sha256=KvUQflP5sYOECTHOs2Fs9ABcQvgPCbBnAX2ZlE3JjgY,37544
+ mdbq/mysql/mysql.py,sha256=4Omt9Su0Cv-oRDGBKUi4_62wbs8OfDE5ssoHIWn3Kys,44328
  mdbq/mysql/s_query.py,sha256=a33aYhW6gAnspIZfQ7l23ePln9-MD1f_ukypr5M0jd8,8018
  mdbq/mysql/year_month_day.py,sha256=VgewoE2pJxK7ErjfviL_SMTN77ki8GVbTUcao3vFUCE,1523
  mdbq/other/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
@@ -35,7 +35,7 @@ mdbq/pbix/__init__.py,sha256=Trtfaynu9RjoTyLLYBN2xdRxTvm_zhCniUkVTAYwcjo,24
  mdbq/pbix/pbix_refresh.py,sha256=JUjKW3bNEyoMVfVfo77UhguvS5AWkixvVhDbw4_MHco,2396
  mdbq/pbix/refresh_all.py,sha256=tgy762608HMaXWynbOURIf2UVMuSPybzrDXQnOOcnZU,6102
  mdbq/spider/__init__.py,sha256=RBMFXGy_jd1HXZhngB2T2XTvJqki8P_Fr-pBcwijnew,18
- mdbq-1.2.6.dist-info/METADATA,sha256=_s1z5j_Q_dSi4lrw46NcpwMlgz5TkZnndOmWp4290Mk,245
- mdbq-1.2.6.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
- mdbq-1.2.6.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
- mdbq-1.2.6.dist-info/RECORD,,
+ mdbq-1.2.8.dist-info/METADATA,sha256=m6-ftUmS0npMhvz1brisQdXGN4Kc7jTAXBueFeE4HkE,245
+ mdbq-1.2.8.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+ mdbq-1.2.8.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
+ mdbq-1.2.8.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (70.1.0)
+ Generator: bdist_wheel (0.44.0)
  Root-Is-Purelib: true
  Tag: py3-none-any