mdbq 3.3.16__py3-none-any.whl → 3.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mdbq/aggregation/datashow.py CHANGED
@@ -91,7 +91,9 @@ else:
 
 class DataShow:
     def __init__(self):
-        self.path = '/Users/xigua/Downloads'
+        self.path = '/Users/xigua/Downloads/html文件'
+        if not os.path.isdir(self.path):
+            os.makedirs(self.path)
         root = tk.Tk()
         self.screen_width = root.winfo_screenwidth()
         self.screen_height = root.winfo_screenheight()
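Note on the hunk above: the isdir-then-makedirs pair works, but `os.makedirs` can do both steps itself. A one-line equivalent, shown here as a sketch using the same path:

    import os

    # exist_ok makes the call idempotent and avoids the check-then-create race
    os.makedirs('/Users/xigua/Downloads/html文件', exist_ok=True)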
@@ -119,100 +121,143 @@ class DataShow:
         df = pd.concat(__res, ignore_index=True)
         return df
 
-    def dpll_bak(self, db_name='聚合数据', table_name='店铺流量来源构成', pro_list=None, filename='店铺流量来源'):
-        if not pro_list:
-            pro_list = ['日期', '三级来源', '访客数']
-        df = self.getdata(db_name=db_name, table_name=table_name, pro_list=pro_list, start_date='2024-11-01', end_date=self.end_date)
-        if len(df) == 0:
-            print(f'数据不能为空: {table_name}')
-            return
-        df = df[df['三级来源'] != '汇总']
-        df['日期'] = pd.to_datetime(df['日期'])
-        today = datetime.date.today()
-
-        def st_date(num=1):
-            return pd.to_datetime(today - datetime.timedelta(days=num))
-        max_date = df['日期'].max().strftime('%Y-%m-%d')
-        df1 = df[df['日期'] >= st_date(1)]
-        df2 = df[df['日期'] >= st_date(7)]
-        df3 = df[df['日期'] >= st_date(30)]
-        df2 = df2.groupby(
-            ['三级来源'],
-            as_index=False).agg(
-            **{
-                '访客数': ('访客数', np.sum),
-            }
+    def pov_city(self, db_name='生意经3', filename='销售地域分布', start_date=None, end_date=None, percentage=None):
+        """
+        生意经: sales analysis by province and city
+        """
+        if not start_date:
+            start_date = self.start_date
+        if not end_date:
+            end_date = self.end_date
+        pov_set = self.getdata(
+            db_name='属性设置3',
+            table_name=f'城市等级',
+            pro_list=[],
+            start_date=start_date,
+            end_date=end_date
         )
-        df3 = df3.groupby(
-            ['三级来源'],
-            as_index=False).agg(
-            **{
-                '访客数': ('访客数', np.sum),
-            }
+        # print(pov_set)
+        # cities
+        pro_list = ['日期', '店铺名称', '城市', '销售额', '退款额']
+        year = datetime.datetime.today().year
+        df_city = self.getdata(
+            db_name=db_name,
+            table_name=f'地域分析_城市_{year}',
+            pro_list=pro_list,
+            start_date=start_date,
+            end_date=end_date
+        )
+        df_city = df_city[df_city['店铺名称'] == '万里马官方旗舰店']
+        df_city = df_city.groupby(['店铺名称', '城市'], as_index=False).agg(
+            **{'销售额': ('销售额', np.sum), '退款额': ('退款额', np.sum)})
+        df_city = df_city[df_city['销售额'] > 0]
+
+        # merge the city-tier data into the df
+        pov_set = pov_set[['城市等级', '城市']]
+        pov_set.drop_duplicates(subset='城市', keep='last', inplace=True, ignore_index=True)
+        df_city = pd.merge(df_city, pov_set, left_on=['城市'], right_on=['城市'], how='left')
+        df_level = df_city.groupby(['店铺名称', '城市等级'], as_index=False).agg(
+            **{'销售额': ('销售额', np.sum), '退款额': ('退款额', np.sum)})
+        data_list = [('销售 按城市等级', df_level['城市等级'].tolist(), df_level['销售额'].tolist())]
+        if percentage:
+            print(df_city['销售额'].sum())
+            return
+        df_city1 = df_city[df_city['销售额'] > int(percentage)]
+        data_list += ('销售额top城市', df_city1['城市'].tolist(), df_city1['销售额'].tolist())
+        df_city2 = df_city[df_city['退款额'] > int(percentage)]
+        data_list += ('退款额top城市', df_city2['城市'].tolist(), df_city2['退款额'].tolist())
+
+        # provinces
+        pro_list = ['日期', '店铺名称', '省份', '销售额', '退款额']
+        year = datetime.datetime.today().year
+        df_pov = self.getdata(
+            db_name=db_name,
+            table_name=f'地域分析_省份_{year}',
+            pro_list=pro_list,
+            start_date=start_date,
+            end_date=end_date
         )
-        # print(df)
-        labels1 = df1['三级来源'].tolist()
-        values1 = df1['访客数'].tolist()
-        labels2 = df2['三级来源'].tolist()
-        values2 = df2['访客数'].tolist()
-        labels3 = df3['三级来源'].tolist()
-        values3 = df3['访客数'].tolist()
-
-        def make_sub(data_list, num):
-            # create a single-row, n-column grid layout
-            t_p = []
-            for i in range(num):
-                t_p.extend([{"type": "pie"}])
-            fig = make_subplots(rows=1, cols=num, specs=[t_p])
-            pie_title = {1: 1, 2: 7, 3: 30}
-            i = 1
-            for item in data_list:
-                # compute each slice's share and find which slices to keep
-                total = sum(item['值'])
-                # compute each slice's share and find which slices to keep
-                threshold_percentage = 0.1  # threshold percentage
-                filtered_indices = [i for i, value in enumerate(item['值']) if
-                                    (value / total) * 100 >= threshold_percentage]
-                # extract the labels and values of the kept slices
-                filtered_labels = [item['键'][i] for i in filtered_indices]
-                filtered_values = [item['值'][i] for i in filtered_indices]
+        df_pov = df_pov[df_pov['店铺名称'] == '万里马官方旗舰店']
+        # print(df_pov[df_pov['省份'] == '广东'])
+        df_pov = df_pov.groupby(['店铺名称', '省份'], as_index=False).agg(
+            **{'销售额': ('销售额', np.sum), '退款额': ('退款额', np.sum)})
+        if percentage:
+            df_pov1 = df_pov[df_pov['销售额'] > int(percentage)]
+            data_list += [('销售 按省份', df_pov1['省份'].tolist(), df_pov1['销售额'].tolist())]  # append list items
+            df_pov2 = df_pov[df_pov['退款额'] > int(percentage)]
+            data_list += [('退款 按省份', df_pov2['省份'].tolist(), df_pov2['退款额'].tolist())]  # append list items
 
-                # add the pie chart
-                fig.add_trace(
-                    go.Pie(labels=filtered_labels, values=filtered_values, name=f'pie {i}', textinfo='label+percent'),
-                    row=1, col=i)
-                # fig.add_trace(go.Pie(labels=item['键'], values=item['值'], name=f'最近{pie_title[i]}天', textinfo='label+percent'), row=1, col=i)
-                fig.add_annotation(
-                    text=f'最近{pie_title[i]}天',
-                    x=0.15 + 0.35 * (i - 1),
-                    y=0.98,
-                    xref='paper',  # relative to the whole chart area
-                    yref='paper',
-                    showarrow=True,  # show the arrow
-                    align="left",  # text alignment
-                    font=dict(size=16),
-                )
-                i += 1
-            fig.update_layout(
-                title_text=f'店铺流量来源 最近一天: {max_date}',
-                xaxis_title='X Axis',
-                yaxis_title='Y Axis',
-                # width=self.screen_width // 1.4,
-                # height=self.screen_width // 2,
-                margin=dict(
-                    l=100,  # left margin
-                    r=300,
-                    t=100,  # top margin
-                    b=400,
-                ),
+        t_p1 = []
+        for i in range(3):
+            t_p1.extend([{"type": "pie"}])  # pie type
+        t_p2 = []
+        for i in range(3):
+            t_p2.extend([{"type": "pie"}])  # pie type
+        specs = [t_p1, t_p2]
+        fig = make_subplots(rows=2, cols=3, specs=specs)
+        row = 0
+        col = 0
+        for item in data_list:
+            title, labels, values = item
+            # compute each slice's share and find which slices to keep
+            total = sum(values)
+            # compute each slice's share and find which slices to keep
+            percentage = 1.2  # threshold percentage
+            filtered_indices = [i for i, value in enumerate(values) if
+                                (value / total) * 100 >= percentage]
+            # extract the labels and values of the kept slices
+            filtered_labels = [labels[i] for i in filtered_indices]
+            filtered_values = [values[i] for i in filtered_indices]
+            # add the pie chart
+            fig.add_trace(
+                go.Pie(
+                    labels=filtered_labels,
+                    values=filtered_values,
+                    name=title,
+                    textinfo='label+percent'
+                ),
+                row=row // 3 + 1,
+                col=col % 3 + 1,
+            )
+            x = 0.14 + 0.355 * (row % 3)
+            y = 1.04 - 0.59 * (row // 3)
+            fig.add_annotation(
+                text=title,
+                x=x,
+                y=y,
+                xref='paper',  # relative to the whole chart area
+                yref='paper',
+                showarrow=True,  # show the arrow
+                align="left",  # text alignment
+                font=dict(size=14),
+            )
+            row += 1
+            col += 1
+        fig.update_layout(
+            title_text=f'销售地域分布',
+            # xaxis_title='X Axis',
+            # yaxis_title='Y Axis',
+            # width=self.screen_width // 1.4,
+            # height=self.screen_width // 2,
+            margin=dict(
+                l=100,  # left margin
+                r=100,
+                t=100,  # top margin
+                b=100,
+            ),
+            legend=dict(
+                # title='Legend Title',  # legend title
+                orientation='v',  # legend orientation ('h' horizontal, 'v' vertical)
+                # x=0.5,  # legend x position (fraction of the chart, 0 to 1)
+                # y=1.02,  # legend y position (slightly above the top so it doesn't cover data)
+                font=dict(
+                    size=12  # legend font size
                 )
-            fig.update_layout(xaxis_showgrid=False, yaxis_showgrid=False, xaxis_visible=False, yaxis_visible=False)
-            return fig
-
-        data_list = [{'键': labels1, '值': values1}, {'键': labels2, '值': values2}, {'键': labels3, '值': values3}]
-        fig = make_sub(data_list=data_list, num=3)
+            )
+        )
         fig.write_html(os.path.join(self.path, f'{filename}.html'))
 
+
     def dpll(self, db_name='聚合数据', table_name='店铺流量来源构成', pro_list=None, filename='店铺流量来源'):
         if not pro_list:
             pro_list = ['日期', '店铺名称', '类别', '来源构成', '二级来源', '三级来源', '访客数']
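Two details in the new pov_city hunk are worth flagging. First, `data_list += (title, labels, values)` with a bare tuple extends the list with three separate elements, while the province branch's `data_list += [(title, labels, values)]` appends one tuple, which is what the plotting loop's `title, labels, values = item` unpacking expects. Second, a truthy `percentage` prints the total and returns before those lines run (and `main()` below passes `percentage=1`), so the city top-lists appear to be work in progress as published. The loop then maps a flat counter onto the 2×3 grid via `row // 3 + 1` and `col % 3 + 1`; since `row` and `col` advance together, a single index suffices. A minimal, self-contained sketch of the list handling and grid mapping, with demo data rather than the package's:

    import plotly.graph_objects as go
    from plotly.subplots import make_subplots

    data_list = [('by tier', ['T1', 'T2'], [70, 30])]
    data_list += [('top cities', ['A', 'B', 'C'], [50, 30, 20])]  # appends ONE tuple
    # data_list += ('top cities', [...], [...]) would splice in 3 separate elements

    fig = make_subplots(rows=2, cols=3, specs=[[{"type": "pie"}] * 3] * 2)
    for idx, (title, labels, values) in enumerate(data_list):
        fig.add_trace(
            go.Pie(labels=labels, values=values, name=title, textinfo='label+percent'),
            row=idx // 3 + 1, col=idx % 3 + 1)  # flat index -> 2x3 grid
    fig.write_html('demo.html')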
@@ -600,7 +645,7 @@ class DataShow:
                 align="left",  # text alignment
                 font=dict(size=12),
             )
-        fig.write_html(os.path.join(self.path, f'{filename}.html'))
+        fig.write_html(os.path.join(self.path, f'{filename}_{item_id}.html'))
 
     def crowd(self, db_name='人群画像2', table_list=None, pro_list=None, filename='达摩盘人群画像', crowd_id=None, last_date=None):
        # item_ids = [696017020186, 714066010148, 830890472575]
@@ -739,29 +784,51 @@ class DataShow:
                 align="left",  # text alignment
                 font=dict(size=12),
             )
-        fig.write_html(os.path.join(self.path, f'{filename}.html'))
+        fig.write_html(os.path.join(self.path, f'{filename}_{crowd_name[:15]}.html'))
 
 
 def main():
     ds = DataShow()
-    ds.dpll()
-    ds.tg(
-        days=15,
-        # start_date='2024-11-01',
-        # end_date='2024-11-30',
-    )
-    ds.item_crowd(
-        item_id=839148235697,
-        lab='全部渠道',
-        option='商详浏览',
-        last_date=None,
-        d_str='近30天',
-    )
-    ds.crowd(
-        crowd_id=40457166,
-        last_date=None,
-    )
 
+    # # shop traffic sources
+    # ds.dpll()
+    # # multi-shop aggregated promotion data
+    # ds.tg(
+    #     days=15,
+    #     # start_date='2024-11-01',
+    #     # end_date='2024-11-30',
+    # )
+    #
+    # # item audience profile
+    # item_id_list = [
+    #     839148235697,
+    # ]
+    # for item_id in item_id_list:
+    #     ds.item_crowd(
+    #         item_id=item_id,
+    #         lab='全部渠道',
+    #         option='商详浏览',
+    #         last_date=None,
+    #         d_str='近30天',
+    #     )
+    #
+    # # 达摩盘 audience profile
+    # crowid_list = [
+    #     40457166,
+    # ]
+    # for crowid in crowid_list:
+    #     ds.crowd(
+    #         crowd_id=crowid,
+    #         last_date=None,
+    #     )
+
+    ds.pov_city(
+        db_name='生意经3',
+        filename='销售地域分布',
+        start_date='2024-06-01',
+        end_date='2024-12-11',
+        percentage=1,
+    )
 
 
 if __name__ == '__main__':
     main()
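main() now runs only the new pov_city report; the earlier reports are kept as commented-out blocks driven by id lists. Together with the two hunks above, each item/crowd report now writes its own HTML file, with the crowd name truncated to 15 characters. Slicing is per character, so CJK names are safe, but nothing strips path-hostile characters; a hypothetical helper (`safe_name` is not in the package) could cover both:

    import re

    def safe_name(name: str, limit: int = 15) -> str:
        # replace characters that are invalid in file names, then truncate
        return re.sub(r'[\\/:*?"<>|]', '_', str(name))[:limit]

    print(safe_name('万里马/官方:某个很长的人群名称示例'))  # -> '万里马_官方_某个很长的人群名'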
mdbq/aggregation/query_data.py CHANGED
@@ -495,17 +495,19 @@ class MysqlDatasQuery:
         df_sx.rename(columns={'消费能力等级': '消费力层级'}, inplace=True)
         df = pd.merge(df, df_sx, left_on=['人群名字'], right_on=['人群名称'], how='left')
         df.pop('人群名称')
-        df['消费力层级'] = df['消费力层级'].apply(lambda x: f'L{"".join(re.findall(r'L(\d)', str(x)))}' if str(x) != 'nan' else x)
-        df['用户年龄'] = df['用户年龄'].apply(lambda x: "~".join(re.findall(r'(\d{2})\D.*(\d{2})岁', str(x))[0]) if str(x) != 'nan' else x)
+        df['消费力层级'] = df['消费力层级'].apply(
+            lambda x: f'L{"".join(re.findall(r'L(\d)', str(x)))}' if str(x) != 'nan' else x)
+        df['用户年龄'] = df['用户年龄'].apply(
+            lambda x: "~".join(re.findall(r'(\d{2})\D.*(\d{2})岁', str(x))[0])
+            if str(x) != 'nan' and re.findall(r'(\d{2})\D.*(\d{2})岁', str(x)) else x)
 
         # 1. match an L followed by 2 or more digits, case-insensitive, e.g. L345
         # 2. otherwise, any L followed by several digits is caught by the first if, case-insensitive
 
         df['消费力层级'] = df.apply(
             lambda x:
-            ''.join(re.findall(r'(l\d+)', x['人群名字'].upper(), re.IGNORECASE)) if re.findall(r'(l\d{2,})',
-                                                                                            x['人群名字'],
-                                                                                            re.IGNORECASE) and str(x['消费力层级']) == 'nan'
+            ''.join(re.findall(r'(l\d+)', x['人群名字'].upper(), re.IGNORECASE))
+            if re.findall(r'(l\d{2,})', x['人群名字'], re.IGNORECASE) and str(x['消费力层级']) == 'nan'
             else 'L5' if re.findall(r'(l\d*5)', x['人群名字'], re.IGNORECASE) and str(x['消费力层级']) == 'nan'
             else 'L4' if re.findall(r'(l\d*4)', x['人群名字'], re.IGNORECASE) and str(x['消费力层级']) == 'nan'
             else 'L3' if re.findall(r'(l\d*3)', x['人群名字'], re.IGNORECASE) and str(x['消费力层级']) == 'nan'
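A portability note on the reformatted lambda above: `f'L{"".join(re.findall(r'L(\d)', str(x)))}'` reuses single quotes inside a single-quoted f-string, which is valid syntax only from Python 3.12 onward (PEP 701); on 3.11 and earlier the line is a SyntaxError. A version-portable spelling, as a sketch:

    import re

    x = '消费能力等级L3'
    # double quotes inside the single-quoted f-string work on all 3.x versions
    level = f'L{"".join(re.findall(r"L(\d)", str(x)))}' if str(x) != 'nan' else x
    print(level)  # L3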
@@ -524,11 +526,12 @@ class MysqlDatasQuery:
         pattern2 = r'(?<![\dlL])(\d{2}_\d{2})'
         df['用户年龄'] = df.apply(
             lambda x:
-            ''.join(re.findall(pattern1, x['人群名字'].upper())) if re.findall(pattern1, x['人群名字']) and str(x['用户年龄']) == 'nan'
-            # else ''.join(re.findall(r'[^\d|l|L](\d{2}_\d{2})', x['人群名字'].upper())) if re.findall(r'[^\d|l|L](\d{2}_\d{2})', x['人群名字'])
-            else ''.join(re.findall(pattern2, x['人群名字'].upper())) if re.findall(pattern2, x['人群名字']) and str(x['用户年龄']) == 'nan'
-            else ''.join(re.findall(r'(\d{2}-\d{2})岁', x['人群名字'].upper())) if re.findall(r'(\d{2}-\d{2})',
-                                                                                          x['人群名字']) and str(x['用户年龄']) == 'nan'
+            ''.join(re.findall(pattern1, x['人群名字'].upper()))
+            if re.findall(pattern1, x['人群名字']) and str(x['用户年龄']) == 'nan'
+            else ''.join(re.findall(pattern2, x['人群名字'].upper()))
+            if re.findall(pattern2, x['人群名字']) and str(x['用户年龄']) == 'nan'
+            else ''.join(re.findall(r'(\d{2}-\d{2})岁', x['人群名字'].upper()))
+            if re.findall(r'(\d{2}-\d{2})岁', x['人群名字']) and str(x['用户年龄']) == 'nan'
             else x['用户年龄'], axis=1)
         df['用户年龄'] = df['用户年龄'].apply(
             lambda x: f'{x[:2]}~{x[2:4]}' if str(x).isdigit()
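The negative lookbehind in `pattern2`, `(?<![\dlL])`, is what keeps age bands and consumption levels apart: `25_35` counts as an age range only when not preceded by a digit, `l`, or `L`, so names like `L25_35` fall through to the level rules instead. A quick check, as a sketch:

    import re

    pattern2 = r'(?<![\dlL])(\d{2}_\d{2})'
    print(re.findall(pattern2, '女装 25_35 高消费'))  # ['25_35']
    print(re.findall(pattern2, '女装L25_35人群'))     # [] -- blocked by the lookbehind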
mdbq/mysql/mysql.py CHANGED
@@ -113,6 +113,7 @@ class MysqlUpload:
 
     @staticmethod
     def try_except(func):  # an exception-handling decorator defined inside the class
+
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
@@ -122,6 +123,7 @@ class MysqlUpload:
                with open(error_file, 'a') as f:
                    now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    f.write(f'\n{now} \n')
+                    f.write(f'函数注释内容(用于定位函数): {func.__doc__} \n')
                    # f.write(f'报错的文件:\n{e.__traceback__.tb_frame.f_globals["__file__"]}\n')  # the file where the exception occurred
                    traceback.print_exc(file=open(error_file, 'a'))  # write the full stack trace
                    print(f'更多信息请查看日志文件: {error_file}')
@@ -1439,6 +1441,7 @@ class OptimizeDatas:
 
     @staticmethod
     def try_except(func):  # an exception-handling decorator defined inside the class
+
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
@@ -1448,6 +1451,7 @@ class OptimizeDatas:
                with open(error_file, 'a') as f:
                    now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    f.write(f'\n{now} \n')
+                    f.write(f'函数注释内容(用于定位函数): {func.__doc__} \n')
                    # f.write(f'报错的文件:\n{e.__traceback__.tb_frame.f_globals["__file__"]}\n')  # the file where the exception occurred
                    traceback.print_exc(file=open(error_file, 'a'))  # write the full stack trace
                    print(f'更多信息请查看日志文件: {error_file}')
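The mysql.py hunks make the same two changes in both classes: a blank line after the decorator signature and a `func.__doc__` entry in the error log, so each logged traceback can be traced back to the decorated function via its docstring. A self-contained sketch of this style of decorator; `error.log` stands in for the module's `error_file`, and the sketch reuses the open handle where the published code reopens the file for `print_exc`:

    import datetime
    import functools
    import traceback

    def try_except(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                with open('error.log', 'a') as f:
                    now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    f.write(f'\n{now} \n')
                    f.write(f'函数注释内容(用于定位函数): {func.__doc__} \n')
                    traceback.print_exc(file=f)  # full stack trace into the log
        return wrapper

    @try_except
    def demo():
        """demo docstring, used to locate this function in the log"""
        raise ValueError('boom')

    demo()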
mdbq/spider/aikucun.py CHANGED
@@ -483,7 +483,7 @@ class AikuCunNew:
 
 if __name__ == '__main__':
     get_cookie_aikucun()  # log in and fetch cookies
-    akucun(date_num=3, headless=True)  # download the data
+    akucun(date_num=10, headless=True)  # download the data
 
     # a = AikuCunNew(shop_name='aikucun')
     # a.akc()
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mdbq
-Version: 3.3.16
+Version: 3.4.1
 Home-page: https://pypi.org/project/mdbq
 Author: xigua,
 Author-email: 2587125111@qq.com
@@ -2,9 +2,9 @@ mdbq/__init__.py,sha256=Il5Q9ATdX8yXqVxtP_nYqUhExzxPC_qk_WXQ_4h0exg,16
 mdbq/__version__.py,sha256=y9Mp_8x0BCZSHsdLT_q5tX9wZwd5QgqrSIENLrb6vXA,62
 mdbq/aggregation/__init__.py,sha256=EeDqX2Aml6SPx8363J-v1lz0EcZtgwIBYyCJV6CcEDU,40
 mdbq/aggregation/aggregation.py,sha256=-yzApnlqSN2L0E1YMu5ml-W827qpKQvWPCOI7jj2kzY,80264
-mdbq/aggregation/datashow.py,sha256=2NzHGjGoUy2WG-MxmbilCj6KBAmVah3jqFuEd2zv9XU,32379
+mdbq/aggregation/datashow.py,sha256=Hwpt9REb7Iep_ptdVw0TqebYaJNNyvNs6dyOB_LqozM,34893
 mdbq/aggregation/optimize_data.py,sha256=RXIv7cACCgYyehAxMjUYi_S7rVyjIwXKWMaM3nduGtA,3068
-mdbq/aggregation/query_data.py,sha256=FcwaYUom2UGqCRsuGgwfuVdnY86PUOzkCivyoCY2oVQ,175663
+mdbq/aggregation/query_data.py,sha256=FiNZhL5_El2B5ADfCPGUZXsE2iZd3UmGml9Te9qJIpU,175364
 mdbq/bdup/__init__.py,sha256=AkhsGk81SkG1c8FqDH5tRq-8MZmFobVbN60DTyukYTY,28
 mdbq/bdup/bdup.py,sha256=LAV0TgnQpc-LB-YuJthxb0U42_VkPidzQzAagan46lU,4234
 mdbq/config/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
@@ -18,7 +18,7 @@ mdbq/log/mylogger.py,sha256=oaT7Bp-Hb9jZt52seP3ISUuxVcI19s4UiqTeouScBO0,3258
 mdbq/mongo/__init__.py,sha256=SILt7xMtQIQl_m-ik9WLtJSXIVf424iYgCfE_tnQFbw,13
 mdbq/mongo/mongo.py,sha256=M9DUeUCMPDngkwn9-ui0uTiFrvfNU1kLs22s5SmoNm0,31899
 mdbq/mysql/__init__.py,sha256=A_DPJyAoEvTSFojiI2e94zP0FKtCkkwKP1kYUCSyQzo,11
-mdbq/mysql/mysql.py,sha256=OndnoP1cBDM9h1bR_Uh2waT3yUjlgr05zHIlC7mmxhc,99378
+mdbq/mysql/mysql.py,sha256=r5YkS1WnV9dGtEHFcwaekjtUBgFcvkdmwif-m52CyHI,99560
 mdbq/mysql/recheck_mysql.py,sha256=ppBTfBLgkRWirMVZ31e_ZPULiGPJU7K3PP9G6QBZ3QI,8605
 mdbq/mysql/s_query.py,sha256=6L5Cp90zq13noZHjzSA5mqms_hD01c8GO1_NfbYDu6w,9252
 mdbq/mysql/year_month_day.py,sha256=VgewoE2pJxK7ErjfviL_SMTN77ki8GVbTUcao3vFUCE,1523
@@ -33,8 +33,8 @@ mdbq/pbix/pbix_refresh.py,sha256=JUjKW3bNEyoMVfVfo77UhguvS5AWkixvVhDbw4_MHco,239
 mdbq/pbix/refresh_all.py,sha256=OBT9EewSZ0aRS9vL_FflVn74d4l2G00wzHiikCC4TC0,5926
 mdbq/pbix/refresh_all_old.py,sha256=_pq3WSQ728GPtEG5pfsZI2uTJhU8D6ra-htIk1JXYzw,7192
 mdbq/spider/__init__.py,sha256=RBMFXGy_jd1HXZhngB2T2XTvJqki8P_Fr-pBcwijnew,18
-mdbq/spider/aikucun.py,sha256=eAIITxnbbxsR_EoohJ78CRw2dEdfSHOltfpxBrh0cvc,22207
-mdbq-3.3.16.dist-info/METADATA,sha256=c2t76yzpaP9kkwDg5y3Ooam9oYe6p4ntlKjWFUjZ464,244
-mdbq-3.3.16.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
-mdbq-3.3.16.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
-mdbq-3.3.16.dist-info/RECORD,,
+mdbq/spider/aikucun.py,sha256=v7VO5gtEXR6_4Q6ujbTyu1FHu7TXHcwSQ6hIO249YH0,22208
+mdbq-3.4.1.dist-info/METADATA,sha256=n1MFzq9VZzCy63dJOU-fcJSZ0lZTXawBaIW5vXngzCE,243
+mdbq-3.4.1.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+mdbq-3.4.1.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
+mdbq-3.4.1.dist-info/RECORD,,