mdbq 3.4.0__py3-none-any.whl → 3.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mdbq/aggregation/datashow.py CHANGED
@@ -1,4 +1,5 @@
  # -*- coding: UTF-8 –*-
+ import decimal
  import os
  import re
  import socket
@@ -91,7 +92,9 @@ else:
  
  class DataShow:
      def __init__(self):
-         self.path = '/Users/xigua/Downloads'
+         self.path = '/Users/xigua/Downloads/html文件'
+         if not os.path.isdir(self.path):
+             os.makedirs(self.path)
          root = tk.Tk()
          self.screen_width = root.winfo_screenwidth()
          self.screen_height = root.winfo_screenheight()
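The new `__init__` creates the HTML output directory on demand. A minimal standalone sketch of the same pattern (path taken from the diff; `exist_ok=True` makes the separate `isdir` check unnecessary and avoids a race):

```python
import os

path = '/Users/xigua/Downloads/html文件'  # path from the diff above
os.makedirs(path, exist_ok=True)  # no error if the directory already exists
```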
@@ -119,100 +122,171 @@ class DataShow:
          df = pd.concat(__res, ignore_index=True)
          return df
  
-     def dpll_bak(self, db_name='聚合数据', table_name='店铺流量来源构成', pro_list=None, filename='店铺流量来源'):
-         if not pro_list:
-             pro_list = ['日期', '三级来源', '访客数']
-         df = self.getdata(db_name=db_name, table_name=table_name, pro_list=pro_list, start_date='2024-11-01', end_date=self.end_date)
-         if len(df) == 0:
-             print(f'数据不能为空: {table_name}')
-             return
-         df = df[df['三级来源'] != '汇总']
-         df['日期'] = pd.to_datetime(df['日期'])
-         today = datetime.date.today()
- 
-         def st_date(num=1):
-             return pd.to_datetime(today - datetime.timedelta(days=num))
-         max_date = df['日期'].max().strftime('%Y-%m-%d')
-         df1 = df[df['日期'] >= st_date(1)]
-         df2 = df[df['日期'] >= st_date(7)]
-         df3 = df[df['日期'] >= st_date(30)]
-         df2 = df2.groupby(
-             ['三级来源'],
-             as_index=False).agg(
-             **{
-                 '访客数': ('访客数', np.sum),
-             }
+     def pov_city(self, db_name='生意经3', filename='销售地域分布', start_date=None, end_date=None, percentage=None):
+         """
+         生意经: sales analysis by province and city
+         """
+         if not start_date:
+             start_date = self.start_date
+         if not end_date:
+             end_date = self.end_date
+         pov_set = self.getdata(
+             db_name='属性设置3',
+             table_name='城市等级',
+             pro_list=[],
+             start_date=start_date,
+             end_date=end_date
+         )
+         # print(pov_set)
+         # cities
+         pro_list = ['日期', '店铺名称', '城市', '销售额', '退款额']
+         year = datetime.datetime.today().year
+         df_city = self.getdata(
+             db_name=db_name,
+             table_name=f'地域分析_城市_{year}',
+             pro_list=pro_list,
+             start_date=start_date,
+             end_date=end_date
+         )
+         df_city = df_city[df_city['店铺名称'] == '万里马官方旗舰店']
+         df_city = df_city.groupby(['店铺名称', '城市'], as_index=False).agg(
+             **{'销售额': ('销售额', np.sum), '退款额': ('退款额', np.sum)})
+         df_city = df_city[df_city['销售额'] > 0]
+ 
+         # provinces
+         pro_list = ['日期', '店铺名称', '省份', '销售额', '退款额']
+         year = datetime.datetime.today().year
+         df_pov = self.getdata(
+             db_name=db_name,
+             table_name=f'地域分析_省份_{year}',
+             pro_list=pro_list,
+             start_date=start_date,
+             end_date=end_date
+         )
+         df_pov = df_pov[df_pov['店铺名称'] == '万里马官方旗舰店']
+         # print(df_pov[df_pov['省份'] == '广东'])
+         df_pov = df_pov.groupby(['店铺名称', '省份'], as_index=False).agg(
+             **{'销售额': ('销售额', np.sum), '退款额': ('退款额', np.sum)})
+         df_pov.drop_duplicates(subset='省份', keep='last', inplace=True, ignore_index=True)
+         df_pov.sort_values(['销售额'], ascending=[False], ignore_index=True, inplace=True)
+         df_pov = df_pov[df_pov['省份'] != '其他']
+         percentages = df_pov['销售额'] / df_pov['销售额'].sum() * 100
+         df_pov1 = df_pov.head(10)
+         data_list = [('销售 top省份', df_pov1['省份'].tolist(), df_pov1['销售额'].tolist(), percentages)]
+ 
+         # attach city tiers to the city dataframe
+         pov_set = pov_set[['城市等级', '城市']]
+         pov_set.drop_duplicates(subset='城市', keep='last', inplace=True, ignore_index=True)
+         df_city = pd.merge(df_city, pov_set, left_on=['城市'], right_on=['城市'], how='left')
+         df_level = df_city.groupby(['店铺名称', '城市等级'], as_index=False).agg(
+             **{'销售额': ('销售额', np.sum), '退款额': ('退款额', np.sum)})
+         city_level_list = [('按城市等级', df_level['城市等级'].tolist(), df_level['销售额'].tolist())]
+         df_city.drop_duplicates(subset='城市', keep='last', inplace=True, ignore_index=True)
+         df_city.sort_values(['销售额'], ascending=[False], ignore_index=True, inplace=True)
+         df_city = df_city[df_city['城市'] != '其他']
+         percentages = df_city['销售额'] / df_city['销售额'].sum() * 100
+         df_city1 = df_city.head(10)
+         data_list += [('销售 top城市', df_city1['城市'].tolist(), df_city1['销售额'].tolist(), percentages)]
+ 
+         # top cities by refund amount
+         df_city.sort_values(['退款额'], ascending=[False], ignore_index=True, inplace=True)
+         percentages = df_city['退款额'] / df_city['退款额'].sum() * 100
+         df_city2 = df_city.head(10)
+         data_list += [('退款 top城市', df_city2['城市'].tolist(), df_city2['退款额'].tolist(), percentages)]
+ 
+         t_p1 = [{"type": "pie"}]
+         for i in range(2):
+             t_p1.extend([{"type": "bar"}])  # bar-chart cells
+         t_p2 = []
+         for i in range(3):
+             t_p2.extend([{"type": "bar"}])  # bar-chart cells
+         specs = [t_p1, t_p2]
+         fig = make_subplots(rows=2, cols=3, specs=specs)
+         title, labels, values = city_level_list[0]
+         # add the pie chart
+         fig.add_trace(
+             go.Pie(
+                 labels=labels,
+                 values=values,
+                 name=title,
+                 textinfo='label+percent'
+             ),
+             row=1,
+             col=1,
          )
-         df3 = df3.groupby(
-             ['三级来源'],
-             as_index=False).agg(
-             **{
-                 '访客数': ('访客数', np.sum),
-             }
+         x = 0.14
+         y = 1
+         fig.add_annotation(
+             text=title,
+             x=x,
+             y=y,
+             xref='paper',  # relative to the whole figure area
+             yref='paper',
+             showarrow=True,  # show an arrow
+             align="left",  # text alignment
+             font=dict(size=14),
          )
-         # print(df)
-         labels1 = df1['三级来源'].tolist()
-         values1 = df1['访客数'].tolist()
-         labels2 = df2['三级来源'].tolist()
-         values2 = df2['访客数'].tolist()
-         labels3 = df3['三级来源'].tolist()
-         values3 = df3['访客数'].tolist()
- 
-         def make_sub(data_list, num):
-             # create a grid layout with 1 row and num columns
-             t_p = []
-             for i in range(num):
-                 t_p.extend([{"type": "pie"}])
-             fig = make_subplots(rows=1, cols=num, specs=[t_p])
-             pie_title = {1: 1, 2: 7, 3: 30}
-             i = 1
-             for item in data_list:
-                 # compute each sector's percentage and decide which sectors to keep
-                 total = sum(item['值'])
-                 threshold_percentage = 0.1  # threshold percentage
-                 filtered_indices = [i for i, value in enumerate(item['值']) if
-                                     (value / total) * 100 >= threshold_percentage]
-                 # extract the labels and values of the kept sectors
-                 filtered_labels = [item['键'][i] for i in filtered_indices]
-                 filtered_values = [item['值'][i] for i in filtered_indices]
+         row = 1
+         col = 1
+         for item in data_list:
+             title, labels, values, percentages = item
+             bar = go.Bar(
+                 x=labels,
+                 y=values,
+                 name=title,
+                 orientation='v',  # vertical bars
+                 text=percentages.map('{:.2f}%'.format),  # text to display (percentages)
+                 textposition='outside',  # place the text outside the bars
+                 width=0.55  # cap the bar width
+             )
+             fig.add_trace(
+                 bar,
+                 row=row // 3 + 1,
+                 col=col % 3 + 1,
+             )
+             x = 0.14 + 0.355 * (row % 3)
+             y = 1 - 0.575 * (row // 3)
+             fig.add_annotation(
+                 text=title,
+                 x=x,
+                 y=y,
+                 xref='paper',  # relative to the whole figure area
+                 yref='paper',
+                 showarrow=True,  # show an arrow
+                 align="left",  # text alignment
+                 font=dict(size=14),
+             )
+             row += 1
+             col += 1
  
-             # add the pie chart
-             fig.add_trace(
-                 go.Pie(labels=filtered_labels, values=filtered_values, name=f'pie {i}', textinfo='label+percent'),
-                 row=1, col=i)
-             # fig.add_trace(go.Pie(labels=item['键'], values=item['值'], name=f'最近{pie_title[i]}天', textinfo='label+percent'), row=1, col=i)
-             fig.add_annotation(
-                 text=f'最近{pie_title[i]}天',
-                 x=0.15 + 0.35 * (i - 1),
-                 y=0.98,
-                 xref='paper',  # relative to the whole figure area
-                 yref='paper',
-                 showarrow=True,  # show an arrow
-                 align="left",  # text alignment
-                 font=dict(size=16),
-             )
-             i += 1
-         fig.update_layout(
-             title_text=f'店铺流量来源 最近一天: {max_date}',
-             xaxis_title='X Axis',
-             yaxis_title='Y Axis',
-             # width=self.screen_width // 1.4,
-             # height=self.screen_width // 2,
-             margin=dict(
-                 l=100,  # left margin
-                 r=300,
-                 t=100,  # top margin
-                 b=400,
-             ),
+         fig.update_layout(
+             title_text='销售地域分布',
+             margin=dict(
+                 l=100,  # left margin
+                 r=100,
+                 t=100,  # top margin
+                 b=100,
+             ),
+             legend=dict(
+                 orientation='v',  # legend orientation ('h' horizontal, 'v' vertical)
+                 font=dict(
+                     size=12  # legend font size
                  )
-             fig.update_layout(xaxis_showgrid=False, yaxis_showgrid=False, xaxis_visible=False, yaxis_visible=False)
-             return fig
- 
-         data_list = [{'键': labels1, '值': values1}, {'键': labels2, '值': values2}, {'键': labels3, '值': values3}]
-         fig = make_sub(data_list=data_list, num=3)
+             )
+         )
+         fig.add_annotation(
+             text=f'统计时间周期: {start_date}~{end_date}',
+             x=0.5,
+             y=-0.1,
+             xref='paper',  # relative to the whole figure area
+             yref='paper',
+             showarrow=False,  # no arrow
+             align="left",  # text alignment
+             font=dict(size=12),
+         )
          fig.write_html(os.path.join(self.path, f'{filename}.html'))
  
+ 
      def dpll(self, db_name='聚合数据', table_name='店铺流量来源构成', pro_list=None, filename='店铺流量来源'):
          if not pro_list:
              pro_list = ['日期', '店铺名称', '类别', '来源构成', '二级来源', '三级来源', '访客数']
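The new `pov_city` builds a mixed subplot grid through the `specs` argument of `make_subplots`: one pie cell in the top-left, bar cells everywhere else. A minimal runnable sketch of that layout, with made-up labels, values, and output file name:

```python
import plotly.graph_objects as go
from plotly.subplots import make_subplots

# One pie cell plus five bar cells, mirroring the specs list built in pov_city.
specs = [
    [{"type": "pie"}, {"type": "bar"}, {"type": "bar"}],
    [{"type": "bar"}, {"type": "bar"}, {"type": "bar"}],
]
fig = make_subplots(rows=2, cols=3, specs=specs)
fig.add_trace(go.Pie(labels=['一线', '二线'], values=[60, 40], textinfo='label+percent'), row=1, col=1)
fig.add_trace(go.Bar(x=['广东', '浙江'], y=[100, 80]), row=1, col=2)
fig.write_html('销售地域分布_示例.html')  # hypothetical output name
```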
@@ -324,7 +398,7 @@ class DataShow:
          )
          count2 += 1
          fig.update_layout(
-             title_text=f'店铺流量来源 最近数据: {max_date}',
+             title_text='店铺流量来源',
              # xaxis_title='X Axis',
              # yaxis_title='Y Axis',
              # width=self.screen_width // 1.4,
@@ -345,6 +419,16 @@ class DataShow:
                  )
              )
          )
+         fig.add_annotation(
+             text=f'最近数据日期: {max_date}',
+             x=0.5,
+             y=-0.25,
+             xref='paper',  # relative to the whole figure area
+             yref='paper',
+             showarrow=False,  # no arrow
+             align="left",  # text alignment
+             font=dict(size=12),
+         )
          fig.write_html(os.path.join(self.path, f'{filename}.html'))
  
      def tg(self, db_name='聚合数据', table_name='多店推广场景_按日聚合', pro_list=None, filename='多店推广场景', days=None, start_date=None, end_date=None):
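Several hunks here attach captions via `fig.add_annotation` in paper coordinates. A self-contained sketch of the pattern (figure data, caption text, and file name are made up):

```python
import plotly.graph_objects as go

fig = go.Figure(go.Bar(x=['a', 'b'], y=[3, 5]))
# Paper coordinates run from 0 to 1 across the plotting area, so x=0.5, y=-0.25
# pins the caption below the x-axis regardless of the data ranges.
fig.add_annotation(
    text='最近数据日期: 2024-12-11',  # illustrative caption in the diff's style
    x=0.5,
    y=-0.25,
    xref='paper',
    yref='paper',
    showarrow=False,
)
fig.write_html('示例.html')  # hypothetical output file
```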
@@ -600,7 +684,7 @@ class DataShow:
              align="left",  # text alignment
              font=dict(size=12),
          )
-         fig.write_html(os.path.join(self.path, f'{filename}.html'))
+         fig.write_html(os.path.join(self.path, f'{filename}_{item_id}.html'))
  
      def crowd(self, db_name='人群画像2', table_list=None, pro_list=None, filename='达摩盘人群画像', crowd_id=None, last_date=None):
          # item_ids = [696017020186, 714066010148, 830890472575]
@@ -739,29 +823,51 @@ class DataShow:
              align="left",  # text alignment
              font=dict(size=12),
          )
-         fig.write_html(os.path.join(self.path, f'{filename}.html'))
+         fig.write_html(os.path.join(self.path, f'{filename}_{crowd_name[:15]}.html'))
  
  
  def main():
      ds = DataShow()
+ 
+     # shop traffic sources
      ds.dpll()
+     # aggregated multi-store promotion data
      ds.tg(
          days=15,
          # start_date='2024-11-01',
          # end_date='2024-11-30',
      )
-     ds.item_crowd(
-         item_id=839148235697,
-         lab='全部渠道',
-         option='商详浏览',
-         last_date=None,
-         d_str='近30天',
-     )
-     ds.crowd(
-         crowd_id=40457166,
-         last_date=None,
-     )
  
+     # item crowd profiles
+     item_id_list = [
+         839148235697,
+     ]
+     for item_id in item_id_list:
+         ds.item_crowd(
+             item_id=item_id,
+             lab='全部渠道',
+             option='商详浏览',
+             last_date=None,
+             d_str='近30天',
+         )
+ 
+     # 达摩盘 (DMP) crowd profiles
+     crowid_list = [
+         40457166,
+     ]
+     for crowid in crowid_list:
+         ds.crowd(
+             crowd_id=crowid,
+             last_date=None,
+         )
+ 
+     ds.pov_city(
+         db_name='生意经3',
+         filename='销售地域分布',
+         start_date='2024-12-01',
+         end_date='2024-12-11',
+         percentage=0.02,
+     )
  
  if __name__ == '__main__':
      main()
mdbq/aggregation/query_data.py CHANGED
@@ -1193,6 +1193,83 @@ class MysqlDatasQuery:
          )
          return True
  
+     def item_up(self, db_name='聚合数据', table_name='淘宝店铺货品'):
+         start_date, end_date = self.months_data(num=self.months)
+         projection = {}
+         df_set = self.download.data_to_df(
+             db_name='属性设置3',
+             table_name='货品年份基准',
+             start_date=start_date,
+             end_date=end_date,
+             projection={'商品id': 1, '上市年份': 1},
+         )
+         df = self.download.data_to_df(
+             db_name='市场数据3',
+             table_name='淘宝店铺数据',
+             start_date=start_date,
+             end_date=end_date,
+             projection=projection,
+         )
+         df['日期'] = pd.to_datetime(df['日期'], format='%Y-%m-%d', errors='ignore')  # parse the date column
+         df = df[df['日期'] == pd.to_datetime('2024-12-12')]
+ 
+         df_set['商品id'] = df_set['商品id'].astype('int64')
+         df['商品id'] = df['商品id'].astype('int64')
+         df_set.sort_values('商品id', ascending=False, ignore_index=True, inplace=True)
+ 
+         def check_year(item_id):
+             for item in df_set.to_dict(orient='records'):
+                 if item_id > item['商品id']:
+                     return item['上市年份']
+ 
+         df['上市年份'] = df['商品id'].apply(lambda x: check_year(x))
+         p = df.pop('上市年份')
+         df.insert(loc=5, column='上市年份', value=p)
+         now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+         print(f'{now} 正在更新: mysql ({host}:{port}) {db_name}/{table_name}')
+         set_typ = {
+             '日期': 'date',
+             '店铺id': 'bigint',
+             '店铺名称': 'varchar(255)',
+             '商家id': 'bigint',
+             '商品id': 'bigint',
+             '商品标题': 'varchar(255)',
+             '商品链接': 'varchar(255)',
+             '商品图片': 'varchar(255)',
+             '销量': 'varchar(50)',
+             '页面价': 'int',
+             'data_sku': 'varchar(1000)',
+             '更新时间': 'timestamp',
+             '上市年份': 'varchar(50)',
+         }
+         m_engine.df_to_mysql(
+             df=df,
+             db_name=db_name,
+             table_name=table_name,
+             # icm_update=['日期', '一级来源', '二级来源', '三级来源', '访客数'],  # incremental update; use only for 聚合数据, nowhere else
+             move_insert=True,  # delete first, then insert
+             df_sql=False,  # True: upload the whole table via df.to_sql, without deduplication
+             drop_duplicates=False,  # True: check for duplicate rows before inserting (slower)
+             count=None,
+             filename=None,  # used to track processing progress
+             reset_id=True,  # whether to reset the auto-increment column
+             set_typ=set_typ,
+         )
+         company_engine.df_to_mysql(
+             df=df,
+             db_name=db_name,
+             table_name=table_name,
+             # icm_update=['日期', '一级来源', '二级来源', '三级来源', '访客数'],  # incremental update; use only for 聚合数据, nowhere else
+             move_insert=True,  # delete first, then insert
+             df_sql=False,  # True: upload the whole table via df.to_sql, without deduplication
+             drop_duplicates=False,  # True: check for duplicate rows before inserting (slower)
+             count=None,
+             filename=None,  # used to track processing progress
+             reset_id=True,  # whether to reset the auto-increment column
+             set_typ=set_typ,
+         )
+ 
+ 
      def spph(self, db_name='聚合数据', table_name='天猫_商品排行'):
          """ """
          start_date, end_date = self.months_data(num=self.months)
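`check_year` does a linear scan over the baseline table to find, for each 商品id, the largest baseline id strictly below it and return that row's 上市年份. A vectorized sketch of the same lookup using `pd.merge_asof` (the ids and years here are made up; `merge_asof` requires both frames sorted ascending on the key):

```python
import pandas as pd

# Made-up baseline: each row marks the first 商品id issued in that launch year.
df_set = pd.DataFrame({'商品id': [100, 500, 900], '上市年份': ['2022', '2023', '2024']})
df = pd.DataFrame({'商品id': [120, 650, 905]})

# direction='backward' with allow_exact_matches=False picks the nearest baseline
# id strictly below each 商品id, matching check_year's `>` comparison.
df = pd.merge_asof(
    df.sort_values('商品id'),
    df_set.sort_values('商品id'),
    on='商品id',
    direction='backward',
    allow_exact_matches=False,
)
print(df)  # each 商品id now carries its inferred 上市年份
```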
@@ -3840,7 +3917,7 @@ if __name__ == '__main__':
      sdq = MysqlDatasQuery()  # instantiate the data-processing class
      sdq.months = 1  # data window: 1 means roughly the last 2 months
      sdq.update_service = True  # set while debugging; True: write the data to the MySQL server
-     sdq.tg_rqbb(db_name='聚合数据', table_name='天猫_人群报表')
+     sdq.item_up()
  
      # string = '30-34岁,35-39岁,40-49岁'
      # d = "~".join(re.findall(r'(\d+)\D.*\D(\d+)岁', string)[0])
mdbq/spider/aikucun.py CHANGED
@@ -483,7 +483,7 @@ class AikuCunNew:
  
  if __name__ == '__main__':
      get_cookie_aikucun()  # log in and fetch cookies
-     akucun(date_num=3, headless=True)  # download data
+     akucun(date_num=10, headless=True)  # download data
  
  # a = AikuCunNew(shop_name='aikucun')
  # a.akc()
mdbq-3.4.2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mdbq
- Version: 3.4.0
+ Version: 3.4.2
  Home-page: https://pypi.org/project/mdbq
  Author: xigua,
  Author-email: 2587125111@qq.com
mdbq-3.4.2.dist-info/RECORD CHANGED
@@ -2,9 +2,9 @@ mdbq/__init__.py,sha256=Il5Q9ATdX8yXqVxtP_nYqUhExzxPC_qk_WXQ_4h0exg,16
  mdbq/__version__.py,sha256=y9Mp_8x0BCZSHsdLT_q5tX9wZwd5QgqrSIENLrb6vXA,62
  mdbq/aggregation/__init__.py,sha256=EeDqX2Aml6SPx8363J-v1lz0EcZtgwIBYyCJV6CcEDU,40
  mdbq/aggregation/aggregation.py,sha256=-yzApnlqSN2L0E1YMu5ml-W827qpKQvWPCOI7jj2kzY,80264
- mdbq/aggregation/datashow.py,sha256=2NzHGjGoUy2WG-MxmbilCj6KBAmVah3jqFuEd2zv9XU,32379
+ mdbq/aggregation/datashow.py,sha256=k4gUYldnmi_iZJrM7wNtjeenXJl82hUoYcPu6iIL3PU,35864
  mdbq/aggregation/optimize_data.py,sha256=RXIv7cACCgYyehAxMjUYi_S7rVyjIwXKWMaM3nduGtA,3068
- mdbq/aggregation/query_data.py,sha256=FiNZhL5_El2B5ADfCPGUZXsE2iZd3UmGml9Te9qJIpU,175364
+ mdbq/aggregation/query_data.py,sha256=9NALeHTP9tblOEPyntLBRtdroLG_qN9qWi34Hg4rXFM,178891
  mdbq/bdup/__init__.py,sha256=AkhsGk81SkG1c8FqDH5tRq-8MZmFobVbN60DTyukYTY,28
  mdbq/bdup/bdup.py,sha256=LAV0TgnQpc-LB-YuJthxb0U42_VkPidzQzAagan46lU,4234
  mdbq/config/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
@@ -33,8 +33,8 @@ mdbq/pbix/pbix_refresh.py,sha256=JUjKW3bNEyoMVfVfo77UhguvS5AWkixvVhDbw4_MHco,239
  mdbq/pbix/refresh_all.py,sha256=OBT9EewSZ0aRS9vL_FflVn74d4l2G00wzHiikCC4TC0,5926
  mdbq/pbix/refresh_all_old.py,sha256=_pq3WSQ728GPtEG5pfsZI2uTJhU8D6ra-htIk1JXYzw,7192
  mdbq/spider/__init__.py,sha256=RBMFXGy_jd1HXZhngB2T2XTvJqki8P_Fr-pBcwijnew,18
- mdbq/spider/aikucun.py,sha256=eAIITxnbbxsR_EoohJ78CRw2dEdfSHOltfpxBrh0cvc,22207
- mdbq-3.4.0.dist-info/METADATA,sha256=5yMS8KxK8EkUCkrPLlPexLMSb2JYk3pVTL5b2S39uME,243
- mdbq-3.4.0.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
- mdbq-3.4.0.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
- mdbq-3.4.0.dist-info/RECORD,,
+ mdbq/spider/aikucun.py,sha256=v7VO5gtEXR6_4Q6ujbTyu1FHu7TXHcwSQ6hIO249YH0,22208
+ mdbq-3.4.2.dist-info/METADATA,sha256=I2lVjMi-WsvegW9ZCQcR4UV8wg4g1A9-mzgVFQ_H7x4,243
+ mdbq-3.4.2.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+ mdbq-3.4.2.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
+ mdbq-3.4.2.dist-info/RECORD,,
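Each RECORD row above is `path,sha256=<digest>,<size>`, where the digest is the urlsafe base64 encoding of the file's SHA-256 with the `=` padding stripped. A sketch for recomputing one entry against a local checkout (the path is illustrative):

```python
import base64
import hashlib

def record_entry(path: str) -> str:
    # Build a RECORD-style line: urlsafe base64 SHA-256, padding stripped, then size.
    with open(path, 'rb') as f:
        data = f.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b'=')
    return f'{path},sha256={digest.decode()},{len(data)}'

print(record_entry('mdbq/spider/aikucun.py'))  # compare with the RECORD line above
```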