mdbq 3.6.8__py3-none-any.whl → 3.6.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mdbq/mysql/s_query.py CHANGED
@@ -12,6 +12,7 @@ from sqlalchemy import create_engine
 import os
 import calendar
 from mdbq.dataframe import converter
+from decimal import Decimal
 
 warnings.filterwarnings('ignore')
 """
@@ -49,9 +50,14 @@ class QueryDatas:
         return columns
 
     def data_to_df(self, db_name, table_name, start_date, end_date, projection: dict=[]):
-
-        start_date = pd.to_datetime(start_date).strftime('%Y-%m-%d')
-        end_date = pd.to_datetime(end_date).strftime('%Y-%m-%d')
+        if start_date:
+            start_date = pd.to_datetime(start_date).strftime('%Y-%m-%d')
+        else:
+            start_date = '1970-01-01'
+        if end_date:
+            end_date = pd.to_datetime(end_date).strftime('%Y-%m-%d')
+        else:
+            end_date = datetime.datetime.today().strftime('%Y-%m-%d')
         df = pd.DataFrame()  # initialize df
 
         if self.check_infos(db_name, table_name) == False:
@@ -97,6 +103,9 @@ class QueryDatas:
                 rows = cursor.fetchall()  # fetch the query results
                 columns = [desc[0] for desc in cursor.description]
                 df = pd.DataFrame(rows, columns=columns)  # convert to a DataFrame
+                # use applymap to convert every Decimal to float
+                df_float = df.applymap(lambda x: float(x) if isinstance(x, Decimal) else x)
+
                 if 'id' in df.columns.tolist():
                     df.pop('id')  # the id column is not returned by default
                 if len(df) == 0:
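
Note on the `data_to_df` change: previously both date bounds were required (`pd.to_datetime(None)` yields `NaT`, which fails when formatted), whereas 3.6.9 substitutes open-ended defaults. A minimal sketch of the new defaulting behavior; the `normalize_dates` helper is illustrative only, not part of the package:

    import datetime
    import pandas as pd

    def normalize_dates(start_date, end_date):
        # Mirrors the defaulting logic added to QueryDatas.data_to_df in 3.6.9.
        if start_date:
            start_date = pd.to_datetime(start_date).strftime('%Y-%m-%d')
        else:
            start_date = '1970-01-01'  # open-ended lower bound
        if end_date:
            end_date = pd.to_datetime(end_date).strftime('%Y-%m-%d')
        else:
            end_date = datetime.datetime.today().strftime('%Y-%m-%d')  # upper bound defaults to today
        return start_date, end_date

    print(normalize_dates(None, None))          # ('1970-01-01', '<today>')
    print(normalize_dates('2024/03/05', None))  # ('2024-03-05', '<today>')
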
mdbq/redis/getredis.py CHANGED
@@ -12,6 +12,7 @@ import logging
 from logging.handlers import RotatingFileHandler
 import getpass
 import platform
+from decimal import Decimal
 
 if platform.system() == 'Windows':
     D_PATH = os.path.join(f'C:\\Users\\{getpass.getuser()}\\Downloads')
@@ -57,6 +58,9 @@ logger.addHandler(file_handler)
 
 
 class RedisData(object):
+    """
+    String storage.
+    """
     def __init__(self, redis_engine, download, cache_ttl: int):
         self.redis_engine = redis_engine  # Redis data-processing engine
         self.download = download  # MySQL data-processing engine
@@ -268,6 +272,409 @@ class RedisData(object):
         return temp_df.to_json(orient="records", force_ascii=False)
 
 
+class RedisDataHash(object):
+    """
+    Hash storage.
+    A joint query processor for the Redis cache and MySQL data.
+
+    Features:
+    - Supports MySQL queries against tables sharded by year
+    - Multi-level cache strategy (in-memory cache + Redis cache)
+    - Asynchronous cache refresh mechanism
+    - Automatic handling of date ranges and data type conversion
+    """
+
+    def __init__(self, redis_engine, download, cache_ttl: int):
+        """
+        Initialize the cache processor.
+
+        :param redis_engine: Redis connection instance
+        :param download: data download handler (must implement a data_to_df method)
+        :param cache_ttl: cache time-to-live (in minutes; stored internally in seconds)
+        """
+        self.redis_engine = redis_engine
+        self.download = download
+        self.cache_ttl = cache_ttl * 60  # converted to seconds for storage
+
+    def get_from_mysql(
+            self,
+            db_name: str,
+            table_name: str,
+            set_year: bool,
+            start_date,
+            end_date
+    ) -> pd.DataFrame:
+        """
+        Core method for fetching data directly from MySQL.
+
+        Processing logic:
+        1. When year sharding is enabled (set_year=True), iterate over every shard from 2024 to the current year
+        2. Merge the contents of all matching tables
+        3. Convert the date column format automatically
+
+        :return: the merged DataFrame (may contain data from several shards)
+        """
+        # Original implementation kept unchanged
+        dfs = []
+        if set_year:
+            # Handle year-sharded tables (e.g. table_2024, table_2025, ...)
+            current_year = datetime.datetime.today().year
+            for year in range(2024, current_year + 1):
+                df = self._fetch_table_data(
+                    db_name, f"{table_name}_{year}", start_date, end_date
+                )
+                if df is not None:
+                    dfs.append(df)
+        else:
+            # Single-table query mode
+            df = self._fetch_table_data(db_name, table_name, start_date, end_date)
+            if df is not None:
+                dfs.append(df)
+
+        # Merge the results and handle the empty case
+        combined_df = pd.concat(dfs, ignore_index=True) if dfs else pd.DataFrame()
+        if combined_df.empty:
+            logger.warn(f"warning: no data read from {db_name}.{table_name}")
+        else:
+            combined_df = self._convert_date_columns(combined_df)
+        return combined_df
+
+    def get_from_redis(
+            self,
+            db_name: str,
+            table_name: str,
+            set_year: bool,
+            start_date,
+            end_date
+    ) -> pd.DataFrame:
+        """
+        Main entry point for data retrieval with caching.
+
+        Execution flow:
+        1. Build the cache key and check its TTL (time to live)
+        2. When the TTL is under 60 seconds, trigger an async refresh and query MySQL directly for fresh data
+        3. Fetch historical data from Redis and filter it by date
+        4. If the cached data is incomplete, trigger an async refresh and fall back to a MySQL query
+        5. On any exception, automatically fall back to MySQL
+
+        Design highlights:
+        - Cache warming: the cache is refreshed asynchronously on first access
+        - Fallback: any exception switches to a direct MySQL query
+        - Stale cache: when the TTL is low, the cache is refreshed in parallel
+        """
+        # Reset the time-of-day part to 00:00:00; this is a major pitfall and must not be omitted
+        start_dt = pd.to_datetime(start_date).floor('D')
+        end_dt = pd.to_datetime(end_date).floor('D')
+        # Build the cache key
+        cache_key = self._generate_cache_key(db_name, table_name, set_year)
+
+        try:
+            # Check the cache
+            ttl = self.redis_engine.ttl(cache_key)
+            if ttl < 60:  # trigger a refresh when less than one minute remains
+                # Fetch the current cache contents
+                cache_data = self._fetch_redis_data(cache_key)
+                # Refresh the cache asynchronously
+                self._trigger_async_cache_update(
+                    cache_key, db_name, table_name, set_year, start_date, end_date, cache_data
+                )
+                # Fall back to a direct MySQL query immediately
+                return self.get_from_mysql(db_name, table_name, set_year, start_date, end_date)
+
+            # Fetch cached data by year range (improves read efficiency on large datasets)
+            start_year = start_dt.year
+            end_year = end_dt.year
+            cache_data = self._fetch_redis_data(cache_key, start_year, end_year)
+            # Empty-data check (cache miss)
+            if cache_data.empty:
+                self._trigger_async_cache_update(
+                    cache_key, db_name, table_name, set_year, start_date, end_date, cache_data
+                )
+                return self.get_from_mysql(db_name, table_name, set_year, start_date, end_date)
+            # Filter to the requested range (the cache is stored at coarse per-year granularity)
+            filtered_df = self._filter_by_date_range(cache_data, start_dt, end_dt)
+            if not filtered_df.empty:
+                if '日期' in filtered_df.columns.tolist():
+                    # Return the cached data directly when it covers the requested date range
+                    exsit_min_date = filtered_df['日期'].min()
+                    if exsit_min_date <= start_dt:
+                        return filtered_df
+                else:
+                    return filtered_df
+            # The cached data is incomplete; trigger an async cache refresh
+            self._trigger_async_cache_update(
+                cache_key, db_name, table_name, set_year, start_date, end_date, cache_data
+            )
+            # Fall back to a direct MySQL query immediately
+            return self.get_from_mysql(db_name, table_name, set_year, start_date, end_date)
+
+        except Exception as e:
+            # Exception policy: return the MySQL result immediately to keep the service available
+            logger.error(f"Redis connection error: {e}; querying MySQL directly")
+            return self.get_from_mysql(db_name, table_name, set_year, start_date, end_date)
+
+    def set_redis(
+            self,
+            cache_key: str,
+            db_name: str,
+            table_name: str,
+            set_year: bool,
+            start_date,
+            end_date,
+            existing_data: pd.DataFrame
+    ) -> None:
+        """
+        Asynchronous cache refresh method.
+
+        Core logic:
+        1. Fetch the latest data from MySQL
+        2. Merge old and new data (keeping historical rows that fall outside the new data's time range)
+        3. Smart storage strategy:
+           - No date column: store everything under the "all" field
+           - Date column present: shard by year (improves query efficiency)
+
+        Design highlights:
+        - Incremental updates: only the necessary data is merged, avoiding full overwrites
+        - Data sharding: per-year storage improves read performance on large datasets
+        - Fault tolerance: the no-date-column case is handled as a special path
+        """
+        try:
+            # Fetch the latest data (using the latest query conditions)
+            new_data = self.get_from_mysql(db_name, table_name, set_year, start_date, end_date)
+            if new_data.empty:
+                return
+
+            # Merge with the cached data
+            combined_data = self._merge_data(new_data, existing_data)
+
+            if not combined_data.empty:
+                # Special case: no date column
+                if '日期' not in combined_data.columns.tolist():
+                    # Serialize the data
+                    serialized_data = self._serialize_data(combined_data)
+                    self.redis_engine.hset(cache_key, "all", serialized_data)
+                    self.redis_engine.expire(cache_key, self.cache_ttl)
+                else:
+                    # Per-year sharded storage strategy
+                    combined_data['年份'] = combined_data['日期'].dt.year
+                    # Store each group under its own field of the Redis hash (e.g. field "2024" holds that year's data)
+                    for year, group in combined_data.groupby('年份'):
+                        year_str = str(year)
+                        serialized_data = self._serialize_data(group.drop(columns=['年份']))
+                        self.redis_engine.hset(cache_key, year_str, serialized_data)
+                    self.redis_engine.expire(cache_key, self.cache_ttl)
+                logger.info(f"cache updated {cache_key} | rows: {len(combined_data)}")
+        except Exception as e:
+            logger.error(f"cache update failed: {cache_key} - {str(e)}")
+
+    def _fetch_table_data(
+            self,
+            db_name: str,
+            table_name: str,
+            start_date,
+            end_date
+    ) -> pd.DataFrame:
+        """Run the MySQL query and return a DataFrame (with exception handling)."""
+        try:
+            return self.download.data_to_df(
+                db_name=db_name,
+                table_name=table_name,
+                start_date=start_date,
+                end_date=end_date,
+                projection={}
+            )
+        except Exception as e:
+            logger.info(f"MySQL query error {db_name}.{table_name}: {e}")
+            return pd.DataFrame()
+
+    def _fetch_redis_data(self, cache_key: str, start_year: int = None, end_year: int = None) -> pd.DataFrame:
+        """
+        Read data from the Redis hash.
+
+        Optimizations:
+        - When a year range is given, fetch only the relevant fields (hmget)
+        - Otherwise fetch everything (hgetall)
+        - Tables from MySQL without a date column are stored under the field name "all" even when
+          start_year/end_year are given, so "all" must be added to the field list as well
+        """
+        try:
+            if start_year is not None and end_year is not None:
+                # Fetch exactly the fields within the year range (better performance)
+                fields = [str(y) for y in range(start_year, end_year + 1)]
+                fields += ['all']
+                data_list = self.redis_engine.hmget(cache_key, fields)
+                dfs = []
+                for data, field in zip(data_list, fields):
+                    if data:
+                        df = pd.DataFrame(json.loads(data.decode("utf-8")))
+                        df = self._convert_date_columns(df)
+                        dfs.append(df)
+                return pd.concat(dfs, ignore_index=True) if dfs else pd.DataFrame()
+            else:
+                # Full fetch mode
+                data_dict = self.redis_engine.hgetall(cache_key)
+                dfs = []
+                for field, data in data_dict.items():
+                    try:
+                        df = pd.DataFrame(json.loads(data.decode("utf-8")))
+                        df = self._convert_date_columns(df)
+                        dfs.append(df)
+                    except Exception as e:
+                        logger.info(f"Redis data parsing failed {cache_key} field {field}: {e}")
+                return pd.concat(dfs, ignore_index=True) if dfs else pd.DataFrame()
+        except Exception as e:
+            logger.info(f"Redis data fetch failed {cache_key}: {e}")
+            return pd.DataFrame()
+
+    def _convert_date_columns(self, df: pd.DataFrame) -> pd.DataFrame:
+        """Normalize the date column format."""
+        if "日期" in df.columns:
+            df["日期"] = pd.to_datetime(df["日期"], format="%Y-%m-%d", errors="coerce")
+        return df
+
+    def _generate_cache_key(self, db_name: str, table_name: str, set_year: bool) -> str:
+        """Build the cache key name."""
+        return f"{db_name}:{table_name}_haveyear" if set_year else f"{db_name}:{table_name}"
+
+    def _filter_by_date_range(
+            self,
+            df: pd.DataFrame,
+            start_dt: datetime.datetime,
+            end_dt: datetime.datetime
+    ) -> pd.DataFrame:
+        """Filter the data precisely by date range."""
+        if "日期" not in df.columns:
+            return df
+        date_mask = (df["日期"] >= start_dt) & (df["日期"] <= end_dt)
+        return df[date_mask].copy()
+
+    def _trigger_async_cache_update(
+            self,
+            cache_key: str,
+            db_name: str,
+            table_name: str,
+            set_year: bool,
+            start_date: str,
+            end_date: str,
+            existing_data: pd.DataFrame
+    ):
+        """Start a background thread to refresh the cache (does not block the main flow)."""
+        thread = threading.Thread(
+            target=self.set_redis,
+            args=(cache_key, db_name, table_name, set_year, start_date, end_date, existing_data),
+            daemon=True
+        )
+        thread.start()
+
+    def _merge_data(self, new_data: pd.DataFrame, existing_data: pd.DataFrame) -> pd.DataFrame:
+        """Merge strategy for old and new data: keep historical rows outside the new data's range, sorted by date."""
+        if existing_data.empty or "日期" not in existing_data.columns:
+            return new_data
+        new_data["日期"] = pd.to_datetime(new_data["日期"])
+        existing_data["日期"] = pd.to_datetime(existing_data["日期"])
+
+        # Compute the date range covered by the new data
+        new_min = new_data["日期"].min()
+        new_max = new_data["日期"].max()
+
+        # Keep the part of the existing data that falls outside the new data's range
+        valid_historical = existing_data[
+            (existing_data["日期"] < new_min) | (existing_data["日期"] > new_max)
+        ]
+        merged_data = pd.concat([new_data, valid_historical], ignore_index=True)
+        merged_data.sort_values(['日期'], ascending=[False], ignore_index=True, inplace=True)
+        return merged_data
+
+    def _serialize_data(self, df: pd.DataFrame) -> bytes:
+        """
+        High-performance data serialization.
+
+        Key points:
+        1. Date types are converted to strings
+        2. Decimal values are converted to floats
+        3. NaN values are uniformly converted to None
+        4. JSON serialization performance is optimized
+        """
+        if df.empty:
+            return json.dumps([], ensure_ascii=False).encode("utf-8")
+        temp_df = df.copy()
+
+        # Handle datetime columns (safe conversion)
+        date_cols = temp_df.select_dtypes(include=["datetime64[ns]"]).columns
+        for col in date_cols:
+            # Handle all-NaT columns to avoid type errors
+            if temp_df[col].isna().all():
+                temp_df[col] = temp_df[col].astype(object)  # cast to object to avoid NaT
+            temp_df[col] = (
+                temp_df[col]
+                .dt.strftime("%Y-%m-%d")  # the dt accessor is safe here (dtype is forced to datetime)
+                .where(temp_df[col].notna(), None)
+            )
+
+        # Uniform null handling (preserving the dtype of all-None columns)
+        def safe_null_convert(series):
+            """Keep the original dtype of all-None columns."""
+            if series.isna().all():
+                return series.astype(object).where(pd.notnull(series), None)
+            return series.where(pd.notnull(series), None)
+
+        temp_df = temp_df.apply(safe_null_convert)
+
+        # Type-handling function (with better support for nested structures)
+        def decimal_serializer(obj):
+            """Recursive serialization."""
+            # Handle None early
+            if obj is None:
+                return None
+
+            # Dispatch by type
+            if isinstance(obj, Decimal):
+                return round(float(obj), 6)
+            elif isinstance(obj, pd.Timestamp):
+                return obj.strftime("%Y-%m-%d %H:%M:%S")  # fallback for stray timestamps
+            elif isinstance(obj, np.generic):  # handles every numpy scalar type
+                return obj.item()
+            elif isinstance(obj, (datetime.date, datetime.datetime)):
+                return obj.isoformat()
+            elif isinstance(obj, (list, tuple, set)):
+                return [decimal_serializer(item) for item in obj]
+            elif isinstance(obj, dict):
+                return {decimal_serializer(k): decimal_serializer(v) for k, v in obj.items()}
+            elif isinstance(obj, bytes):
+                return obj.decode("utf-8", errors="replace")  # binary-safe handling
+            elif isinstance(obj, pd.Series):  # guard against Series objects passed in by accident
+                return obj.to_list()
+            else:
+                # Try to pass through types that are already serializable
+                try:
+                    json.dumps(obj)
+                    return obj
+                except TypeError:
+                    logger.error(f"unserializable type {type(obj)}: {str(obj)}")
+                    raise
+
+        # Defensive check before serialization
+        try:
+            data_records = temp_df.to_dict(orient="records")
+        except Exception as e:
+            logger.error(f"failed to convert data to records: {str(e)}")
+            raise
+
+        # Special-case empty record lists
+        if not data_records:
+            return json.dumps([], ensure_ascii=False).encode("utf-8")
+
+        # Perform the serialization
+        try:
+            return json.dumps(
+                data_records,
+                ensure_ascii=False,
+                default=decimal_serializer
+            ).encode("utf-8")
+        except TypeError as e:
+            logger.error(f"serialization failed; check for unhandled data types: {str(e)}")
+            raise
+
+
 if __name__ == '__main__':
     # # ****************************************************
     # # This part is defined externally and only needs to be defined once; begin
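
Taken together, the new `RedisDataHash` class caches each MySQL table as a single Redis hash: rows are grouped by the year of their `日期` (date) column and stored as JSON under fields named "2024", "2025", and so on, while tables without a date column live under the literal field "all". A minimal usage sketch, assuming a local Redis instance and a downloader exposing `data_to_df`; the connection parameters and database/table names are placeholders, and the `QueryDatas` constructor arguments shown are assumptions, not documented API:

    import redis
    from mdbq.mysql.s_query import QueryDatas
    from mdbq.redis.getredis import RedisDataHash

    r = redis.Redis(host='127.0.0.1', port=6379, db=0)        # placeholder connection
    download = QueryDatas(username='user', password='pass',   # assumed constructor signature
                          host='127.0.0.1', port=3306)

    rd = RedisDataHash(redis_engine=r, download=download, cache_ttl=60)  # TTL in minutes

    # With set_year=True the processor scans year-sharded tables
    # (sales_2024, sales_2025, ...) and caches them under the key
    # 'my_db:sales_haveyear' (see _generate_cache_key).
    df = rd.get_from_redis(
        db_name='my_db',       # placeholder
        table_name='sales',    # placeholder
        set_year=True,
        start_date='2024-01-01',
        end_date='2025-01-31',
    )
    print(df.head())

On a cold or nearly expired cache, `get_from_redis` returns the MySQL result immediately and refreshes the hash in a daemon thread, so callers never block on the cache rebuild.
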
mdbq-3.6.8.dist-info/METADATA → mdbq-3.6.9.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mdbq
-Version: 3.6.8
+Version: 3.6.9
 Home-page: https://pypi.org/project/mdbq
 Author: xigua,
 Author-email: 2587125111@qq.com
mdbq-3.6.8.dist-info/RECORD → mdbq-3.6.9.dist-info/RECORD CHANGED
@@ -19,8 +19,9 @@ mdbq/mongo/__init__.py,sha256=SILt7xMtQIQl_m-ik9WLtJSXIVf424iYgCfE_tnQFbw,13
 mdbq/mongo/mongo.py,sha256=M9DUeUCMPDngkwn9-ui0uTiFrvfNU1kLs22s5SmoNm0,31899
 mdbq/mysql/__init__.py,sha256=A_DPJyAoEvTSFojiI2e94zP0FKtCkkwKP1kYUCSyQzo,11
 mdbq/mysql/mysql.py,sha256=_jFo2_OC1BNm5wEmoYiBG_TcuNNA2xUWKNhMBfgDiAM,99699
+mdbq/mysql/mysql_bak.py,sha256=_jFo2_OC1BNm5wEmoYiBG_TcuNNA2xUWKNhMBfgDiAM,99699
 mdbq/mysql/recheck_mysql.py,sha256=ppBTfBLgkRWirMVZ31e_ZPULiGPJU7K3PP9G6QBZ3QI,8605
-mdbq/mysql/s_query.py,sha256=Z0C3lQQcGtnizyVHa62HYIAqZ8R3KeSHP-N0O7Qb5eU,9280
+mdbq/mysql/s_query.py,sha256=M186PgZR_slDdSi_m1vGw2fhZQVEfCuFRBSJlz8yL3A,9643
 mdbq/mysql/year_month_day.py,sha256=VgewoE2pJxK7ErjfviL_SMTN77ki8GVbTUcao3vFUCE,1523
 mdbq/other/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
 mdbq/other/download_sku_picture.py,sha256=GdphR7Q3psXXVuZoyJ4u_6OWn_rWlcbT0iJ-1zPT6O0,45368
@@ -33,12 +34,10 @@ mdbq/pbix/pbix_refresh.py,sha256=JUjKW3bNEyoMVfVfo77UhguvS5AWkixvVhDbw4_MHco,239
 mdbq/pbix/refresh_all.py,sha256=OBT9EewSZ0aRS9vL_FflVn74d4l2G00wzHiikCC4TC0,5926
 mdbq/pbix/refresh_all_old.py,sha256=_pq3WSQ728GPtEG5pfsZI2uTJhU8D6ra-htIk1JXYzw,7192
 mdbq/redis/__init__.py,sha256=YtgBlVSMDphtpwYX248wGge1x-Ex_mMufz4-8W0XRmA,12
-mdbq/redis/getredis.py,sha256=faqRBX-xV2C665Q-C9lh0XmPvruXv1jeNPpQJaT3y38,11726
-mdbq/redis/getredis_bak20250131.py,sha256=DQazRyKVnaDziP9JEIofAJF8dw_PKyLEgwEznlTnGDw,12284
-mdbq/redis/getredis_deepseek.py,sha256=bQ6VfiTYkQ5cYK6MYJPKgwbdrwsOLBLrV-ObblKaurA,9653
+mdbq/redis/getredis.py,sha256=q7omKJCPw_6Zr_r6WwTv4RGSXzZzpLPkIaqJ22svJhE,29104
 mdbq/spider/__init__.py,sha256=RBMFXGy_jd1HXZhngB2T2XTvJqki8P_Fr-pBcwijnew,18
 mdbq/spider/aikucun.py,sha256=v7VO5gtEXR6_4Q6ujbTyu1FHu7TXHcwSQ6hIO249YH0,22208
-mdbq-3.6.8.dist-info/METADATA,sha256=6J05ZtnIu9ttqbPXHaFt_Q9RZTmbH3IG7huPeu1V6SU,243
-mdbq-3.6.8.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
-mdbq-3.6.8.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
-mdbq-3.6.8.dist-info/RECORD,,
+mdbq-3.6.9.dist-info/METADATA,sha256=m6rX1e31X7uhBfVC0ZE07nWd5EY4QVO6RZC93uAdr68,243
+mdbq-3.6.9.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
+mdbq-3.6.9.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
+mdbq-3.6.9.dist-info/RECORD,,