mdbq-3.9.3-py3-none-any.whl → mdbq-3.9.4-py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
mdbq/__version__.py CHANGED
@@ -1 +1 @@
- VERSION = '3.9.3'
+ VERSION = '3.9.4'
mdbq/mysql/mysql.py CHANGED
@@ -12,11 +12,14 @@ import os
  import logging
  import logging.handlers
  from mdbq.other import otk
- from typing import Union, List, Dict, Optional, Any, Tuple
+ from typing import Union, List, Dict, Optional, Any, Tuple, Set
  from dbutils.pooled_db import PooledDB
  import json
  import psutil
  from collections import OrderedDict
+ import threading
+ import concurrent.futures
+ from collections import defaultdict


  warnings.filterwarnings('ignore')
@@ -2383,6 +2386,621 @@ class MySQLUploader:

          return decorator

+
+ class MySQLDeduplicator:
+     """
+     MySQL data deduplicator.
+
+     Features:
+     1. Automatically detects and deletes duplicate rows in MySQL databases
+     2. Supports scanning every database or processing specified tables only
+     3. Safe handling under multiple threads/processes
+     4. Comprehensive error handling and logging
+
+     Usage:
+         deduplicator = MySQLDeduplicator(
+             username='root',
+             password='password',
+             host='localhost',
+             port=3306
+         )
+
+         # Deduplicate every database
+         deduplicator.deduplicate_all()
+
+         # Deduplicate one database (multi-threaded)
+         deduplicator.deduplicate_database('my_db', parallel=True)
+
+         # Deduplicate one table (on specific columns)
+         deduplicator.deduplicate_table('my_db', 'my_table', columns=['name', 'date'])
+
+         # Close the connections
+         deduplicator.close()
+     """
+
+     def __init__(
+             self,
+             username: str,
+             password: str,
+             host: str = 'localhost',
+             port: int = 3306,
+             charset: str = 'utf8mb4',
+             max_workers: int = 1,
+             batch_size: int = 1000,
+             skip_system_dbs: bool = True,
+             logging_mode: str = 'console',
+             log_level: str = 'INFO',
+             log_file: str = 'mysql_deduplicate.log',
+             max_retries: int = 3,
+             retry_interval: int = 5,
+             pool_size: int = 5
+     ):
+         """
+         Initialize the deduplicator.
+
+         :param username: database user name
+         :param password: database password
+         :param host: database host, defaults to localhost
+         :param port: database port, defaults to 3306
+         :param charset: character set, defaults to utf8mb4
+         :param max_workers: maximum number of worker threads, defaults to 1 (single-threaded)
+         :param batch_size: batch size, defaults to 1000
+         :param skip_system_dbs: whether to skip the system databases, defaults to True
+         :param logging_mode: logging mode ('console', 'file', 'both', 'none')
+         :param log_level: log level ('DEBUG', 'INFO', 'WARNING', 'ERROR')
+         :param log_file: log file path
+         :param max_retries: maximum number of retries
+         :param retry_interval: retry interval in seconds
+         :param pool_size: connection pool size
+         """
+         # Initialize the connection pool
+         self.pool = PooledDB(
+             creator=pymysql,
+             host=host,
+             port=port,
+             user=username,
+             password=password,
+             charset=charset,
+             maxconnections=pool_size,
+             cursorclass=pymysql.cursors.DictCursor
+         )
+
+         # Configuration
+         self.max_workers = max(1, min(max_workers, 20))  # cap the number of worker threads
+         self.batch_size = batch_size
+         self.skip_system_dbs = skip_system_dbs
+         self.max_retries = max_retries
+         self.retry_interval = retry_interval
+
+         # Thread-safety controls
+         self._lock = threading.Lock()
+         self._processing_tables = set()  # tables currently being processed
+
+         # Initialize logging
+         self._init_logging(logging_mode, log_level, log_file)
+
+         # System databases
+         self.SYSTEM_DATABASES = {
+             'information_schema', 'mysql',
+             'performance_schema', 'sys'
+         }
+
+     def _init_logging(
+             self,
+             logging_mode: str,
+             log_level: str,
+             log_file: str
+     ):
+         """Initialize the logging configuration."""
+         self.logger = logging.getLogger('mysql_deduplicator')
+         self.logger.setLevel(log_level.upper())
+
+         # Avoid adding duplicate handlers
+         if self.logger.handlers:
+             for handler in self.logger.handlers[:]:
+                 self.logger.removeHandler(handler)
+
+         formatter = logging.Formatter(
+             '%(asctime)s - %(levelname)s - %(message)s',
+             datefmt='%Y-%m-%d %H:%M:%S'
+         )
+
+         mode = logging_mode.lower()
+         if mode in ('both', 'console'):
+             console_handler = logging.StreamHandler()
+             console_handler.setFormatter(formatter)
+             self.logger.addHandler(console_handler)
+
+         if mode in ('both', 'file'):
+             file_handler = logging.FileHandler(
+                 filename=log_file,
+                 encoding='utf-8'
+             )
+             file_handler.setFormatter(formatter)
+             self.logger.addHandler(file_handler)
+
+     def _log(self, level: str, message: str, extra: Optional[Dict] = None):
+         """Unified logging helper."""
+         if not hasattr(self.logger, level.lower()):
+             return
+
+         # Truncate overly long messages
+         if len(message) > 500:
+             message = message[:500] + '...'
+
+         log_method = getattr(self.logger, level.lower())
+         log_method(message, extra=extra)
+
+     def _get_connection(self):
+         """Get a connection from the pool."""
+         try:
+             conn = self.pool.connection()
+             self._log('debug', "Acquired a database connection")
+             return conn
+         except Exception as e:
+             self._log('error', f"Failed to get a database connection: {str(e)}")
+             raise ConnectionError(f"Failed to connect to the database: {str(e)}")
+
+     @staticmethod
+     def _retry_on_failure(func):
+         """Retry decorator."""
+
+         @wraps(func)
+         def wrapper(self, *args, **kwargs):
+             last_exception = None
+             for attempt in range(self.max_retries + 1):
+                 try:
+                     return func(self, *args, **kwargs)
+                 except (pymysql.OperationalError, pymysql.InterfaceError) as e:
+                     last_exception = e
+                     if attempt < self.max_retries:
+                         wait_time = self.retry_interval * (attempt + 1)
+                         self._log('warning',
+                                   f"Database operation failed, retrying (attempt {attempt + 1}/{self.max_retries})",
+                                   {'error': str(e), 'wait_time': wait_time})
+                         time.sleep(wait_time)
+                         continue
+                 except Exception as e:
+                     last_exception = e
+                     self._log('error',
+                               f"Operation failed: {str(e)}",
+                               {'error_type': type(e).__name__})
+                     break
+
+             if last_exception:
+                 raise last_exception
+             raise Exception("Unknown error")
+
+         return wrapper
+
+     @_retry_on_failure
+     def _get_databases(self) -> List[str]:
+         """Return every non-system database."""
+         sql = "SHOW DATABASES"
+
+         with self._get_connection() as conn:
+             with conn.cursor() as cursor:
+                 cursor.execute(sql)
+                 all_dbs = [row['Database'] for row in cursor.fetchall()]
+
+                 if self.skip_system_dbs:
+                     return [db for db in all_dbs if db.lower() not in self.SYSTEM_DATABASES]
+                 return all_dbs
+
+     @_retry_on_failure
+     def _get_tables(self, database: str) -> List[str]:
+         """Return every table in the specified database."""
+         sql = "SHOW TABLES"
+
+         with self._get_connection() as conn:
+             with conn.cursor() as cursor:
+                 cursor.execute(f"USE `{database}`")
+                 cursor.execute(sql)
+                 return [row[f'Tables_in_{database}'] for row in cursor.fetchall()]
+
+     @_retry_on_failure
+     def _get_table_columns(self, database: str, table: str) -> List[str]:
+         """Return the table's column names (excluding the id column)."""
+         sql = """
+         SELECT COLUMN_NAME
+         FROM INFORMATION_SCHEMA.COLUMNS
+         WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s
+         ORDER BY ORDINAL_POSITION
+         """
+
+         with self._get_connection() as conn:
+             with conn.cursor() as cursor:
+                 cursor.execute(sql, (database, table))
+                 return [row['COLUMN_NAME'] for row in cursor.fetchall()
+                         if row['COLUMN_NAME'].lower() != 'id']
+
+     def _acquire_table_lock(self, database: str, table: str) -> bool:
+         """Take the per-table lock so the same table is never processed concurrently."""
+         key = f"{database}.{table}"
+
+         with self._lock:
+             if key in self._processing_tables:
+                 self._log('debug', f"Table {key} is being processed by another thread, skipping")
+                 return False
+             self._processing_tables.add(key)
+             return True
+
+     def _release_table_lock(self, database: str, table: str):
+         """Release the per-table lock."""
+         key = f"{database}.{table}"
+
+         with self._lock:
+             if key in self._processing_tables:
+                 self._processing_tables.remove(key)
+
+     def _deduplicate_table(
+             self,
+             database: str,
+             table: str,
+             columns: Optional[List[str]] = None,
+             dry_run: bool = False
+     ) -> Tuple[int, int]:
+         """
+         Deduplicate a single table.
+
+         :param database: database name
+         :param table: table name
+         :param columns: columns used for deduplication (all columns when None)
+         :param dry_run: simulate only (count duplicates without deleting)
+         :return: (duplicate groups found, rows deleted)
+         """
+         if not self._acquire_table_lock(database, table):
+             return (0, 0)
+
+         try:
+             self._log('info', f"Processing table: {database}.{table}")
+
+             # Fetch the actual column names
+             all_columns = self._get_table_columns(database, table)
+             if not all_columns:
+                 self._log('warning', f"Table {database}.{table} has no usable columns (possibly only an id column), skipping")
+                 return (0, 0)
+
+             # Use the specified columns, or all columns
+             use_columns = columns or all_columns
+             invalid_columns = set(use_columns) - set(all_columns)
+
+             if invalid_columns:
+                 self._log('warning',
+                           f"Table {database}.{table} does not contain these columns: {invalid_columns}; falling back to the valid ones",
+                           {'invalid_columns': invalid_columns}
+                           )
+                 use_columns = [col for col in use_columns if col in all_columns]
+
+             if not use_columns:
+                 self._log('error', f"Table {database}.{table} has no valid deduplication columns")
+                 return (0, 0)
+
+             # Build the deduplication SQL
+             column_list = ', '.join([f'`{col}`' for col in use_columns])
+             temp_table = f"temp_{table}_{int(time.time())}"
+
+             # Deduplicate via a temporary table to avoid table-locking problems
+             create_temp_sql = f"""
+             CREATE TABLE `{database}`.`{temp_table}` AS
+             SELECT MIN(`id`) as `min_id`, {column_list}, COUNT(*) as `dup_count`
+             FROM `{database}`.`{table}`
+             GROUP BY {column_list}
+             HAVING COUNT(*) > 1
+             """
+
+             delete_dup_sql = f"""
+             DELETE FROM `{database}`.`{table}`
+             WHERE `id` NOT IN (
+                 SELECT `min_id` FROM `{database}`.`{temp_table}`
+             ) AND ({' OR '.join([f'`{col}` IS NOT NULL' for col in use_columns])})
+             """
+
+             drop_temp_sql = f"DROP TABLE IF EXISTS `{database}`.`{temp_table}`"
+
+             with self._get_connection() as conn:
+                 with conn.cursor() as cursor:
+                     # Create a temporary table that collects the duplicate groups
+                     cursor.execute(create_temp_sql)
+                     cursor.execute(f"SELECT COUNT(*) as cnt FROM `{database}`.`{temp_table}`")
+                     dup_count = cursor.fetchone()['cnt']
+
+                     if dup_count == 0:
+                         self._log('info', f"Table {database}.{table} has no duplicate rows")
+                         cursor.execute(drop_temp_sql)
+                         conn.commit()
+                         return (0, 0)
+
+                     self._log('info',
+                               f"Found {dup_count} groups of duplicate rows in table {database}.{table}",
+                               {'columns': use_columns}
+                               )
+
+                     if not dry_run:
+                         # Perform the actual deletion
+                         cursor.execute(delete_dup_sql)
+                         affected_rows = cursor.rowcount
+                         conn.commit()
+                         self._log('info',
+                                   f"Deleted {affected_rows} duplicate rows from table {database}.{table}",
+                                   {'columns': use_columns}
+                                   )
+                     else:
+                         affected_rows = 0
+                         self._log('info',
+                                   f"[dry run] Table {database}.{table}: {dup_count} groups of duplicate rows would be deleted",
+                                   {'columns': use_columns}
+                                   )
+
+                     # Clean up the temporary table
+                     cursor.execute(drop_temp_sql)
+                     conn.commit()
+
+                     return (dup_count, affected_rows)
+
+         except Exception as e:
+             self._log('error',
+                       f"Error while processing table {database}.{table}: {str(e)}",
+                       {'error_type': type(e).__name__}
+                       )
+             return (0, 0)
+         finally:
+             self._release_table_lock(database, table)
+
+     def deduplicate_table(
+             self,
+             database: str,
+             table: str,
+             columns: Optional[List[str]] = None,
+             dry_run: bool = False
+     ) -> Tuple[int, int]:
+         """
+         Deduplicate the specified table.
+
+         :param database: database name
+         :param table: table name
+         :param columns: columns used for deduplication (all columns when None)
+         :param dry_run: simulate only (count duplicates without deleting)
+         :return: (duplicate groups found, rows deleted)
+         """
+         try:
+             # Check that the table exists
+             if not self._check_table_exists(database, table):
+                 self._log('warning', f"Table {database}.{table} does not exist, skipping")
+                 return (0, 0)
+
+             return self._deduplicate_table(database, table, columns, dry_run)
+         except Exception as e:
+             self._log('error',
+                       f"Unexpected top-level error while processing table {database}.{table}: {str(e)}",
+                       {'error_type': type(e).__name__}
+                       )
+             return (0, 0)
+
+     def deduplicate_database(
+             self,
+             database: str,
+             tables: Optional[List[str]] = None,
+             columns_map: Optional[Dict[str, List[str]]] = None,
+             dry_run: bool = False,
+             parallel: bool = False
+     ) -> Dict[str, Tuple[int, int]]:
+         """
+         Deduplicate every table in the specified database.
+
+         :param database: database name
+         :param tables: tables to process (all tables when None)
+         :param columns_map: deduplication columns per table {table: [columns]}
+         :param dry_run: simulate only
+         :param parallel: process the tables in parallel
+         :return: dict {table: (duplicate groups found, rows deleted)}
+         """
+         results = {}
+
+         try:
+             # Check that the database exists
+             if not self._check_database_exists(database):
+                 self._log('warning', f"Database {database} does not exist, skipping")
+                 return results
+
+             # Determine the tables to process
+             target_tables = tables or self._get_tables(database)
+             if not target_tables:
+                 self._log('info', f"Database {database} contains no tables, skipping")
+                 return results
+
+             self._log('info',
+                       f"Processing {len(target_tables)} tables in database {database}",
+                       {'tables': target_tables}
+                       )
+
+             if parallel and self.max_workers > 1:
+                 # Parallel processing
+                 with concurrent.futures.ThreadPoolExecutor(
+                         max_workers=self.max_workers
+                 ) as executor:
+                     futures = {}
+                     for table in target_tables:
+                         columns = columns_map.get(table) if columns_map else None
+                         futures[executor.submit(
+                             self.deduplicate_table,
+                             database, table, columns, dry_run
+                         )] = table
+
+                     for future in concurrent.futures.as_completed(futures):
+                         table = futures[future]
+                         try:
+                             dup_count, affected_rows = future.result()
+                             results[table] = (dup_count, affected_rows)
+                         except Exception as e:
+                             self._log('error',
+                                       f"Error while processing table {database}.{table}: {str(e)}",
+                                       {'error_type': type(e).__name__}
+                                       )
+                             results[table] = (0, 0)
+             else:
+                 # Serial processing
+                 for table in target_tables:
+                     columns = columns_map.get(table) if columns_map else None
+                     dup_count, affected_rows = self.deduplicate_table(
+                         database, table, columns, dry_run
+                     )
+                     results[table] = (dup_count, affected_rows)
+
+             # Aggregate the results
+             total_dup = sum(r[0] for r in results.values())
+             total_del = sum(r[1] for r in results.values())
+
+             self._log('info',
+                       f"Database {database} finished - found {total_dup} duplicate groups, deleted {total_del} rows",
+                       {'results': results}
+                       )
+
+             return results
+
+         except Exception as e:
+             self._log('error',
+                       f"Unexpected top-level error while processing database {database}: {str(e)}",
+                       {'error_type': type(e).__name__}
+                       )
+             return results
+
+     def deduplicate_all(
+             self,
+             databases: Optional[List[str]] = None,
+             tables_map: Optional[Dict[str, List[str]]] = None,
+             columns_map: Optional[Dict[str, Dict[str, List[str]]]] = None,
+             dry_run: bool = False,
+             parallel: bool = False
+     ) -> Dict[str, Dict[str, Tuple[int, int]]]:
+         """
+         Deduplicate every database.
+
+         :param databases: databases to process (all non-system databases when None)
+         :param tables_map: tables to process per database {database: [tables]}
+         :param columns_map: deduplication columns per table {database: {table: [columns]}}
+         :param dry_run: simulate only
+         :param parallel: process in parallel
+         :return: nested dict {database: {table: (duplicate groups found, rows deleted)}}
+         """
+         all_results = defaultdict(dict)
+
+         try:
+             # Determine the databases to process
+             target_dbs = databases or self._get_databases()
+             if not target_dbs:
+                 self._log('warning', "No databases to process")
+                 return all_results
+
+             self._log('info',
+                       f"Processing {len(target_dbs)} databases",
+                       {'databases': target_dbs}
+                       )
+
+             if parallel and self.max_workers > 1:
+                 # Process the databases in parallel
+                 with concurrent.futures.ThreadPoolExecutor(
+                         max_workers=self.max_workers
+                 ) as executor:
+                     futures = {}
+                     for db in target_dbs:
+                         tables = tables_map.get(db) if tables_map else None
+                         db_columns_map = columns_map.get(db) if columns_map else None
+                         futures[executor.submit(
+                             self.deduplicate_database,
+                             db, tables, db_columns_map, dry_run, False
+                         )] = db
+
+                     for future in concurrent.futures.as_completed(futures):
+                         db = futures[future]
+                         try:
+                             db_results = future.result()
+                             all_results[db] = db_results
+                         except Exception as e:
+                             self._log('error',
+                                       f"Error while processing database {db}: {str(e)}",
+                                       {'error_type': type(e).__name__}
+                                       )
+                             all_results[db] = {}
+             else:
+                 # Process the databases serially
+                 for db in target_dbs:
+                     tables = tables_map.get(db) if tables_map else None
+                     db_columns_map = columns_map.get(db) if columns_map else None
+                     db_results = self.deduplicate_database(
+                         db, tables, db_columns_map, dry_run, parallel
+                     )
+                     all_results[db] = db_results
+
+             # Aggregate the overall results
+             total_dup = sum(
+                 r[0] for db in all_results.values()
+                 for r in db.values()
+             )
+             total_del = sum(
+                 r[1] for db in all_results.values()
+                 for r in db.values()
+             )
+
+             self._log('info',
+                       f"All databases finished - found {total_dup} duplicate groups, deleted {total_del} rows",
+                       {'total_results': all_results}
+                       )
+
+             return all_results
+
+         except Exception as e:
+             self._log('error',
+                       f"Unexpected error during the global run: {str(e)}",
+                       {'error_type': type(e).__name__}
+                       )
+             return all_results
+
+     @_retry_on_failure
+     def _check_database_exists(self, database: str) -> bool:
+         """Check whether the database exists."""
+         sql = "SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = %s"
+
+         with self._get_connection() as conn:
+             with conn.cursor() as cursor:
+                 cursor.execute(sql, (database,))
+                 return bool(cursor.fetchone())
+
+     @_retry_on_failure
+     def _check_table_exists(self, database: str, table: str) -> bool:
+         """Check whether the table exists."""
+         sql = """
+         SELECT TABLE_NAME
+         FROM INFORMATION_SCHEMA.TABLES
+         WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s
+         """
+
+         with self._get_connection() as conn:
+             with conn.cursor() as cursor:
+                 cursor.execute(sql, (database, table))
+                 return bool(cursor.fetchone())
+
+     def close(self):
+         """Close the connection pool."""
+         try:
+             if hasattr(self, 'pool') and self.pool:
+                 self.pool.close()
+                 self._log('info', "Database connection pool closed")
+         except Exception as e:
+             self._log('error',
+                       f"Error while closing the connection pool: {str(e)}",
+                       {'error_type': type(e).__name__}
+                       )
+         finally:
+             self.pool = None
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         self.close()
+
+
  def main():
      uploader = MySQLUploader(
          username='root',
@@ -2417,7 +3035,7 @@ def main():
          data=data,
          set_typ=set_typ,  # define the columns and their data types
          primary_keys=[],  # create a unique primary key
-         check_duplicate=True,  # check for duplicate rows
+         check_duplicate=False,  # check for duplicate rows
          duplicate_columns=[],  # columns that form the deduplication key
          allow_null=False,  # allow inserting NULL values
          partition_by='year',  # split the table by year
@@ -2429,5 +3047,27 @@ def main():
      uploader.close()


+ def main2():
+     deduplicator = MySQLDeduplicator(
+         username='root',
+         password='1',
+         host='localhost',
+         port=3306
+     )
+
+     # # Deduplicate every database (single-threaded)
+     # deduplicator.deduplicate_all()
+
+     # # Deduplicate one database (multi-threaded)
+     # deduplicator.deduplicate_database('my_db', parallel=True)
+
+     # Deduplicate one table (on specific columns)
+     deduplicator.deduplicate_table('my_db', 'my_table', columns=['name', 'date'])
+
+     # Close the connections
+     deduplicator.close()
+

  if __name__ == '__main__':
      pass
+
+     main2()
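
The deletion strategy in `_deduplicate_table` stages duplicate groups in a temporary table holding `MIN(id)` per group, then deletes every row whose `id` is not a group minimum, so the row with the smallest `id` in each group survives. The whole approach assumes the table has an `id` column, which is why `_get_table_columns` excludes `id` from the grouping columns. A sketch that renders the same statement shapes for inspection (`my_db`, `my_table`, and the column list are hypothetical):

    import time

    # Mirrors the f-string templates in _deduplicate_table above.
    database, table, use_columns = 'my_db', 'my_table', ['name', 'date']
    temp_table = f"temp_{table}_{int(time.time())}"
    column_list = ', '.join(f'`{col}`' for col in use_columns)

    create_temp_sql = f"""
    CREATE TABLE `{database}`.`{temp_table}` AS
    SELECT MIN(`id`) as `min_id`, {column_list}, COUNT(*) as `dup_count`
    FROM `{database}`.`{table}`
    GROUP BY {column_list}
    HAVING COUNT(*) > 1
    """

    delete_dup_sql = f"""
    DELETE FROM `{database}`.`{table}`
    WHERE `id` NOT IN (
        SELECT `min_id` FROM `{database}`.`{temp_table}`
    ) AND ({' OR '.join(f'`{col}` IS NOT NULL' for col in use_columns)})
    """

    print(create_temp_sql, delete_dup_sql, sep='\n')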
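
`_retry_on_failure` retries only connection-level errors (`pymysql.OperationalError`/`pymysql.InterfaceError`) and sleeps `retry_interval * (attempt + 1)` seconds between attempts, a linear backoff of 5s, 10s, 15s with the defaults of 3 retries and a 5-second interval. The same pattern as a self-contained sketch, with a stand-in exception class (`TransientError` is hypothetical):

    import time
    from functools import wraps

    class TransientError(Exception):
        """Stands in for pymysql.OperationalError/InterfaceError."""

    def retry_on_failure(max_retries=3, retry_interval=5):
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                last_exception = None
                for attempt in range(max_retries + 1):
                    try:
                        return func(*args, **kwargs)
                    except TransientError as e:
                        last_exception = e
                        if attempt < max_retries:
                            # Linear backoff: 5s, 10s, 15s with the defaults.
                            time.sleep(retry_interval * (attempt + 1))
                raise last_exception
            return wrapper
        return decorator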
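
The new `dry_run` flag and the context-manager support (`__enter__`/`__exit__`) make it possible to count duplicates without deleting anything. A minimal sketch, assuming mdbq 3.9.4 is installed and the class is imported from the module this diff touches (the credentials and `my_db.my_table` names are placeholders):

    from mdbq.mysql.mysql import MySQLDeduplicator

    # The pool is closed automatically on exit via __exit__ -> close().
    with MySQLDeduplicator(username='root', password='password',
                           host='localhost', port=3306) as dedup:
        # dry_run=True only counts duplicate groups; no rows are deleted.
        dup_groups, deleted = dedup.deduplicate_table(
            'my_db', 'my_table', columns=['name', 'date'], dry_run=True
        )
        print(f'{dup_groups} duplicate groups; {deleted} rows deleted (always 0 in a dry run)')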
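
`deduplicate_database` takes a `columns_map` shaped `{table: [columns]}`, and `deduplicate_all` nests the same map one level deeper per database. With `parallel=True`, tables are dispatched to a `ThreadPoolExecutor` while the per-table lock set (`_processing_tables`) prevents two workers from touching the same table; `__init__` caps `max_workers` at 20. A sketch of a multi-threaded single-database pass (all database, table, and column names are placeholders):

    from mdbq.mysql.mysql import MySQLDeduplicator

    dedup = MySQLDeduplicator(username='root', password='password',
                              host='localhost', port=3306, max_workers=4)
    try:
        results = dedup.deduplicate_database(
            'my_db',
            columns_map={'orders': ['order_id'], 'events': ['name', 'date']},
            dry_run=True,   # flip to False to actually delete
            parallel=True,
        )
        for table, (dup_groups, deleted) in results.items():
            print(table, dup_groups, deleted)
    finally:
        dedup.close()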
mdbq-3.9.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: mdbq
- Version: 3.9.3
+ Version: 3.9.4
  Home-page: https://pypi.org/project/mdbq
  Author: xigua,
  Author-email: 2587125111@qq.com
mdbq-3.9.4.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
  mdbq/__init__.py,sha256=Il5Q9ATdX8yXqVxtP_nYqUhExzxPC_qk_WXQ_4h0exg,16
- mdbq/__version__.py,sha256=Pw2FixsnE8Hf360X55h_tF8Xeez7UgHd2pSgUkJY-v4,17
+ mdbq/__version__.py,sha256=44Qvc6l4hjIIQuGixaBICZNZB9jeL2ztNkT4fkONEBc,17
  mdbq/aggregation/__init__.py,sha256=EeDqX2Aml6SPx8363J-v1lz0EcZtgwIBYyCJV6CcEDU,40
  mdbq/aggregation/optimize.py,sha256=2oalzD9weZhDclUC22OLxYa8Zj7KnmsGUoUau_Jlyc4,19796
  mdbq/aggregation/query_data.py,sha256=5_OzjGR5Sq00q-EgAYmSE5V9i4Solw9y4hkldl4mvt8,179808
@@ -8,7 +8,7 @@ mdbq/config/config.py,sha256=eaTfrfXQ65xLqjr5I8-HkZd_jEY1JkGinEgv3TSLeoQ,3170
  mdbq/log/__init__.py,sha256=Mpbrav0s0ifLL7lVDAuePEi1hJKiSHhxcv1byBKDl5E,15
  mdbq/log/spider_logging.py,sha256=-ozWWEGm3HVv604ozs_OOvVwumjokmUPwbaodesUrPY,1664
  mdbq/mysql/__init__.py,sha256=A_DPJyAoEvTSFojiI2e94zP0FKtCkkwKP1kYUCSyQzo,11
- mdbq/mysql/mysql.py,sha256=YX-tgugceODrJHcXgbosWFVThjXv3I2gCvTt_siKBOI,108606
+ mdbq/mysql/mysql.py,sha256=ylGvSzFE2B78y77wG266tf_RaEuETnngqDKUTqjQCjs,132378
  mdbq/mysql/s_query.py,sha256=X055aLRAgxVvueXx4NbfNjp6MyBI02_XBb1pTKw09L0,8660
  mdbq/other/__init__.py,sha256=jso1oHcy6cJEfa7udS_9uO5X6kZLoPBF8l3wCYmr5dM,18
  mdbq/other/download_sku_picture.py,sha256=YU8DxKMXbdeE1OOKEA848WVp62jYHw5O4tXTjUdq9H0,44832
@@ -22,7 +22,7 @@ mdbq/redis/__init__.py,sha256=YtgBlVSMDphtpwYX248wGge1x-Ex_mMufz4-8W0XRmA,12
  mdbq/redis/getredis.py,sha256=Uk8-cOWT0JU1qRyIVqdbYokSLvkDIAfcokmYj1ebw8k,24104
  mdbq/spider/__init__.py,sha256=RBMFXGy_jd1HXZhngB2T2XTvJqki8P_Fr-pBcwijnew,18
  mdbq/spider/aikucun.py,sha256=OhyEv1VyAKTOHjLDM37iNDQeRg5OnrNoKODoG2VxHes,19806
- mdbq-3.9.3.dist-info/METADATA,sha256=Vt2mII7wAfEhzQa9G8PreCPV_hkdM1DLTTDcUMyepPg,363
- mdbq-3.9.3.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
- mdbq-3.9.3.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
- mdbq-3.9.3.dist-info/RECORD,,
+ mdbq-3.9.4.dist-info/METADATA,sha256=1FQB3vRRNlxontQEXd6gE-RhnHbjAPOZcnc_Xh9I4B0,363
+ mdbq-3.9.4.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+ mdbq-3.9.4.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
+ mdbq-3.9.4.dist-info/RECORD,,