crawlo 1.4.7__py3-none-any.whl → 1.4.8__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries, as they appear in their respective public registries; it is provided for informational purposes only.
Potentially problematic release.
- crawlo/__init__.py +90 -90
- crawlo/__version__.py +1 -1
- crawlo/cli.py +75 -75
- crawlo/commands/__init__.py +14 -14
- crawlo/commands/check.py +594 -594
- crawlo/commands/genspider.py +186 -186
- crawlo/commands/help.py +140 -140
- crawlo/commands/list.py +155 -155
- crawlo/commands/run.py +379 -379
- crawlo/commands/startproject.py +460 -460
- crawlo/commands/stats.py +187 -187
- crawlo/commands/utils.py +196 -196
- crawlo/config.py +320 -320
- crawlo/config_validator.py +277 -277
- crawlo/core/__init__.py +52 -52
- crawlo/core/engine.py +451 -451
- crawlo/core/processor.py +47 -47
- crawlo/core/scheduler.py +290 -290
- crawlo/crawler.py +698 -698
- crawlo/data/__init__.py +5 -5
- crawlo/data/user_agents.py +194 -194
- crawlo/downloader/__init__.py +280 -280
- crawlo/downloader/aiohttp_downloader.py +233 -233
- crawlo/downloader/cffi_downloader.py +250 -250
- crawlo/downloader/httpx_downloader.py +265 -265
- crawlo/downloader/hybrid_downloader.py +212 -212
- crawlo/downloader/playwright_downloader.py +425 -425
- crawlo/downloader/selenium_downloader.py +486 -486
- crawlo/event.py +45 -45
- crawlo/exceptions.py +214 -214
- crawlo/extension/__init__.py +64 -64
- crawlo/extension/health_check.py +141 -141
- crawlo/extension/log_interval.py +94 -94
- crawlo/extension/log_stats.py +70 -70
- crawlo/extension/logging_extension.py +53 -53
- crawlo/extension/memory_monitor.py +104 -104
- crawlo/extension/performance_profiler.py +133 -133
- crawlo/extension/request_recorder.py +107 -107
- crawlo/factories/__init__.py +27 -27
- crawlo/factories/base.py +68 -68
- crawlo/factories/crawler.py +104 -104
- crawlo/factories/registry.py +84 -84
- crawlo/factories/utils.py +134 -134
- crawlo/filters/__init__.py +170 -170
- crawlo/filters/aioredis_filter.py +347 -347
- crawlo/filters/memory_filter.py +261 -261
- crawlo/framework.py +306 -306
- crawlo/initialization/__init__.py +44 -44
- crawlo/initialization/built_in.py +391 -391
- crawlo/initialization/context.py +141 -141
- crawlo/initialization/core.py +240 -240
- crawlo/initialization/phases.py +229 -229
- crawlo/initialization/registry.py +143 -143
- crawlo/initialization/utils.py +48 -48
- crawlo/interfaces.py +23 -23
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +23 -23
- crawlo/items/fields.py +52 -52
- crawlo/items/items.py +104 -104
- crawlo/logging/__init__.py +42 -42
- crawlo/logging/config.py +280 -276
- crawlo/logging/factory.py +175 -175
- crawlo/logging/manager.py +104 -104
- crawlo/middleware/__init__.py +87 -87
- crawlo/middleware/default_header.py +132 -132
- crawlo/middleware/download_delay.py +104 -104
- crawlo/middleware/middleware_manager.py +142 -142
- crawlo/middleware/offsite.py +123 -123
- crawlo/middleware/proxy.py +209 -209
- crawlo/middleware/request_ignore.py +86 -86
- crawlo/middleware/response_code.py +150 -150
- crawlo/middleware/response_filter.py +136 -136
- crawlo/middleware/retry.py +124 -124
- crawlo/mode_manager.py +287 -287
- crawlo/network/__init__.py +21 -21
- crawlo/network/request.py +408 -376
- crawlo/network/response.py +598 -569
- crawlo/pipelines/__init__.py +52 -52
- crawlo/pipelines/base_pipeline.py +452 -452
- crawlo/pipelines/bloom_dedup_pipeline.py +145 -146
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +316 -316
- crawlo/pipelines/database_dedup_pipeline.py +196 -197
- crawlo/pipelines/json_pipeline.py +218 -218
- crawlo/pipelines/memory_dedup_pipeline.py +104 -105
- crawlo/pipelines/mongo_pipeline.py +140 -139
- crawlo/pipelines/mysql_pipeline.py +468 -469
- crawlo/pipelines/pipeline_manager.py +100 -100
- crawlo/pipelines/redis_dedup_pipeline.py +155 -155
- crawlo/project.py +347 -347
- crawlo/queue/__init__.py +9 -9
- crawlo/queue/pqueue.py +38 -38
- crawlo/queue/queue_manager.py +591 -591
- crawlo/queue/redis_priority_queue.py +518 -518
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +287 -284
- crawlo/settings/setting_manager.py +219 -219
- crawlo/spider/__init__.py +658 -657
- crawlo/stats_collector.py +81 -81
- crawlo/subscriber.py +129 -129
- crawlo/task_manager.py +138 -138
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +1 -1
- crawlo/templates/project/items.py.tmpl +13 -13
- crawlo/templates/project/middlewares.py.tmpl +38 -38
- crawlo/templates/project/pipelines.py.tmpl +35 -35
- crawlo/templates/project/settings.py.tmpl +113 -109
- crawlo/templates/project/settings_distributed.py.tmpl +160 -156
- crawlo/templates/project/settings_gentle.py.tmpl +174 -170
- crawlo/templates/project/settings_high_performance.py.tmpl +175 -171
- crawlo/templates/project/settings_minimal.py.tmpl +102 -98
- crawlo/templates/project/settings_simple.py.tmpl +172 -168
- crawlo/templates/project/spiders/__init__.py.tmpl +9 -9
- crawlo/templates/run.py.tmpl +23 -23
- crawlo/templates/spider/spider.py.tmpl +32 -32
- crawlo/templates/spiders_init.py.tmpl +4 -4
- crawlo/tools/__init__.py +86 -86
- crawlo/tools/date_tools.py +289 -289
- crawlo/tools/distributed_coordinator.py +384 -384
- crawlo/tools/scenario_adapter.py +262 -262
- crawlo/tools/text_cleaner.py +232 -232
- crawlo/utils/__init__.py +74 -50
- crawlo/utils/batch_processor.py +276 -276
- crawlo/utils/config_manager.py +442 -442
- crawlo/utils/controlled_spider_mixin.py +439 -439
- crawlo/utils/db_helper.py +250 -250
- crawlo/utils/encoding_helper.py +190 -0
- crawlo/utils/error_handler.py +410 -410
- crawlo/utils/fingerprint.py +121 -121
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_helper.py +344 -344
- crawlo/utils/leak_detector.py +335 -335
- crawlo/utils/misc.py +81 -81
- crawlo/utils/mongo_connection_pool.py +157 -157
- crawlo/utils/mysql_connection_pool.py +197 -197
- crawlo/utils/performance_monitor.py +285 -285
- crawlo/utils/queue_helper.py +175 -175
- crawlo/utils/redis_checker.py +90 -90
- crawlo/utils/redis_connection_pool.py +578 -578
- crawlo/utils/redis_key_validator.py +198 -198
- crawlo/utils/request.py +278 -278
- crawlo/utils/request_serializer.py +225 -225
- crawlo/utils/resource_manager.py +337 -337
- crawlo/utils/response_helper.py +113 -0
- crawlo/utils/selector_helper.py +138 -137
- crawlo/utils/singleton.py +69 -69
- crawlo/utils/spider_loader.py +201 -201
- crawlo/utils/text_helper.py +94 -94
- {crawlo-1.4.7.dist-info → crawlo-1.4.8.dist-info}/METADATA +831 -689
- crawlo-1.4.8.dist-info/RECORD +347 -0
- examples/__init__.py +7 -7
- tests/__init__.py +7 -7
- tests/advanced_tools_example.py +217 -217
- tests/authenticated_proxy_example.py +110 -110
- tests/baidu_performance_test.py +108 -108
- tests/baidu_test.py +59 -59
- tests/bug_check_test.py +250 -250
- tests/cleaners_example.py +160 -160
- tests/comprehensive_framework_test.py +212 -212
- tests/comprehensive_test.py +81 -81
- tests/comprehensive_testing_summary.md +186 -186
- tests/config_validation_demo.py +142 -142
- tests/controlled_spider_example.py +205 -205
- tests/date_tools_example.py +180 -180
- tests/debug_configure.py +69 -69
- tests/debug_framework_logger.py +84 -84
- tests/debug_log_config.py +126 -126
- tests/debug_log_levels.py +63 -63
- tests/debug_pipelines.py +66 -66
- tests/detailed_log_test.py +233 -233
- tests/direct_selector_helper_test.py +96 -96
- tests/distributed_dedup_test.py +467 -467
- tests/distributed_test.py +66 -66
- tests/distributed_test_debug.py +76 -76
- tests/dynamic_loading_example.py +523 -523
- tests/dynamic_loading_test.py +104 -104
- tests/error_handling_example.py +171 -171
- tests/explain_mysql_update_behavior.py +76 -76
- tests/final_comprehensive_test.py +151 -151
- tests/final_log_test.py +260 -260
- tests/final_validation_test.py +182 -182
- tests/fix_log_test.py +142 -142
- tests/framework_performance_test.py +202 -202
- tests/log_buffering_test.py +111 -111
- tests/log_generation_timing_test.py +153 -153
- tests/monitor_redis_dedup.sh +72 -72
- tests/ofweek_scrapy/ofweek_scrapy/items.py +12 -12
- tests/ofweek_scrapy/ofweek_scrapy/middlewares.py +100 -100
- tests/ofweek_scrapy/ofweek_scrapy/pipelines.py +13 -13
- tests/ofweek_scrapy/ofweek_scrapy/settings.py +84 -84
- tests/ofweek_scrapy/ofweek_scrapy/spiders/__init__.py +4 -4
- tests/ofweek_scrapy/scrapy.cfg +11 -11
- tests/optimized_performance_test.py +211 -211
- tests/performance_comparison.py +244 -244
- tests/queue_blocking_test.py +113 -113
- tests/queue_test.py +89 -89
- tests/redis_key_validation_demo.py +130 -130
- tests/request_params_example.py +150 -150
- tests/response_improvements_example.py +144 -144
- tests/scrapy_comparison/ofweek_scrapy.py +138 -138
- tests/scrapy_comparison/scrapy_test.py +133 -133
- tests/simple_cli_test.py +54 -54
- tests/simple_command_test.py +119 -119
- tests/simple_crawlo_test.py +126 -126
- tests/simple_follow_test.py +38 -38
- tests/simple_log_test2.py +137 -137
- tests/simple_optimization_test.py +128 -128
- tests/simple_queue_type_test.py +41 -41
- tests/simple_response_selector_test.py +94 -94
- tests/simple_selector_helper_test.py +154 -154
- tests/simple_selector_test.py +207 -207
- tests/simple_spider_test.py +49 -49
- tests/simple_url_test.py +73 -73
- tests/simulate_mysql_update_test.py +139 -139
- tests/spider_log_timing_test.py +177 -177
- tests/test_advanced_tools.py +148 -148
- tests/test_all_commands.py +230 -230
- tests/test_all_pipeline_fingerprints.py +133 -133
- tests/test_all_redis_key_configs.py +145 -145
- tests/test_asyncmy_usage.py +56 -56
- tests/test_batch_processor.py +178 -178
- tests/test_cleaners.py +54 -54
- tests/test_cli_arguments.py +118 -118
- tests/test_component_factory.py +174 -174
- tests/test_config_consistency.py +80 -80
- tests/test_config_merge.py +152 -152
- tests/test_config_validator.py +182 -182
- tests/test_controlled_spider_mixin.py +79 -79
- tests/test_crawler_process_import.py +38 -38
- tests/test_crawler_process_spider_modules.py +47 -47
- tests/test_crawlo_proxy_integration.py +114 -114
- tests/test_date_tools.py +123 -123
- tests/test_dedup_fix.py +220 -220
- tests/test_dedup_pipeline_consistency.py +124 -124
- tests/test_default_header_middleware.py +313 -313
- tests/test_distributed.py +65 -65
- tests/test_double_crawlo_fix.py +204 -204
- tests/test_double_crawlo_fix_simple.py +124 -124
- tests/test_download_delay_middleware.py +221 -221
- tests/test_downloader_proxy_compatibility.py +272 -272
- tests/test_edge_cases.py +305 -305
- tests/test_encoding_core.py +56 -56
- tests/test_encoding_detection.py +126 -126
- tests/test_enhanced_error_handler.py +270 -270
- tests/test_enhanced_error_handler_comprehensive.py +245 -245
- tests/test_error_handler_compatibility.py +112 -112
- tests/test_factories.py +252 -252
- tests/test_factory_compatibility.py +196 -196
- tests/test_final_validation.py +153 -153
- tests/test_fingerprint_consistency.py +135 -135
- tests/test_fingerprint_simple.py +51 -51
- tests/test_get_component_logger.py +83 -83
- tests/test_hash_performance.py +99 -99
- tests/test_integration.py +169 -169
- tests/test_item_dedup_redis_key.py +122 -122
- tests/test_large_scale_helper.py +235 -235
- tests/test_logging_enhancements.py +374 -374
- tests/test_logging_final.py +184 -184
- tests/test_logging_integration.py +312 -312
- tests/test_logging_system.py +282 -282
- tests/test_middleware_debug.py +141 -141
- tests/test_mode_consistency.py +51 -51
- tests/test_multi_directory.py +67 -67
- tests/test_multiple_spider_modules.py +80 -80
- tests/test_mysql_pipeline_config.py +164 -164
- tests/test_mysql_pipeline_error.py +98 -98
- tests/test_mysql_pipeline_init_log.py +82 -82
- tests/test_mysql_pipeline_integration.py +132 -132
- tests/test_mysql_pipeline_refactor.py +143 -143
- tests/test_mysql_pipeline_refactor_simple.py +85 -85
- tests/test_mysql_pipeline_robustness.py +195 -195
- tests/test_mysql_pipeline_types.py +88 -88
- tests/test_mysql_update_columns.py +93 -93
- tests/test_offsite_middleware.py +244 -244
- tests/test_offsite_middleware_simple.py +203 -203
- tests/test_optimized_selector_naming.py +100 -100
- tests/test_parsel.py +29 -29
- tests/test_performance.py +327 -327
- tests/test_performance_monitor.py +115 -115
- tests/test_pipeline_fingerprint_consistency.py +86 -86
- tests/test_priority_behavior.py +211 -211
- tests/test_priority_consistency.py +151 -151
- tests/test_priority_consistency_fixed.py +249 -249
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware.py +217 -217
- tests/test_proxy_middleware_enhanced.py +212 -212
- tests/test_proxy_middleware_integration.py +142 -142
- tests/test_proxy_middleware_refactored.py +207 -207
- tests/test_proxy_only.py +83 -83
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_proxy_with_downloader.py +152 -152
- tests/test_queue_empty_check.py +41 -41
- tests/test_queue_manager_double_crawlo.py +173 -173
- tests/test_queue_manager_redis_key.py +179 -179
- tests/test_queue_naming.py +154 -154
- tests/test_queue_type.py +106 -106
- tests/test_queue_type_redis_config_consistency.py +130 -130
- tests/test_random_headers_default.py +322 -322
- tests/test_random_headers_necessity.py +308 -308
- tests/test_random_user_agent.py +72 -72
- tests/test_redis_config.py +28 -28
- tests/test_redis_connection_pool.py +294 -294
- tests/test_redis_key_naming.py +181 -181
- tests/test_redis_key_validator.py +123 -123
- tests/test_redis_queue.py +224 -224
- tests/test_redis_queue_name_fix.py +175 -175
- tests/test_redis_queue_type_fallback.py +129 -129
- tests/test_request_ignore_middleware.py +182 -182
- tests/test_request_params.py +111 -111
- tests/test_request_serialization.py +70 -70
- tests/test_response_code_middleware.py +349 -349
- tests/test_response_filter_middleware.py +427 -427
- tests/test_response_follow.py +104 -104
- tests/test_response_improvements.py +152 -152
- tests/test_response_selector_methods.py +92 -92
- tests/test_response_url_methods.py +70 -70
- tests/test_response_urljoin.py +86 -86
- tests/test_retry_middleware.py +333 -333
- tests/test_retry_middleware_realistic.py +273 -273
- tests/test_scheduler.py +252 -252
- tests/test_scheduler_config_update.py +133 -133
- tests/test_scrapy_style_encoding.py +112 -112
- tests/test_selector_helper.py +100 -100
- tests/test_selector_optimizations.py +146 -146
- tests/test_simple_response.py +61 -61
- tests/test_spider_loader.py +49 -49
- tests/test_spider_loader_comprehensive.py +69 -69
- tests/test_spider_modules.py +84 -84
- tests/test_spiders/test_spider.py +9 -9
- tests/test_telecom_spider_redis_key.py +205 -205
- tests/test_template_content.py +87 -87
- tests/test_template_redis_key.py +134 -134
- tests/test_tools.py +159 -159
- tests/test_user_agent_randomness.py +176 -176
- tests/test_user_agents.py +96 -96
- tests/untested_features_report.md +138 -138
- tests/verify_debug.py +51 -51
- tests/verify_distributed.py +117 -117
- tests/verify_log_fix.py +111 -111
- tests/verify_mysql_warnings.py +109 -109
- crawlo/utils/log.py +0 -80
- crawlo/utils/url_utils.py +0 -40
- crawlo-1.4.7.dist-info/RECORD +0 -347
- {crawlo-1.4.7.dist-info → crawlo-1.4.8.dist-info}/WHEEL +0 -0
- {crawlo-1.4.7.dist-info → crawlo-1.4.8.dist-info}/entry_points.txt +0 -0
- {crawlo-1.4.7.dist-info → crawlo-1.4.8.dist-info}/top_level.txt +0 -0
crawlo/queue/redis_priority_queue.py
@@ -1,519 +1,519 @@
-import asyncio
-import pickle
-import time
-import traceback
-from typing import Optional, TYPE_CHECKING, List, Union, Any
-
-import redis.asyncio as aioredis
-
-# Try to import Redis cluster support
-try:
-    from redis.asyncio.cluster import RedisCluster
-    REDIS_CLUSTER_AVAILABLE = True
-except ImportError:
-    RedisCluster = None
-    REDIS_CLUSTER_AVAILABLE = False
-
-# Use TYPE_CHECKING to avoid circular imports at runtime
-if TYPE_CHECKING:
-    from crawlo import Request
-
-from crawlo.utils.error_handler import ErrorHandler, ErrorContext
-from crawlo.logging import get_logger
-from crawlo.utils.redis_connection_pool import get_redis_pool, RedisConnectionPool
-from crawlo.utils.request_serializer import RequestSerializer
-
-# Lazy initialization to avoid circular dependencies
-_logger = None
-_error_handler = None
-
-
-def get_module_logger():
-    global _logger
-    if _logger is None:
-        _logger = get_logger(__name__)
-    return _logger
-
-
-def get_module_error_handler():
-    global _error_handler
-    if _error_handler is None:
-        _error_handler = ErrorHandler(__name__)
-    return _error_handler
-
-
-class RedisPriorityQueue:
-    """
-    Distributed asynchronous priority queue backed by Redis
-    """
-
-    def __init__(
-        self,
-        redis_url: Optional[str] = None,
-        queue_name: Optional[str] = None,  # default changed to None
-        processing_queue: Optional[str] = None,  # default changed to None
-        failed_queue: Optional[str] = None,  # default changed to None
-        max_retries: int = 3,
-        timeout: int = 300,  # task processing timeout (seconds)
-        max_connections: int = 10,  # connection pool size
-        module_name: str = "default",  # added module_name parameter
-        is_cluster: bool = False,  # whether to run in cluster mode
-        cluster_nodes: Optional[List[str]] = None  # list of cluster nodes
-    ):
-        # os.getenv() is no longer read directly; redis_url must be passed in
-        if redis_url is None:
-            # Without a redis_url, fail fast: it must be configured in settings
-            raise ValueError("redis_url must be provided. Configure it in settings instead of using os.getenv()")
-
-        self.redis_url = redis_url
-        self.module_name = module_name  # keep module_name
-        self.is_cluster = is_cluster
-        self.cluster_nodes = cluster_nodes
-
-        # Derive queue_name from module_name when not provided
-        if queue_name is None:
-            self.queue_name = f"crawlo:{module_name}:queue:requests"
-        else:
-            # Normalize the queue name, collapsing repeated crawlo prefixes
-            self.queue_name = self._normalize_queue_name(queue_name)
-
-        # Derive processing_queue from queue_name when not provided
-        if processing_queue is None:
-            if ":queue:requests" in self.queue_name:
-                self.processing_queue = self.queue_name.replace(":queue:requests", ":queue:processing")
-            else:
-                self.processing_queue = f"{self.queue_name}:processing"
-        else:
-            self.processing_queue = processing_queue
-
-        # Derive failed_queue from queue_name when not provided
-        if failed_queue is None:
-            if ":queue:requests" in self.queue_name:
-                self.failed_queue = self.queue_name.replace(":queue:requests", ":queue:failed")
-            else:
-                self.failed_queue = f"{self.queue_name}:failed"
-        else:
-            self.failed_queue = failed_queue
-
-        self.max_retries = max_retries
-        self.timeout = timeout
-        self.max_connections = max_connections
-        self._redis_pool: Optional[RedisConnectionPool] = None
-        self._redis: Optional[Any] = None
-        self._lock = asyncio.Lock()  # lock guarding connection initialization
-        self.request_serializer = RequestSerializer()  # handles serialization
-
-    def _normalize_queue_name(self, queue_name: str) -> str:
-        """
-        Normalize a queue name, collapsing repeated crawlo prefixes
-
-        :param queue_name: original queue name
-        :return: normalized queue name
-        """
-        # Already well-formed (starts with crawlo: but not crawlo:crawlo:): keep as-is
-        if queue_name.startswith("crawlo:") and not queue_name.startswith("crawlo:crawlo:"):
-            return queue_name
-
-        # Collapse a triple crawlo prefix to the standard format
-        if queue_name.startswith("crawlo:crawlo:crawlo:"):
-            # Triple crawlo prefix: reduce to a single crawlo: prefix
-            remaining = queue_name[21:]  # strip the "crawlo:crawlo:crawlo:" prefix
-            if remaining:
-                return f"crawlo:{remaining}"
-            else:
-                return "crawlo:requests"  # default name
-
-        # Collapse a double crawlo prefix
-        elif queue_name.startswith("crawlo:crawlo:"):
-            # Double crawlo prefix: reduce to a single crawlo: prefix
-            remaining = queue_name[14:]  # strip the "crawlo:crawlo:" prefix
-            if remaining:
-                return f"crawlo:{remaining}"
-            else:
-                return "crawlo:requests"  # default name
-
-        # No crawlo prefix at all
-        elif not queue_name.startswith("crawlo:"):
-            # add the crawlo: prefix
-            if queue_name:
-                return f"crawlo:{queue_name}"
-            else:
-                return "crawlo:requests"  # default name
-
-        # Anything else: keep unchanged
-        else:
-            return queue_name
-
-    async def connect(self, max_retries=3, delay=1):
-        """Connect to Redis asynchronously, with retries"""
-        async with self._lock:
-            if self._redis is not None:
-                # Already connected: verify the connection is still alive
-                try:
-                    await self._redis.ping()
-                    return self._redis
-                except Exception:
-                    # Connection is stale; reconnect
-                    self._redis = None
-
-            for attempt in range(max_retries):
-                try:
-                    # Use the optimized connection pool; decode_responses=False avoids encoding issues
-                    self._redis_pool = get_redis_pool(
-                        self.redis_url,
-                        is_cluster=self.is_cluster,
-                        cluster_nodes=self.cluster_nodes,
-                        max_connections=self.max_connections,
-                        socket_connect_timeout=5,
-                        socket_timeout=30,
-                        health_check_interval=30,
-                        retry_on_timeout=True,
-                        decode_responses=False,  # never auto-decode responses
-                        encoding='utf-8'
-                    )
-
-                    self._redis = await self._redis_pool.get_connection()
-
-                    # Verify the connection
-                    if self._redis:
-                        await self._redis.ping()
-                    return self._redis
-                except Exception as e:
-                    error_msg = f"Redis connection failed (attempt {attempt + 1}/{max_retries}, Module: {self.module_name}): {e}"
-                    get_module_logger().warning(error_msg)
-                    get_module_logger().debug(f"Detailed error info:\n{traceback.format_exc()}")
-                    if attempt < max_retries - 1:
-                        await asyncio.sleep(delay)
-                    else:
-                        raise ConnectionError(f"Unable to connect to Redis (Module: {self.module_name}): {e}")
-
-    async def _ensure_connection(self):
-        """Ensure the connection is alive"""
-        if self._redis is None:
-            await self.connect()
-        try:
-            if self._redis:
-                await self._redis.ping()
-        except Exception as e:
-            get_module_logger().warning(f"Redis connection lost (Module: {self.module_name}), reconnecting...: {e}")
-            self._redis = None
-            await self.connect()
-
-    def _is_cluster_mode(self) -> bool:
-        """Check whether we are running against a Redis cluster"""
-        if REDIS_CLUSTER_AVAILABLE and RedisCluster is not None:
-            # Check whether _redis is a RedisCluster instance
-            if self._redis is not None and isinstance(self._redis, RedisCluster):
-                return True
-        return False
-
-    async def put(self, request, priority: int = 0) -> bool:
-        """Put a request onto the queue"""
-        try:
-            await self._ensure_connection()
-            if not self._redis:
-                return False
-
-            # Fix for priority-ordering consistency
-            # Before: score = -priority (higher priority values dequeued first)
-            # Now: score = priority (lower priority values dequeue first, matching the in-memory queue)
-            score = priority
-            key = self._get_request_key(request)
-
-            # 🔥 Clean the Request with the dedicated serialization helper
-            clean_request = self.request_serializer.prepare_for_serialization(request)
-
-            # Make sure the serialized payload can be deserialized
-            try:
-                serialized = pickle.dumps(clean_request)
-                # verify the payload round-trips
-                pickle.loads(serialized)
-            except Exception as serialize_error:
-                get_module_logger().error(f"Request serialization check failed (Module: {self.module_name}): {serialize_error}")
-                return False
-
-            # Cluster-mode handling
-            if self._is_cluster_mode():
-                # In cluster mode, all keys must land in the same slot,
-                # achieved by adding the same hash tag to every key name
-                hash_tag = "{queue}"  # hash tag keeps the keys in one slot
-                queue_name_with_tag = f"{self.queue_name}{hash_tag}"
-                data_key_with_tag = f"{self.queue_name}:data{hash_tag}"
-
-                pipe = self._redis.pipeline()
-                pipe.zadd(queue_name_with_tag, {key: score})
-                pipe.hset(data_key_with_tag, key, serialized)
-                result = await pipe.execute()
-            else:
-                pipe = self._redis.pipeline()
-                pipe.zadd(self.queue_name, {key: score})
-                pipe.hset(f"{self.queue_name}:data", key, serialized)
-                result = await pipe.execute()
-
-            if result[0] > 0:
-                get_module_logger().debug(f"Enqueued (Module: {self.module_name}): {request.url}")
-            return result[0] > 0
-        except Exception as e:
-            error_context = ErrorContext(
-                context=f"Failed to enqueue (Module: {self.module_name})"
-            )
-            get_module_error_handler().handle_error(
-                e,
-                context=error_context,
-                raise_error=False
-            )
-            return False
-
-    async def get(self, timeout: float = 5.0):
-        """
-        Get a request (with a timeout)
-        :param timeout: maximum wait in seconds, to avoid polling forever
-        """
-        try:
-            await self._ensure_connection()
-            if not self._redis:
-                return None
-
-            start_time = asyncio.get_event_loop().time()
-
-            while True:
-                # Try to fetch a task
-                if self._is_cluster_mode():
-                    # cluster-mode path
-                    hash_tag = "{queue}"
-                    queue_name_with_tag = f"{self.queue_name}{hash_tag}"
-                    result = await self._redis.zpopmin(queue_name_with_tag, count=1)
-                else:
-                    result = await self._redis.zpopmin(self.queue_name, count=1)
-
-                if result:
-                    key, score = result[0]
-                    data_key = f"{self.queue_name}:data"
-                    if self._is_cluster_mode():
-                        hash_tag = "{queue}"
-                        data_key = f"{self.queue_name}:data{hash_tag}"
-
-                    serialized = await self._redis.hget(data_key, key)
-                    if not serialized:
-                        continue
-
-                    # Move to the processing queue
-                    processing_key = f"{key}:{int(time.time())}"
-                    processing_queue = self.processing_queue
-                    processing_data_key = f"{self.processing_queue}:data"
-
-                    if self._is_cluster_mode():
-                        hash_tag = "{queue}"
-                        processing_queue = f"{self.processing_queue}{hash_tag}"
-                        processing_data_key = f"{self.processing_queue}:data{hash_tag}"
-
-                    if self._is_cluster_mode():
-                        pipe = self._redis.pipeline()
-                        pipe.zadd(processing_queue, {processing_key: time.time() + self.timeout})
-                        pipe.hset(processing_data_key, processing_key, serialized)
-                        pipe.hdel(data_key, key)
-                        await pipe.execute()
-                    else:
-                        pipe = self._redis.pipeline()
-                        pipe.zadd(processing_queue, {processing_key: time.time() + self.timeout})
-                        pipe.hset(processing_data_key, processing_key, serialized)
-                        pipe.hdel(data_key, key)
-                        await pipe.execute()
-
-                    # Safer deserialization
-                    try:
-                        # try standard pickle first
-                        request = pickle.loads(serialized)
-                        return request
-                    except UnicodeDecodeError:
-                        # on encoding errors, retry with latin1
-                        request = pickle.loads(serialized, encoding='latin1')
-                        return request
-                    except Exception as pickle_error:
-                        # if unpickling fails, log the error and skip this task
-                        get_module_logger().error(f"Unable to deserialize request data (Module: {self.module_name}): {pickle_error}")
-                        # drop the invalid task from the processing queue
-                        if self._is_cluster_mode():
-                            await self._redis.zrem(processing_queue, processing_key)
-                            await self._redis.hdel(processing_data_key, processing_key)
-                        else:
-                            await self._redis.zrem(processing_queue, processing_key)
-                            await self._redis.hdel(processing_data_key, processing_key)
-                        # move on to the next task
-                        continue
-
-                # Check for timeout
-                if asyncio.get_event_loop().time() - start_time > timeout:
-                    return None
-
-                # Sleep briefly to avoid busy polling, kept short for responsiveness
-                await asyncio.sleep(0.001)  # reduced from 0.01 to 0.001
-
-        except Exception as e:
-            error_context = ErrorContext(
-                context=f"Failed to fetch a task from the queue (Module: {self.module_name})"
-            )
-            get_module_error_handler().handle_error(
-                e,
-                context=error_context,
-                raise_error=False
-            )
-            return None
-
-    async def ack(self, request: "Request"):
-        """Acknowledge task completion"""
-        try:
-            await self._ensure_connection()
-            if not self._redis:
-                return
-
-            key = self._get_request_key(request)
-            processing_queue = self.processing_queue
-            processing_data_key = f"{self.processing_queue}:data"
-
-            if self._is_cluster_mode():
-                hash_tag = "{queue}"
-                processing_queue = f"{self.processing_queue}{hash_tag}"
-                processing_data_key = f"{self.processing_queue}:data{hash_tag}"
-
-            cursor = 0
-            while True:
-                if self._is_cluster_mode():
-                    cursor, keys = await self._redis.zscan(processing_queue, cursor, match=f"{key}:*")
-                else:
-                    cursor, keys = await self._redis.zscan(processing_queue, cursor, match=f"{key}:*")
-                if keys:
-                    if self._is_cluster_mode():
-                        pipe = self._redis.pipeline()
-                        for k in keys:
-                            pipe.zrem(processing_queue, k)
-                            pipe.hdel(processing_data_key, k)
-                        await pipe.execute()
-                    else:
-                        pipe = self._redis.pipeline()
-                        for k in keys:
-                            pipe.zrem(processing_queue, k)
-                            pipe.hdel(processing_data_key, k)
-                        await pipe.execute()
-                if cursor == 0:
-                    break
-        except Exception as e:
-            error_context = ErrorContext(
-                context=f"Failed to acknowledge task completion (Module: {self.module_name})"
-            )
-            get_module_error_handler().handle_error(
-                e,
-                context=error_context,
-                raise_error=False
-            )
-
-    async def fail(self, request: "Request", reason: str = ""):
-        """Mark a task as failed"""
-        try:
-            await self._ensure_connection()
-            if not self._redis:
-                return
-
-            key = self._get_request_key(request)
-            await self.ack(request)
-
-            retry_key = f"{self.failed_queue}:retries:{key}"
-            failed_queue = self.failed_queue
-
-            if self._is_cluster_mode():
-                hash_tag = "{queue}"
-                retry_key = f"{self.failed_queue}:retries:{key}{hash_tag}"
-                failed_queue = f"{self.failed_queue}{hash_tag}"
-
-            retries = await self._redis.incr(retry_key)
-            await self._redis.expire(retry_key, 86400)
-
-            if retries <= self.max_retries:
-                await self.put(request, priority=request.priority + 1)
-                get_module_logger().info(
-                    f"Task retry [{retries}/{self.max_retries}] (Module: {self.module_name}): {request.url}")
-            else:
-                failed_data = {
-                    "url": request.url,
-                    "reason": reason,
-                    "retries": retries,
-                    "failed_at": time.time(),
-                    "request_pickle": pickle.dumps(request).hex(),  # optional: keep the full request
-                }
-                await self._redis.lpush(failed_queue, pickle.dumps(failed_data))
-                get_module_logger().error(f"Task permanently failed [{retries} attempts] (Module: {self.module_name}): {request.url}")
-        except Exception as e:
-            error_context = ErrorContext(
-                context=f"Failed to mark task as failed (Module: {self.module_name})"
-            )
-            get_module_error_handler().handle_error(
-                e,
-                context=error_context,
-                raise_error=False
-            )
-
-    def _get_request_key(self, request) -> str:
-        """Generate a unique key for a request"""
-        return f"{self.module_name}:url:{hash(request.url) & 0x7FFFFFFF}"  # keep it positive
-
-    async def qsize(self) -> int:
-        """Get queue size"""
-        try:
-            await self._ensure_connection()
-            if not self._redis:
-                return 0
-
-            if self._is_cluster_mode():
-                hash_tag = "{queue}"
-                queue_name_with_tag = f"{self.queue_name}{hash_tag}"
-                return await self._redis.zcard(queue_name_with_tag)
-            else:
-                return await self._redis.zcard(self.queue_name)
-        except Exception as e:
-            error_context = ErrorContext(
-                context=f"Failed to get queue size (Module: {self.module_name})"
-            )
-            get_module_error_handler().handle_error(
-                e,
-                context=error_context,
-                raise_error=False
-            )
-            return 0
-
-    async def close(self):
-        """Close the connection"""
-        try:
-            # Explicitly close the Redis connection
-            if self._redis is not None:
-                try:
-                    # try to close the connection
-                    if hasattr(self._redis, 'close'):
-                        close_result = self._redis.close()
-                        if asyncio.iscoroutine(close_result):
-                            await close_result
-
-                    # wait for the close to finish
-                    if hasattr(self._redis, 'wait_closed'):
-                        wait_result = self._redis.wait_closed()
-                        if asyncio.iscoroutine(wait_result):
-                            await wait_result
-                except Exception as close_error:
-                    get_module_logger().warning(
-                        f"Error closing Redis connection (Module: {self.module_name}): {close_error}"
-                    )
-                finally:
-                    self._redis = None
-
-            # Release the pool reference (the pool itself is owned by the global manager)
-            self._redis_pool = None
-
-            get_module_logger().debug(f"Redis connection released (Module: {self.module_name})")
-        except Exception as e:
-            error_context = ErrorContext(
-                context=f"Failed to release the Redis connection (Module: {self.module_name})"
-            )
-            get_module_error_handler().handle_error(
-                e,
-                context=error_context,
-                raise_error=False
+import asyncio
+import pickle
+import time
+import traceback
+from typing import Optional, TYPE_CHECKING, List, Union, Any
+
+import redis.asyncio as aioredis
+
+# Try to import Redis cluster support
+try:
+    from redis.asyncio.cluster import RedisCluster
+    REDIS_CLUSTER_AVAILABLE = True
+except ImportError:
+    RedisCluster = None
+    REDIS_CLUSTER_AVAILABLE = False
+
+# Use TYPE_CHECKING to avoid circular imports at runtime
+if TYPE_CHECKING:
+    from crawlo import Request
+
+from crawlo.utils.error_handler import ErrorHandler, ErrorContext
+from crawlo.logging import get_logger
+from crawlo.utils.redis_connection_pool import get_redis_pool, RedisConnectionPool
+from crawlo.utils.request_serializer import RequestSerializer
+
+# Lazy initialization to avoid circular dependencies
+_logger = None
+_error_handler = None
+
+
+def get_module_logger():
+    global _logger
+    if _logger is None:
+        _logger = get_logger(__name__)
+    return _logger
+
+
+def get_module_error_handler():
+    global _error_handler
+    if _error_handler is None:
+        _error_handler = ErrorHandler(__name__)
+    return _error_handler
+
+
+class RedisPriorityQueue:
+    """
+    Distributed asynchronous priority queue backed by Redis
+    """
+
+    def __init__(
+        self,
+        redis_url: Optional[str] = None,
+        queue_name: Optional[str] = None,  # default changed to None
+        processing_queue: Optional[str] = None,  # default changed to None
+        failed_queue: Optional[str] = None,  # default changed to None
+        max_retries: int = 3,
+        timeout: int = 300,  # task processing timeout (seconds)
+        max_connections: int = 10,  # connection pool size
+        module_name: str = "default",  # added module_name parameter
+        is_cluster: bool = False,  # whether to run in cluster mode
+        cluster_nodes: Optional[List[str]] = None  # list of cluster nodes
+    ):
+        # os.getenv() is no longer read directly; redis_url must be passed in
+        if redis_url is None:
+            # Without a redis_url, fail fast: it must be configured in settings
+            raise ValueError("redis_url must be provided. Configure it in settings instead of using os.getenv()")
+
+        self.redis_url = redis_url
+        self.module_name = module_name  # keep module_name
+        self.is_cluster = is_cluster
+        self.cluster_nodes = cluster_nodes
+
+        # Derive queue_name from module_name when not provided
+        if queue_name is None:
+            self.queue_name = f"crawlo:{module_name}:queue:requests"
+        else:
+            # Normalize the queue name, collapsing repeated crawlo prefixes
+            self.queue_name = self._normalize_queue_name(queue_name)
+
+        # Derive processing_queue from queue_name when not provided
+        if processing_queue is None:
+            if ":queue:requests" in self.queue_name:
+                self.processing_queue = self.queue_name.replace(":queue:requests", ":queue:processing")
+            else:
+                self.processing_queue = f"{self.queue_name}:processing"
+        else:
+            self.processing_queue = processing_queue
+
+        # Derive failed_queue from queue_name when not provided
+        if failed_queue is None:
+            if ":queue:requests" in self.queue_name:
+                self.failed_queue = self.queue_name.replace(":queue:requests", ":queue:failed")
+            else:
+                self.failed_queue = f"{self.queue_name}:failed"
+        else:
+            self.failed_queue = failed_queue
+
+        self.max_retries = max_retries
+        self.timeout = timeout
+        self.max_connections = max_connections
+        self._redis_pool: Optional[RedisConnectionPool] = None
+        self._redis: Optional[Any] = None
+        self._lock = asyncio.Lock()  # lock guarding connection initialization
+        self.request_serializer = RequestSerializer()  # handles serialization
+
+    def _normalize_queue_name(self, queue_name: str) -> str:
+        """
+        Normalize a queue name, collapsing repeated crawlo prefixes
+
+        :param queue_name: original queue name
+        :return: normalized queue name
+        """
+        # Already well-formed (starts with crawlo: but not crawlo:crawlo:): keep as-is
+        if queue_name.startswith("crawlo:") and not queue_name.startswith("crawlo:crawlo:"):
+            return queue_name
+
+        # Collapse a triple crawlo prefix to the standard format
+        if queue_name.startswith("crawlo:crawlo:crawlo:"):
+            # Triple crawlo prefix: reduce to a single crawlo: prefix
+            remaining = queue_name[21:]  # strip the "crawlo:crawlo:crawlo:" prefix
+            if remaining:
+                return f"crawlo:{remaining}"
+            else:
+                return "crawlo:requests"  # default name
+
+        # Collapse a double crawlo prefix
+        elif queue_name.startswith("crawlo:crawlo:"):
+            # Double crawlo prefix: reduce to a single crawlo: prefix
+            remaining = queue_name[14:]  # strip the "crawlo:crawlo:" prefix
+            if remaining:
+                return f"crawlo:{remaining}"
+            else:
+                return "crawlo:requests"  # default name
+
+        # No crawlo prefix at all
+        elif not queue_name.startswith("crawlo:"):
+            # add the crawlo: prefix
+            if queue_name:
+                return f"crawlo:{queue_name}"
+            else:
+                return "crawlo:requests"  # default name
+
+        # Anything else: keep unchanged
+        else:
+            return queue_name
+
+    async def connect(self, max_retries=3, delay=1):
+        """Connect to Redis asynchronously, with retries"""
+        async with self._lock:
+            if self._redis is not None:
+                # Already connected: verify the connection is still alive
+                try:
+                    await self._redis.ping()
+                    return self._redis
+                except Exception:
+                    # Connection is stale; reconnect
+                    self._redis = None
+
+            for attempt in range(max_retries):
+                try:
+                    # Use the optimized connection pool; decode_responses=False avoids encoding issues
+                    self._redis_pool = get_redis_pool(
+                        self.redis_url,
+                        is_cluster=self.is_cluster,
+                        cluster_nodes=self.cluster_nodes,
+                        max_connections=self.max_connections,
+                        socket_connect_timeout=5,
+                        socket_timeout=30,
+                        health_check_interval=30,
+                        retry_on_timeout=True,
+                        decode_responses=False,  # never auto-decode responses
+                        encoding='utf-8'
+                    )
+
+                    self._redis = await self._redis_pool.get_connection()
+
+                    # Verify the connection
+                    if self._redis:
+                        await self._redis.ping()
+                    return self._redis
+                except Exception as e:
+                    error_msg = f"Redis connection failed (attempt {attempt + 1}/{max_retries}, Module: {self.module_name}): {e}"
+                    get_module_logger().warning(error_msg)
+                    get_module_logger().debug(f"Detailed error info:\n{traceback.format_exc()}")
+                    if attempt < max_retries - 1:
+                        await asyncio.sleep(delay)
+                    else:
+                        raise ConnectionError(f"Unable to connect to Redis (Module: {self.module_name}): {e}")
+
+    async def _ensure_connection(self):
+        """Ensure the connection is alive"""
+        if self._redis is None:
+            await self.connect()
+        try:
+            if self._redis:
+                await self._redis.ping()
+        except Exception as e:
+            get_module_logger().warning(f"Redis connection lost (Module: {self.module_name}), reconnecting...: {e}")
+            self._redis = None
+            await self.connect()
+
+    def _is_cluster_mode(self) -> bool:
+        """Check whether we are running against a Redis cluster"""
+        if REDIS_CLUSTER_AVAILABLE and RedisCluster is not None:
+            # Check whether _redis is a RedisCluster instance
+            if self._redis is not None and isinstance(self._redis, RedisCluster):
+                return True
+        return False
+
+    async def put(self, request, priority: int = 0) -> bool:
+        """Put a request onto the queue"""
+        try:
+            await self._ensure_connection()
+            if not self._redis:
+                return False
+
+            # Fix for priority-ordering consistency
+            # Before: score = -priority (higher priority values dequeued first)
+            # Now: score = priority (lower priority values dequeue first, matching the in-memory queue)
+            score = priority
+            key = self._get_request_key(request)
+
+            # 🔥 Clean the Request with the dedicated serialization helper
+            clean_request = self.request_serializer.prepare_for_serialization(request)
+
+            # Make sure the serialized payload can be deserialized
+            try:
+                serialized = pickle.dumps(clean_request)
+                # verify the payload round-trips
+                pickle.loads(serialized)
+            except Exception as serialize_error:
+                get_module_logger().error(f"Request serialization check failed (Module: {self.module_name}): {serialize_error}")
+                return False
+
+            # Cluster-mode handling
+            if self._is_cluster_mode():
+                # In cluster mode, all keys must land in the same slot,
+                # achieved by adding the same hash tag to every key name
+                hash_tag = "{queue}"  # hash tag keeps the keys in one slot
+                queue_name_with_tag = f"{self.queue_name}{hash_tag}"
+                data_key_with_tag = f"{self.queue_name}:data{hash_tag}"
+
+                pipe = self._redis.pipeline()
+                pipe.zadd(queue_name_with_tag, {key: score})
+                pipe.hset(data_key_with_tag, key, serialized)
+                result = await pipe.execute()
+            else:
+                pipe = self._redis.pipeline()
+                pipe.zadd(self.queue_name, {key: score})
+                pipe.hset(f"{self.queue_name}:data", key, serialized)
+                result = await pipe.execute()
+
+            if result[0] > 0:
+                get_module_logger().debug(f"Enqueued (Module: {self.module_name}): {request.url}")
+            return result[0] > 0
+        except Exception as e:
+            error_context = ErrorContext(
+                context=f"Failed to enqueue (Module: {self.module_name})"
+            )
+            get_module_error_handler().handle_error(
+                e,
+                context=error_context,
+                raise_error=False
+            )
+            return False
+
+    async def get(self, timeout: float = 5.0):
+        """
+        Get a request (with a timeout)
+        :param timeout: maximum wait in seconds, to avoid polling forever
+        """
+        try:
+            await self._ensure_connection()
+            if not self._redis:
+                return None
+
+            start_time = asyncio.get_event_loop().time()
+
+            while True:
+                # Try to fetch a task
+                if self._is_cluster_mode():
+                    # cluster-mode path
+                    hash_tag = "{queue}"
+                    queue_name_with_tag = f"{self.queue_name}{hash_tag}"
+                    result = await self._redis.zpopmin(queue_name_with_tag, count=1)
+                else:
+                    result = await self._redis.zpopmin(self.queue_name, count=1)
+
+                if result:
+                    key, score = result[0]
+                    data_key = f"{self.queue_name}:data"
+                    if self._is_cluster_mode():
+                        hash_tag = "{queue}"
+                        data_key = f"{self.queue_name}:data{hash_tag}"
+
+                    serialized = await self._redis.hget(data_key, key)
+                    if not serialized:
+                        continue
+
+                    # Move to the processing queue
+                    processing_key = f"{key}:{int(time.time())}"
+                    processing_queue = self.processing_queue
+                    processing_data_key = f"{self.processing_queue}:data"
+
+                    if self._is_cluster_mode():
+                        hash_tag = "{queue}"
+                        processing_queue = f"{self.processing_queue}{hash_tag}"
+                        processing_data_key = f"{self.processing_queue}:data{hash_tag}"
+
+                    if self._is_cluster_mode():
+                        pipe = self._redis.pipeline()
+                        pipe.zadd(processing_queue, {processing_key: time.time() + self.timeout})
+                        pipe.hset(processing_data_key, processing_key, serialized)
+                        pipe.hdel(data_key, key)
+                        await pipe.execute()
+                    else:
+                        pipe = self._redis.pipeline()
+                        pipe.zadd(processing_queue, {processing_key: time.time() + self.timeout})
+                        pipe.hset(processing_data_key, processing_key, serialized)
+                        pipe.hdel(data_key, key)
+                        await pipe.execute()
+
+                    # Safer deserialization
+                    try:
+                        # try standard pickle first
+                        request = pickle.loads(serialized)
+                        return request
+                    except UnicodeDecodeError:
+                        # on encoding errors, retry with latin1
+                        request = pickle.loads(serialized, encoding='latin1')
+                        return request
+                    except Exception as pickle_error:
+                        # if unpickling fails, log the error and skip this task
+                        get_module_logger().error(f"Unable to deserialize request data (Module: {self.module_name}): {pickle_error}")
+                        # drop the invalid task from the processing queue
+                        if self._is_cluster_mode():
+                            await self._redis.zrem(processing_queue, processing_key)
+                            await self._redis.hdel(processing_data_key, processing_key)
+                        else:
+                            await self._redis.zrem(processing_queue, processing_key)
+                            await self._redis.hdel(processing_data_key, processing_key)
+                        # move on to the next task
+                        continue
+
+                # Check for timeout
+                if asyncio.get_event_loop().time() - start_time > timeout:
+                    return None
+
+                # Sleep briefly to avoid busy polling, kept short for responsiveness
+                await asyncio.sleep(0.001)  # reduced from 0.01 to 0.001
+
+        except Exception as e:
+            error_context = ErrorContext(
+                context=f"Failed to fetch a task from the queue (Module: {self.module_name})"
+            )
+            get_module_error_handler().handle_error(
+                e,
+                context=error_context,
+                raise_error=False
+            )
+            return None
+
+    async def ack(self, request: "Request"):
+        """Acknowledge task completion"""
+        try:
+            await self._ensure_connection()
+            if not self._redis:
+                return
+
+            key = self._get_request_key(request)
+            processing_queue = self.processing_queue
+            processing_data_key = f"{self.processing_queue}:data"
+
+            if self._is_cluster_mode():
+                hash_tag = "{queue}"
+                processing_queue = f"{self.processing_queue}{hash_tag}"
+                processing_data_key = f"{self.processing_queue}:data{hash_tag}"
+
+            cursor = 0
+            while True:
+                if self._is_cluster_mode():
+                    cursor, keys = await self._redis.zscan(processing_queue, cursor, match=f"{key}:*")
+                else:
+                    cursor, keys = await self._redis.zscan(processing_queue, cursor, match=f"{key}:*")
+                if keys:
+                    if self._is_cluster_mode():
+                        pipe = self._redis.pipeline()
+                        for k in keys:
+                            pipe.zrem(processing_queue, k)
+                            pipe.hdel(processing_data_key, k)
+                        await pipe.execute()
+                    else:
+                        pipe = self._redis.pipeline()
+                        for k in keys:
+                            pipe.zrem(processing_queue, k)
+                            pipe.hdel(processing_data_key, k)
+                        await pipe.execute()
+                if cursor == 0:
+                    break
+        except Exception as e:
+            error_context = ErrorContext(
+                context=f"Failed to acknowledge task completion (Module: {self.module_name})"
+            )
+            get_module_error_handler().handle_error(
+                e,
+                context=error_context,
+                raise_error=False
+            )
+
+    async def fail(self, request: "Request", reason: str = ""):
+        """Mark a task as failed"""
+        try:
+            await self._ensure_connection()
+            if not self._redis:
+                return
+
+            key = self._get_request_key(request)
+            await self.ack(request)
+
+            retry_key = f"{self.failed_queue}:retries:{key}"
+            failed_queue = self.failed_queue
+
+            if self._is_cluster_mode():
+                hash_tag = "{queue}"
+                retry_key = f"{self.failed_queue}:retries:{key}{hash_tag}"
+                failed_queue = f"{self.failed_queue}{hash_tag}"
+
+            retries = await self._redis.incr(retry_key)
+            await self._redis.expire(retry_key, 86400)
+
+            if retries <= self.max_retries:
+                await self.put(request, priority=request.priority + 1)
+                get_module_logger().info(
+                    f"Task retry [{retries}/{self.max_retries}] (Module: {self.module_name}): {request.url}")
+            else:
+                failed_data = {
+                    "url": request.url,
+                    "reason": reason,
+                    "retries": retries,
+                    "failed_at": time.time(),
+                    "request_pickle": pickle.dumps(request).hex(),  # optional: keep the full request
+                }
+                await self._redis.lpush(failed_queue, pickle.dumps(failed_data))
+                get_module_logger().error(f"Task permanently failed [{retries} attempts] (Module: {self.module_name}): {request.url}")
+        except Exception as e:
+            error_context = ErrorContext(
+                context=f"Failed to mark task as failed (Module: {self.module_name})"
+            )
+            get_module_error_handler().handle_error(
+                e,
+                context=error_context,
+                raise_error=False
+            )
+
+    def _get_request_key(self, request) -> str:
+        """Generate a unique key for a request"""
+        return f"{self.module_name}:url:{hash(request.url) & 0x7FFFFFFF}"  # keep it positive
+
+    async def qsize(self) -> int:
+        """Get queue size"""
+        try:
+            await self._ensure_connection()
+            if not self._redis:
+                return 0
+
+            if self._is_cluster_mode():
+                hash_tag = "{queue}"
+                queue_name_with_tag = f"{self.queue_name}{hash_tag}"
+                return await self._redis.zcard(queue_name_with_tag)
+            else:
+                return await self._redis.zcard(self.queue_name)
+        except Exception as e:
+            error_context = ErrorContext(
+                context=f"Failed to get queue size (Module: {self.module_name})"
+            )
+            get_module_error_handler().handle_error(
+                e,
+                context=error_context,
+                raise_error=False
+            )
+            return 0
+
+    async def close(self):
+        """Close the connection"""
+        try:
+            # Explicitly close the Redis connection
+            if self._redis is not None:
+                try:
+                    # try to close the connection
+                    if hasattr(self._redis, 'close'):
+                        close_result = self._redis.close()
+                        if asyncio.iscoroutine(close_result):
+                            await close_result
+
+                    # wait for the close to finish
+                    if hasattr(self._redis, 'wait_closed'):
+                        wait_result = self._redis.wait_closed()
+                        if asyncio.iscoroutine(wait_result):
+                            await wait_result
+                except Exception as close_error:
+                    get_module_logger().warning(
+                        f"Error closing Redis connection (Module: {self.module_name}): {close_error}"
+                    )
+                finally:
+                    self._redis = None
+
+            # Release the pool reference (the pool itself is owned by the global manager)
+            self._redis_pool = None
+
+            get_module_logger().debug(f"Redis connection released (Module: {self.module_name})")
+        except Exception as e:
+            error_context = ErrorContext(
+                context=f"Failed to release the Redis connection (Module: {self.module_name})"
+            )
+            get_module_error_handler().handle_error(
+                e,
+                context=error_context,
+                raise_error=False
             )
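
For context, here is a minimal usage sketch of the queue shown in the hunk above. It is not part of the release: it assumes a local Redis at redis://localhost:6379/0, and it uses a hypothetical DemoRequest stand-in for crawlo's own Request class, on the assumption that the queue (and its RequestSerializer) only needs a picklable object exposing url and priority attributes.

    import asyncio
    from dataclasses import dataclass

    from crawlo.queue.redis_priority_queue import RedisPriorityQueue


    @dataclass
    class DemoRequest:
        # Hypothetical stand-in for crawlo.Request: the queue relies on a
        # picklable object with `url` (key generation) and `priority` (retries).
        url: str
        priority: int = 0


    async def main() -> None:
        queue = RedisPriorityQueue(
            redis_url="redis://localhost:6379/0",  # assumption: local Redis instance
            module_name="demo_spider",  # queue name becomes crawlo:demo_spider:queue:requests
        )
        try:
            # score = priority, so the lower value pops first via ZPOPMIN
            await queue.put(DemoRequest("https://example.com/low", priority=5), priority=5)
            await queue.put(DemoRequest("https://example.com/high", priority=0), priority=0)

            request = await queue.get(timeout=5.0)  # expected: the priority-0 request
            if request is not None:
                print(request.url)
                await queue.ack(request)  # remove it from the processing queue
        finally:
            await queue.close()


    if __name__ == "__main__":
        asyncio.run(main())

The same constructor also exercises the normalization shown in _normalize_queue_name: passing queue_name="crawlo:crawlo:my:queue", for instance, yields queue.queue_name == "crawlo:my:queue", the double prefix collapsed. Note the design choice visible in the cluster branches: every key carries the same {queue} hash tag, so the zadd/hset/hdel pipelines in put() and get() touch a single cluster slot and stay within Redis Cluster's cross-slot restrictions.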