crawlo 1.1.2-py3-none-any.whl → 1.1.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (41)
  1. crawlo/__version__.py +1 -1
  2. crawlo/core/scheduler.py +20 -16
  3. crawlo/downloader/httpx_downloader.py +14 -12
  4. crawlo/exceptions.py +4 -0
  5. crawlo/extension/__init__.py +17 -10
  6. crawlo/extension/health_check.py +142 -0
  7. crawlo/extension/log_interval.py +27 -18
  8. crawlo/extension/log_stats.py +62 -24
  9. crawlo/extension/logging_extension.py +18 -9
  10. crawlo/extension/memory_monitor.py +89 -0
  11. crawlo/extension/performance_profiler.py +118 -0
  12. crawlo/extension/request_recorder.py +108 -0
  13. crawlo/filters/aioredis_filter.py +2 -2
  14. crawlo/middleware/retry.py +3 -3
  15. crawlo/network/request.py +2 -2
  16. crawlo/network/response.py +25 -23
  17. crawlo/pipelines/__init__.py +9 -0
  18. crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
  19. crawlo/pipelines/database_dedup_pipeline.py +225 -0
  20. crawlo/pipelines/memory_dedup_pipeline.py +116 -0
  21. crawlo/pipelines/mongo_pipeline.py +81 -66
  22. crawlo/pipelines/mysql_pipeline.py +165 -43
  23. crawlo/pipelines/redis_dedup_pipeline.py +163 -0
  24. crawlo/queue/queue_manager.py +4 -0
  25. crawlo/queue/redis_priority_queue.py +20 -3
  26. crawlo/settings/default_settings.py +119 -66
  27. crawlo/subscriber.py +62 -37
  28. crawlo/templates/project/items.py.tmpl +1 -1
  29. crawlo/templates/project/middlewares.py.tmpl +73 -49
  30. crawlo/templates/project/pipelines.py.tmpl +52 -290
  31. crawlo/templates/project/run.py.tmpl +20 -7
  32. crawlo/templates/project/settings.py.tmpl +35 -3
  33. crawlo/templates/spider/spider.py.tmpl +1 -37
  34. crawlo/utils/controlled_spider_mixin.py +109 -5
  35. crawlo-1.1.4.dist-info/METADATA +403 -0
  36. {crawlo-1.1.2.dist-info → crawlo-1.1.4.dist-info}/RECORD +40 -31
  37. examples/controlled_spider_example.py +205 -0
  38. crawlo-1.1.2.dist-info/METADATA +0 -567
  39. {crawlo-1.1.2.dist-info → crawlo-1.1.4.dist-info}/WHEEL +0 -0
  40. {crawlo-1.1.2.dist-info → crawlo-1.1.4.dist-info}/entry_points.txt +0 -0
  41. {crawlo-1.1.2.dist-info → crawlo-1.1.4.dist-info}/top_level.txt +0 -0
crawlo/pipelines/mysql_pipeline.py
@@ -1,11 +1,11 @@
  # -*- coding: utf-8 -*-
  import asyncio
  import aiomysql
- from typing import Optional
+ from typing import Optional, List, Dict
  from asyncmy import create_pool
  from crawlo.utils.log import get_logger
  from crawlo.exceptions import ItemDiscard
- from crawlo.utils.db_helper import make_insert_sql, logger
+ from crawlo.utils.db_helper import make_insert_sql, make_batch_sql, logger


  class AsyncmyMySQLPipeline:
@@ -24,6 +24,11 @@ class AsyncmyMySQLPipeline:
              f"{crawler.spider.name}_items"
          )

+         # Batch insert configuration
+         self.batch_size = self.settings.get_int('MYSQL_BATCH_SIZE', 100)
+         self.use_batch = self.settings.get_bool('MYSQL_USE_BATCH', False)
+         self.batch_buffer: List[Dict] = []  # batch buffer
+
          # Register the spider close event
          crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')

@@ -59,30 +64,45 @@ class AsyncmyMySQLPipeline:
          """Core method for processing an item"""
          kwargs = kwargs or {}
          spider_name = getattr(spider, 'name', 'unknown')  # get the spider name
-         try:
-             await self._ensure_pool()
-             item_dict = dict(item)
-             sql = make_insert_sql(table=self.table_name, data=item_dict, **kwargs)
-
-             rowcount = await self._execute_sql(sql=sql)
-             if rowcount > 1:
-                 self.logger.info(
-                     f"Spider {spider_name} successfully inserted {rowcount} records into table {self.table_name}"
-                 )
-             elif rowcount == 1:
-                 self.logger.debug(
-                     f"Spider {spider_name} successfully inserted a single record into table {self.table_name}"
-                 )
-             else:
-                 self.logger.warning(
-                     f"Spider {spider_name}: SQL executed successfully but inserted no new record - {sql[:100]}..."
-                 )
-
+
+         # If batch insert is enabled, add the item to the buffer
+         if self.use_batch:
+             self.batch_buffer.append(dict(item))
+
+             # If the buffer has reached the batch size, perform a batch insert
+             if len(self.batch_buffer) >= self.batch_size:
+                 await self._flush_batch(spider_name)
+
              return item
+         else:
+             # Single-insert logic
+             try:
+                 await self._ensure_pool()
+                 item_dict = dict(item)
+                 sql = make_insert_sql(table=self.table_name, data=item_dict, **kwargs)

-         except Exception as e:
-             self.logger.error(f"Error while processing item: {e}")
-             raise ItemDiscard(f"Processing failed: {e}")
+                 rowcount = await self._execute_sql(sql=sql)
+                 if rowcount > 1:
+                     self.logger.info(
+                         f"Spider {spider_name} successfully inserted {rowcount} records into table {self.table_name}"
+                     )
+                 elif rowcount == 1:
+                     self.logger.debug(
+                         f"Spider {spider_name} successfully inserted a single record into table {self.table_name}"
+                     )
+                 else:
+                     self.logger.warning(
+                         f"Spider {spider_name}: SQL executed successfully but inserted no new record - {sql[:100]}..."
+                     )
+
+                 # Stats counting moved here, consistent with AiomysqlMySQLPipeline
+                 self.crawler.stats.inc_value('mysql/insert_success')
+                 return item
+
+             except Exception as e:
+                 self.logger.error(f"Error while processing item: {e}")
+                 self.crawler.stats.inc_value('mysql/insert_failed')
+                 raise ItemDiscard(f"Processing failed: {e}")

      async def _execute_sql(self, sql: str, values: list = None) -> int:
          """Execute the SQL statement and handle the result"""
@@ -96,15 +116,59 @@ class AsyncmyMySQLPipeline:
                      rowcount = await cursor.execute(sql)

                      await conn.commit()
-                     self.crawler.stats.inc_value('mysql/insert_success')
+                     # Stats counting removed here
                      return rowcount
                  except Exception as e:
                      await conn.rollback()
-                     self.crawler.stats.inc_value('mysql/insert_failed')
+                     # Stats counting removed here
                      raise ItemDiscard(f"MySQL insert failed: {e}")

+     async def _flush_batch(self, spider_name: str):
+         """Flush the batch buffer and perform a batch insert"""
+         if not self.batch_buffer:
+             return
+
+         try:
+             await self._ensure_pool()
+
+             # Use the batch SQL generation helper
+             batch_result = make_batch_sql(table=self.table_name, datas=self.batch_buffer)
+             if batch_result is None:
+                 self.logger.warning("Batch insert data is empty")
+                 self.batch_buffer.clear()
+                 return
+
+             sql, values_list = batch_result
+
+             async with self.pool.acquire() as conn:
+                 async with conn.cursor() as cursor:
+                     try:
+                         # Perform the batch insert
+                         rowcount = await cursor.executemany(sql, values_list)
+                         await conn.commit()
+
+                         self.logger.info(
+                             f"Spider {spider_name} batch-inserted {rowcount} records into table {self.table_name}"
+                         )
+                         # Update stats counters
+                         self.crawler.stats.inc_value('mysql/insert_success', rowcount)
+                         self.batch_buffer.clear()
+                     except Exception as e:
+                         await conn.rollback()
+                         self.crawler.stats.inc_value('mysql/insert_failed', len(self.batch_buffer))
+                         self.logger.error(f"Batch insert failed: {e}")
+                         raise ItemDiscard(f"Batch insert failed: {e}")
+         except Exception as e:
+             self.logger.error(f"Error during batch insert: {e}")
+             raise ItemDiscard(f"Batch insert processing failed: {e}")
+
      async def spider_closed(self):
          """Clean up resources when the spider closes"""
+         # Flush any remaining batched data before closing
+         if self.use_batch and self.batch_buffer:
+             spider_name = getattr(self.crawler.spider, 'name', 'unknown')
+             await self._flush_batch(spider_name)
+
          if self.pool:
              self.pool.close()
              await self.pool.wait_closed()
@@ -127,6 +191,11 @@ class AiomysqlMySQLPipeline:
              f"{crawler.spider.name}_items"
          )

+         # Batch insert configuration
+         self.batch_size = self.settings.get_int('MYSQL_BATCH_SIZE', 100)
+         self.use_batch = self.settings.get_bool('MYSQL_USE_BATCH', False)
+         self.batch_buffer: List[Dict] = []  # batch buffer
+
          crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')

      @classmethod
@@ -160,36 +229,89 @@ class AiomysqlMySQLPipeline:

      async def process_item(self, item, spider) -> Optional[dict]:
          """Item processing method"""
+         # If batch insert is enabled, add the item to the buffer
+         if self.use_batch:
+             self.batch_buffer.append(dict(item))
+
+             # If the buffer has reached the batch size, perform a batch insert
+             if len(self.batch_buffer) >= self.batch_size:
+                 spider_name = getattr(spider, 'name', 'unknown')
+                 await self._flush_batch(spider_name)
+
+             return item
+         else:
+             # Single-insert logic
+             try:
+                 await self._init_pool()
+
+                 item_dict = dict(item)
+                 # Use the make_insert_sql helper to generate the SQL
+                 sql = make_insert_sql(table=self.table_name, data=item_dict)
+
+                 async with self.pool.acquire() as conn:
+                     async with conn.cursor() as cursor:
+                         try:
+                             await cursor.execute(sql)
+                             await conn.commit()
+                             self.crawler.stats.inc_value('mysql/insert_success')
+                         except aiomysql.Error as e:
+                             await conn.rollback()
+                             self.crawler.stats.inc_value('mysql/insert_failed')
+                             raise ItemDiscard(f"MySQL error: {e.args[1]}")
+
+                 return item
+
+             except Exception as e:
+                 self.logger.error(f"Pipeline processing exception: {e}")
+                 raise ItemDiscard(f"Processing failed: {e}")
+
+     async def _flush_batch(self, spider_name: str):
+         """Flush the batch buffer and perform a batch insert"""
+         if not self.batch_buffer:
+             return
+
          try:
              await self._init_pool()
+
+             # Use the batch SQL generation helper
+             batch_result = make_batch_sql(table=self.table_name, datas=self.batch_buffer)
+             if batch_result is None:
+                 self.logger.warning("Batch insert data is empty")
+                 self.batch_buffer.clear()
+                 return

-             item_dict = dict(item)
-             sql = f"""
-                 INSERT INTO `{self.table_name}`
-                 ({', '.join([f'`{k}`' for k in item_dict.keys()])})
-                 VALUES ({', '.join(['%s'] * len(item_dict))})
-             """
+             sql, values_list = batch_result

              async with self.pool.acquire() as conn:
                  async with conn.cursor() as cursor:
                      try:
-                         await cursor.execute(sql, list(item_dict.values()))
+                         # Perform the batch insert
+                         rowcount = await cursor.executemany(sql, values_list)
                          await conn.commit()
-                         self.crawler.stats.inc_value('mysql/insert_success')
-                     except aiomysql.Error as e:
+
+                         self.logger.info(
+                             f"Spider {spider_name} batch-inserted {rowcount} records into table {self.table_name}"
+                         )
+                         # Update stats counters
+                         self.crawler.stats.inc_value('mysql/insert_success', rowcount)
+                         self.batch_buffer.clear()
+                     except Exception as e:
                          await conn.rollback()
-                         self.crawler.stats.inc_value('mysql/insert_failed')
-                         raise ItemDiscard(f"MySQL error: {e.args[1]}")
-
-             return item
-
+                         self.crawler.stats.inc_value('mysql/insert_failed', len(self.batch_buffer))
+                         self.logger.error(f"Batch insert failed: {e}")
+                         raise ItemDiscard(f"Batch insert failed: {e}")
          except Exception as e:
-             self.logger.error(f"Pipeline processing exception: {e}")
-             raise ItemDiscard(f"Processing failed: {e}")
+             self.logger.error(f"Error during batch insert: {e}")
+             raise ItemDiscard(f"Batch insert processing failed: {e}")

      async def spider_closed(self):
          """Resource cleanup"""
+         # Flush any remaining batched data before closing
+         if self.use_batch and self.batch_buffer:
+             spider_name = getattr(self.crawler.spider, 'name', 'unknown')
+             await self._flush_batch(spider_name)
+
          if self.pool:
              self.pool.close()
              await self.pool.wait_closed()
-             self.logger.info("aiomysql connection pool released")
+             self.logger.info("aiomysql connection pool released")
crawlo/pipelines/redis_dedup_pipeline.py
@@ -0,0 +1,163 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ Redis-based item deduplication pipeline
+ ========================
+ Provides item deduplication in distributed environments and prevents duplicate records from being saved.
+
+ Features:
+ - Distributed support: multiple nodes share the deduplication data
+ - High performance: uses a Redis set for fast lookups
+ - Configurable: supports custom Redis connection parameters
+ - Fault tolerant: data is not lost on network errors
+ """
+
+ import hashlib
+ from typing import Dict, Any, Optional
+ import redis
+
+ from crawlo import Item
+ from crawlo.spider import Spider
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import DropItem
+
+
+ class RedisDedupPipeline:
+     """Redis-based item deduplication pipeline"""
+
+     def __init__(
+         self,
+         redis_host: str = 'localhost',
+         redis_port: int = 6379,
+         redis_db: int = 0,
+         redis_password: Optional[str] = None,
+         redis_key: str = 'crawlo:item_fingerprints',
+         log_level: str = "INFO"
+     ):
+         """
+         Initialize the Redis deduplication pipeline
+
+         :param redis_host: Redis host address
+         :param redis_port: Redis port
+         :param redis_db: Redis database number
+         :param redis_password: Redis password
+         :param redis_key: Redis key used to store fingerprints
+         :param log_level: log level
+         """
+         self.logger = get_logger(self.__class__.__name__, log_level)
+
+         # Initialize the Redis connection
+         try:
+             self.redis_client = redis.Redis(
+                 host=redis_host,
+                 port=redis_port,
+                 db=redis_db,
+                 password=redis_password,
+                 decode_responses=True,
+                 socket_connect_timeout=5,
+                 socket_timeout=5
+             )
+             # Test the connection
+             self.redis_client.ping()
+             self.logger.info(f"Redis connected: {redis_host}:{redis_port}/{redis_db}")
+         except Exception as e:
+             self.logger.error(f"Redis connection failed: {e}")
+             raise RuntimeError(f"Redis connection failed: {e}")
+
+         self.redis_key = redis_key
+         self.dropped_count = 0
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         """Create a pipeline instance from the crawler configuration"""
+         settings = crawler.settings
+
+         return cls(
+             redis_host=settings.get('REDIS_HOST', 'localhost'),
+             redis_port=settings.getint('REDIS_PORT', 6379),
+             redis_db=settings.getint('REDIS_DB', 0),
+             redis_password=settings.get('REDIS_PASSWORD') or None,
+             redis_key=settings.get('REDIS_DEDUP_KEY', 'crawlo:item_fingerprints'),
+             log_level=settings.get('LOG_LEVEL', 'INFO')
+         )
+
+     def process_item(self, item: Item, spider: Spider) -> Item:
+         """
+         Process an item and run the deduplication check
+
+         :param item: the item to process
+         :param spider: the spider instance
+         :return: the processed item, or raise DropItem
+         """
+         try:
+             # Generate the item fingerprint
+             fingerprint = self._generate_item_fingerprint(item)
+
+             # Use Redis SADD to check and add the fingerprint in one step
+             # SADD returns 0 if the fingerprint already exists, 1 if it is new
+             is_new = self.redis_client.sadd(self.redis_key, fingerprint)
+
+             if not is_new:
+                 # The fingerprint already exists, drop this item
+                 self.dropped_count += 1
+                 self.logger.debug(f"Dropping duplicate item: {fingerprint[:20]}...")
+                 raise DropItem(f"Duplicate item: {fingerprint}")
+             else:
+                 # New item, continue processing
+                 self.logger.debug(f"Processing new item: {fingerprint[:20]}...")
+                 return item
+
+         except redis.RedisError as e:
+             self.logger.error(f"Redis error: {e}")
+             # Keep processing on Redis errors to avoid losing data
+             return item
+         except Exception as e:
+             self.logger.error(f"Error while processing item: {e}")
+             # Keep processing on other errors
+             return item
+
+     def _generate_item_fingerprint(self, item: Item) -> str:
+         """
+         Generate the item fingerprint
+
+         Builds a unique fingerprint from all fields of the item, used for deduplication.
+
+         :param item: the item
+         :return: fingerprint string
+         """
+         # Convert the item into a serializable dict
+         try:
+             item_dict = item.to_dict()
+         except AttributeError:
+             # Fall back for Item implementations without a to_dict method
+             item_dict = dict(item)
+
+         # Sort the dict to ensure a consistent order
+         sorted_items = sorted(item_dict.items())
+
+         # Build the fingerprint string
+         fingerprint_string = '|'.join([f"{k}={v}" for k, v in sorted_items if v is not None])
+
+         # Use SHA256 to produce a fixed-length fingerprint
+         return hashlib.sha256(fingerprint_string.encode('utf-8')).hexdigest()
+
+     def close_spider(self, spider: Spider) -> None:
+         """
+         Cleanup when the spider closes
+
+         :param spider: the spider instance
+         """
+         try:
+             # Collect deduplication statistics
+             total_items = self.redis_client.scard(self.redis_key)
+             self.logger.info(f"Spider {spider.name} closed:")
+             self.logger.info(f"  - duplicate items dropped: {self.dropped_count}")
+             self.logger.info(f"  - fingerprints stored in Redis: {total_items}")
+
+             # Note: fingerprints in Redis are not cleaned up by default
+             # Cleanup can be enabled via settings if needed
+             if spider.crawler.settings.getbool('REDIS_DEDUP_CLEANUP', False):
+                 deleted = self.redis_client.delete(self.redis_key)
+                 self.logger.info(f"  - fingerprints cleaned up: {deleted}")
+         except Exception as e:
+             self.logger.error(f"Error while closing spider: {e}")
crawlo/queue/queue_manager.py
@@ -7,6 +7,8 @@
  from typing import Optional, Dict, Any, Union
  from enum import Enum
  import asyncio
+ import traceback
+ import os

  from crawlo.utils.log import get_logger
  from crawlo.utils.request_serializer import RequestSerializer
@@ -106,7 +108,9 @@ class QueueManager:
              return True

          except Exception as e:
+             # Log detailed error information and a stack trace
              self.logger.error(f"❌ Queue initialization failed: {e}")
+             self.logger.error(f"Detailed error info:\n{traceback.format_exc()}")
              self._health_status = "error"
              return False

crawlo/queue/redis_priority_queue.py
@@ -3,6 +3,8 @@ import time
  import asyncio
  from typing import Optional
  import redis.asyncio as aioredis
+ import traceback
+ import os

  from crawlo import Request
  from crawlo.utils.log import get_logger
@@ -19,7 +21,7 @@ class RedisPriorityQueue:

      def __init__(
          self,
-         redis_url: str = "redis://localhost:6379/0",
+         redis_url: str = None,
          queue_name: str = "crawlo:requests",
          processing_queue: str = "crawlo:processing",
          failed_queue: str = "crawlo:failed",
@@ -27,6 +29,18 @@
          timeout: int = 300,  # task processing timeout (seconds)
          max_connections: int = 10,  # connection pool size
      ):
+         # If no redis_url is provided, build one from environment variables
+         if redis_url is None:
+             redis_host = os.getenv('REDIS_HOST', 'localhost')
+             redis_port = os.getenv('REDIS_PORT', '6379')
+             redis_db = os.getenv('REDIS_DB', '0')
+             redis_password = os.getenv('REDIS_PASSWORD', '')
+
+             if redis_password:
+                 redis_url = f"redis://:{redis_password}@{redis_host}:{redis_port}/{redis_db}"
+             else:
+                 redis_url = f"redis://{redis_host}:{redis_port}/{redis_db}"
+
          self.redis_url = redis_url
          self.queue_name = queue_name
          self.processing_queue = processing_queue
@@ -59,6 +73,7 @@
                  return self._redis
              except Exception as e:
                  logger.warning(f"⚠️ Redis connection failed (attempt {attempt + 1}/{max_retries}): {e}")
+                 logger.warning(f"Detailed error info:\n{traceback.format_exc()}")
                  if attempt < max_retries - 1:
                      await asyncio.sleep(delay)
                  else:
@@ -70,8 +85,8 @@
              await self.connect()
          try:
              await self._redis.ping()
-         except Exception:
-             logger.warning("🔄 Redis connection lost, attempting to reconnect...")
+         except Exception as e:
+             logger.warning(f"🔄 Redis connection lost, attempting to reconnect...: {e}")
              self._redis = None
              await self.connect()

@@ -95,6 +110,7 @@
              return result[0] > 0
          except Exception as e:
              logger.error(f"❌ Failed to put request into queue: {e}")
+             logger.error(f"Detailed error info:\n{traceback.format_exc()}")
              return False

      async def get(self, timeout: float = 5.0) -> Optional[Request]:
@@ -134,6 +150,7 @@

          except Exception as e:
              logger.error(f"❌ Failed to get task from queue: {e}")
+             logger.error(f"Detailed error info:\n{traceback.format_exc()}")
              return None

      async def ack(self, request: Request):
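Note: with the constructor change above, RedisPriorityQueue falls back to the REDIS_HOST / REDIS_PORT / REDIS_DB / REDIS_PASSWORD environment variables when no redis_url is passed. A standalone sketch of the resulting URLs; the environment values are made-up examples:

import os

# Made-up values purely for illustration
os.environ.update({'REDIS_HOST': '10.0.0.5', 'REDIS_PORT': '6380', 'REDIS_DB': '1', 'REDIS_PASSWORD': 's3cret'})

redis_host = os.getenv('REDIS_HOST', 'localhost')
redis_port = os.getenv('REDIS_PORT', '6379')
redis_db = os.getenv('REDIS_DB', '0')
redis_password = os.getenv('REDIS_PASSWORD', '')

# Same branching as the diff: include the password only when one is set
if redis_password:
    redis_url = f"redis://:{redis_password}@{redis_host}:{redis_port}/{redis_db}"
else:
    redis_url = f"redis://{redis_host}:{redis_port}/{redis_db}"

print(redis_url)  # -> redis://:s3cret@10.0.0.5:6380/1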