crawlo-1.1.2-py3-none-any.whl → crawlo-1.1.3-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.

Potentially problematic release.


This version of crawlo might be problematic.

Files changed (113)
  1. crawlo/__init__.py +34 -34
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/list.py +155 -155
  8. crawlo/commands/run.py +285 -285
  9. crawlo/commands/startproject.py +196 -196
  10. crawlo/commands/stats.py +188 -188
  11. crawlo/commands/utils.py +186 -186
  12. crawlo/config.py +279 -279
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -171
  15. crawlo/core/enhanced_engine.py +189 -189
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +166 -162
  18. crawlo/crawler.py +1027 -1027
  19. crawlo/downloader/__init__.py +242 -242
  20. crawlo/downloader/aiohttp_downloader.py +212 -212
  21. crawlo/downloader/cffi_downloader.py +251 -251
  22. crawlo/downloader/httpx_downloader.py +259 -257
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +82 -78
  25. crawlo/extension/__init__.py +31 -31
  26. crawlo/extension/log_interval.py +49 -49
  27. crawlo/extension/log_stats.py +44 -44
  28. crawlo/extension/logging_extension.py +34 -34
  29. crawlo/filters/__init__.py +154 -154
  30. crawlo/filters/aioredis_filter.py +242 -242
  31. crawlo/filters/memory_filter.py +269 -269
  32. crawlo/items/__init__.py +23 -23
  33. crawlo/items/base.py +21 -21
  34. crawlo/items/fields.py +53 -53
  35. crawlo/items/items.py +104 -104
  36. crawlo/middleware/__init__.py +21 -21
  37. crawlo/middleware/default_header.py +32 -32
  38. crawlo/middleware/download_delay.py +28 -28
  39. crawlo/middleware/middleware_manager.py +135 -135
  40. crawlo/middleware/proxy.py +248 -248
  41. crawlo/middleware/request_ignore.py +30 -30
  42. crawlo/middleware/response_code.py +18 -18
  43. crawlo/middleware/response_filter.py +26 -26
  44. crawlo/middleware/retry.py +125 -125
  45. crawlo/mode_manager.py +200 -200
  46. crawlo/network/__init__.py +21 -21
  47. crawlo/network/request.py +311 -311
  48. crawlo/network/response.py +271 -269
  49. crawlo/pipelines/__init__.py +22 -13
  50. crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
  51. crawlo/pipelines/console_pipeline.py +39 -39
  52. crawlo/pipelines/csv_pipeline.py +316 -316
  53. crawlo/pipelines/database_dedup_pipeline.py +225 -0
  54. crawlo/pipelines/json_pipeline.py +218 -218
  55. crawlo/pipelines/memory_dedup_pipeline.py +116 -0
  56. crawlo/pipelines/mongo_pipeline.py +116 -116
  57. crawlo/pipelines/mysql_pipeline.py +195 -195
  58. crawlo/pipelines/pipeline_manager.py +56 -56
  59. crawlo/pipelines/redis_dedup_pipeline.py +163 -0
  60. crawlo/project.py +153 -153
  61. crawlo/queue/pqueue.py +37 -37
  62. crawlo/queue/queue_manager.py +307 -303
  63. crawlo/queue/redis_priority_queue.py +208 -191
  64. crawlo/settings/__init__.py +7 -7
  65. crawlo/settings/default_settings.py +245 -226
  66. crawlo/settings/setting_manager.py +99 -99
  67. crawlo/spider/__init__.py +639 -639
  68. crawlo/stats_collector.py +59 -59
  69. crawlo/subscriber.py +106 -106
  70. crawlo/task_manager.py +30 -30
  71. crawlo/templates/crawlo.cfg.tmpl +10 -10
  72. crawlo/templates/project/__init__.py.tmpl +3 -3
  73. crawlo/templates/project/items.py.tmpl +17 -17
  74. crawlo/templates/project/middlewares.py.tmpl +86 -86
  75. crawlo/templates/project/pipelines.py.tmpl +341 -335
  76. crawlo/templates/project/run.py.tmpl +251 -238
  77. crawlo/templates/project/settings.py.tmpl +250 -247
  78. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  79. crawlo/templates/spider/spider.py.tmpl +177 -177
  80. crawlo/utils/__init__.py +7 -7
  81. crawlo/utils/controlled_spider_mixin.py +439 -335
  82. crawlo/utils/date_tools.py +233 -233
  83. crawlo/utils/db_helper.py +343 -343
  84. crawlo/utils/func_tools.py +82 -82
  85. crawlo/utils/large_scale_config.py +286 -286
  86. crawlo/utils/large_scale_helper.py +343 -343
  87. crawlo/utils/log.py +128 -128
  88. crawlo/utils/queue_helper.py +175 -175
  89. crawlo/utils/request.py +267 -267
  90. crawlo/utils/request_serializer.py +219 -219
  91. crawlo/utils/spider_loader.py +62 -62
  92. crawlo/utils/system.py +11 -11
  93. crawlo/utils/tools.py +4 -4
  94. crawlo/utils/url.py +39 -39
  95. {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/METADATA +635 -567
  96. crawlo-1.1.3.dist-info/RECORD +113 -0
  97. examples/__init__.py +7 -7
  98. examples/controlled_spider_example.py +205 -0
  99. tests/__init__.py +7 -7
  100. tests/test_final_validation.py +153 -153
  101. tests/test_proxy_health_check.py +32 -32
  102. tests/test_proxy_middleware_integration.py +136 -136
  103. tests/test_proxy_providers.py +56 -56
  104. tests/test_proxy_stats.py +19 -19
  105. tests/test_proxy_strategies.py +59 -59
  106. tests/test_redis_config.py +28 -28
  107. tests/test_redis_queue.py +224 -224
  108. tests/test_request_serialization.py +70 -70
  109. tests/test_scheduler.py +241 -241
  110. crawlo-1.1.2.dist-info/RECORD +0 -108
  111. {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/WHEEL +0 -0
  112. {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/entry_points.txt +0 -0
  113. {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/top_level.txt +0 -0
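
A per-file summary like the listing above can be approximated locally from the two wheels. A minimal sketch, assuming both archives have already been downloaded into the working directory (those filenames are the only assumptions); it walks each archive with zipfile and counts added and removed lines with difflib:

import difflib
import zipfile

OLD_WHEEL = "crawlo-1.1.2-py3-none-any.whl"  # assumed local filename
NEW_WHEEL = "crawlo-1.1.3-py3-none-any.whl"  # assumed local filename

def read_members(path):
    """Map each archive member name to its decoded lines."""
    with zipfile.ZipFile(path) as zf:
        return {
            name: zf.read(name).decode("utf-8", errors="replace").splitlines()
            for name in zf.namelist()
        }

old, new = read_members(OLD_WHEEL), read_members(NEW_WHEEL)
for name in sorted(set(old) | set(new)):
    added = removed = 0
    for line in difflib.unified_diff(old.get(name, []), new.get(name, []), lineterm=""):
        if line.startswith("+") and not line.startswith("+++"):
            added += 1
        elif line.startswith("-") and not line.startswith("---"):
            removed += 1
    if added or removed:
        print(f"{name} +{added} -{removed}")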
tests/test_redis_config.py CHANGED
@@ -1,29 +1,29 @@
(Every line of this file is removed and re-added with identical content; only the final line appears as unchanged context. A whole-file rewrite with no visible textual change usually indicates a line-ending or encoding normalization. The reconstructed file:)

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Quick test of the Redis connection configuration fix
"""
import asyncio
from crawlo.queue.redis_priority_queue import RedisPriorityQueue
from crawlo.settings.default_settings import REDIS_URL

async def test_redis_config():
    """Test the fixed Redis configuration"""
    print(f"🔍 Testing Redis configuration: {REDIS_URL}")

    try:
        queue = RedisPriorityQueue(redis_url=REDIS_URL)
        await queue.connect()
        print("✅ Redis connection succeeded!")
        await queue.close()
        return True
    except Exception as e:
        print(f"❌ Redis connection failed: {e}")
        return False

if __name__ == "__main__":
    success = asyncio.run(test_redis_config())
    if success:
        print("🎉 Configuration fix verified! You can now run your spider.")
    else:
        print("❌ The configuration still has problems; check the Redis service status.")
tests/test_redis_queue.py CHANGED
@@ -1,225 +1,225 @@
(Same pattern as above: all 224 lines are removed and re-added unchanged, with the final line as context. The reconstructed file:)

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Redis distributed queue test script
Used to diagnose and fix distributed queue problems
"""
import asyncio
import sys
import traceback
import time
from crawlo.queue.redis_priority_queue import RedisPriorityQueue
from crawlo.network.request import Request


async def test_redis_connection():
    """Test the Redis connection"""
    print("🔍 1. Testing the Redis connection...")

    # Try different Redis URL formats
    test_urls = [
        "redis://localhost:6379/0",
        "redis://:oscar&0503@127.0.0.1:6379/0",  # with password
        "redis://127.0.0.1:6379/0",  # without password
    ]

    for redis_url in test_urls:
        try:
            print(f"  Trying to connect: {redis_url}")
            queue = RedisPriorityQueue(redis_url=redis_url)
            await queue.connect()
            print(f"  ✅ Connected: {redis_url}")
            await queue.close()
            return redis_url
        except Exception as e:
            print(f"  ❌ Connection failed: {redis_url} - {e}")

    raise ConnectionError("All Redis URLs failed to connect")


async def test_queue_operations(redis_url):
    """Test basic queue operations"""
    print("🔍 2. Testing basic queue operations...")

    queue = RedisPriorityQueue(
        redis_url=redis_url,
        queue_name="test:crawlo:requests",
        max_retries=2
    )

    try:
        await queue.connect()

        # Test the put operation
        test_request = Request(url="https://example.com", priority=5)
        print(f"  📤 Enqueuing request: {test_request.url}")

        success = await queue.put(test_request, priority=5)
        if success:
            print("  ✅ Enqueue succeeded")
        else:
            print("  ❌ Enqueue failed")
            return False

        # Test the queue size
        size = await queue.qsize()
        print(f"  📊 Queue size: {size}")

        # Test the get operation
        print("  📥 Fetching request...")
        retrieved_request = await queue.get(timeout=2.0)

        if retrieved_request:
            print(f"  ✅ Fetch succeeded: {retrieved_request.url}")
            # Test ack
            await queue.ack(retrieved_request)
            print("  ✅ ACK succeeded")
        else:
            print("  ❌ Fetch failed (timeout)")
            return False

        return True

    except Exception as e:
        print(f"  ❌ Queue operation failed: {e}")
        traceback.print_exc()
        return False
    finally:
        await queue.close()


async def test_serialization():
    """Test for serialization problems"""
    print("🔍 3. Testing Request serialization...")

    try:
        import pickle
        from crawlo.network.request import Request

        # Create a test request
        request = Request(
            url="https://example.com",
            method="GET",
            headers={"User-Agent": "Test"},
            meta={"test": "data"},
            priority=5
        )

        # Test serialization
        serialized = pickle.dumps(request)
        print(f"  ✅ Serialization succeeded, size: {len(serialized)} bytes")

        # Test deserialization
        deserialized = pickle.loads(serialized)
        print(f"  ✅ Deserialization succeeded: {deserialized.url}")

        return True

    except Exception as e:
        print(f"  ❌ Serialization failed: {e}")
        traceback.print_exc()
        return False


async def test_concurrent_operations(redis_url):
    """Test concurrent operations"""
    print("🔍 4. Testing concurrent operations...")

    async def producer(queue, start_id):
        """Producer"""
        try:
            for i in range(5):
                request = Request(url=f"https://example{start_id + i}.com", priority=i)
                await queue.put(request, priority=i)
                await asyncio.sleep(0.1)
            print(f"  ✅ Producer {start_id} finished")
        except Exception as e:
            print(f"  ❌ Producer {start_id} failed: {e}")

    async def consumer(queue, consumer_id):
        """Consumer"""
        consumed = 0
        try:
            for _ in range(3):  # each consumer handles 3 requests
                request = await queue.get(timeout=5.0)
                if request:
                    await queue.ack(request)
                    consumed += 1
                    await asyncio.sleep(0.05)
                else:
                    break
            print(f"  ✅ Consumer {consumer_id} handled {consumed} requests")
        except Exception as e:
            print(f"  ❌ Consumer {consumer_id} failed: {e}")

    queue = RedisPriorityQueue(
        redis_url=redis_url,
        queue_name="test:concurrent:requests"
    )

    try:
        await queue.connect()

        # Run producers and consumers concurrently
        tasks = [
            producer(queue, 0),
            producer(queue, 10),
            consumer(queue, 1),
            consumer(queue, 2),
        ]

        await asyncio.gather(*tasks, return_exceptions=True)

        # Check the remaining queue size
        final_size = await queue.qsize()
        print(f"  📊 Final queue size: {final_size}")

        return True

    except Exception as e:
        print(f"  ❌ Concurrency test failed: {e}")
        return False
    finally:
        await queue.close()


async def main():
    """Main test entry point"""
    print("🚀 Starting Redis distributed queue diagnostics...")
    print("=" * 50)

    try:
        # 1. Test the connection
        redis_url = await test_redis_connection()

        # 2. Test serialization
        if not await test_serialization():
            return

        # 3. Test basic operations
        if not await test_queue_operations(redis_url):
            return

        # 4. Test concurrent operations
        if not await test_concurrent_operations(redis_url):
            return

        print("=" * 50)
        print("🎉 All tests passed! The Redis queue is working correctly")

    except Exception as e:
        print("=" * 50)
        print(f"❌ Diagnostics failed: {e}")
        traceback.print_exc()

        # Suggest possible fixes
        print("\n🔧 Possible solutions:")
        print("1. Check whether the Redis service is running: redis-server")
        print("2. Check the Redis password configuration")
        print("3. Check the firewall and port 6379")
        print("4. Install the Redis client: pip install redis")
        print("5. Check the bind setting in the Redis configuration file")


if __name__ == "__main__":
    asyncio.run(main())
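
The concurrent test above leans on a get → process → ack cycle, which implies the queue keeps unacknowledged requests recoverable rather than dropping them at fetch time. This diff does not show RedisPriorityQueue's implementation, but a common way to get that at-least-once behavior from Redis is a sorted set of pending items plus a hash of in-flight items. A minimal sketch with redis-py's asyncio client, all names illustrative:

import json
import redis.asyncio as redis

class TinyAckQueue:
    """Illustrative priority queue with ack: pending items live in a
    sorted set, in-flight items in a hash until they are acknowledged."""

    def __init__(self, url, name="demo:queue"):
        self.r = redis.from_url(url)
        self.pending = f"{name}:pending"    # ZSET: payload -> priority
        self.inflight = f"{name}:inflight"  # HASH: payload -> claim info

    async def put(self, item: dict, priority: int = 0):
        await self.r.zadd(self.pending, {json.dumps(item): priority})

    async def get(self):
        popped = await self.r.zpopmin(self.pending, count=1)
        if not popped:
            return None
        payload, _score = popped[0]
        await self.r.hset(self.inflight, payload, "claimed")
        return json.loads(payload)

    async def ack(self, item: dict):
        # Only an ack removes the item for good; a reaper task could
        # re-queue inflight entries whose consumer died.
        await self.r.hdel(self.inflight, json.dumps(item))

A production version would also need that reaper to move stale in-flight entries back into the pending set; the max_retries parameter accepted by RedisPriorityQueue above hints at exactly that kind of redelivery accounting.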
tests/test_request_serialization.py CHANGED
@@ -1,71 +1,71 @@
(Same pattern: all 70 lines are removed and re-added unchanged, with the final line as context. The reconstructed file:)

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Test the fix for the Request serialization problem
"""
import pickle
import sys
sys.path.insert(0, "..")

from crawlo.network.request import Request
from crawlo.core.scheduler import Scheduler
from unittest.mock import Mock

# Simulate a Request that carries a logger
class TestRequest(Request):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Add a logger attribute to reproduce the problem
        from crawlo.utils.log import get_logger
        self.logger = get_logger("test_request")
        self.meta['spider_logger'] = get_logger("spider_logger")

def test_request_serialization():
    """Test Request serialization"""
    print("🔍 Testing the Request serialization fix...")

    # Create a request that carries a logger
    request = TestRequest(
        url="https://example.com",
        meta={"test": "data"}  # Mock objects removed
    )

    print(f"  📦 Original request: {request}")
    print(f"  🔧 Request has a logger: {hasattr(request, 'logger')}")
    print(f"  🔧 meta has a logger: {'spider_logger' in request.meta}")

    # Create a mock scheduler to test the cleanup
    class MockScheduler:
        def _deep_clean_loggers(self, request):
            return Scheduler._deep_clean_loggers(self, request)
        def _remove_logger_from_dict(self, d):
            return Scheduler._remove_logger_from_dict(self, d)

    scheduler = MockScheduler()

    # Run the cleanup
    scheduler._deep_clean_loggers(request)

    print(f"  🧹 Has a logger after cleanup: {hasattr(request, 'logger')}")
    print(f"  🧹 meta has a logger after cleanup: {'spider_logger' in request.meta}")

    # Test serialization
    try:
        serialized = pickle.dumps(request)
        print(f"  ✅ Serialization succeeded, size: {len(serialized)} bytes")

        # Test deserialization
        deserialized = pickle.loads(serialized)
        print(f"  ✅ Deserialization succeeded: {deserialized}")
        return True

    except Exception as e:
        print(f"  ❌ Serialization failed: {e}")
        return False

if __name__ == "__main__":
    success = test_request_serialization()
    if success:
        print("🎉 Request serialization fix verified!")
    else:
        print("❌ The serialization problem is still unresolved")