crawlo-1.1.4-py3-none-any.whl → crawlo-1.1.5-py3-none-any.whl

This diff shows the content of publicly available package versions as they were released to their public registries. It is provided for informational purposes only and reflects the changes between the two published versions.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (186)
  1. crawlo/__init__.py +61 -34
  2. crawlo/__version__.py +1 -1
  3. crawlo/cleaners/__init__.py +61 -0
  4. crawlo/cleaners/data_formatter.py +226 -0
  5. crawlo/cleaners/encoding_converter.py +126 -0
  6. crawlo/cleaners/text_cleaner.py +233 -0
  7. crawlo/cli.py +40 -40
  8. crawlo/commands/__init__.py +13 -13
  9. crawlo/commands/check.py +594 -594
  10. crawlo/commands/genspider.py +151 -151
  11. crawlo/commands/list.py +155 -155
  12. crawlo/commands/run.py +285 -285
  13. crawlo/commands/startproject.py +300 -196
  14. crawlo/commands/stats.py +188 -188
  15. crawlo/commands/utils.py +186 -186
  16. crawlo/config.py +309 -279
  17. crawlo/config_validator.py +253 -0
  18. crawlo/core/__init__.py +2 -2
  19. crawlo/core/engine.py +346 -172
  20. crawlo/core/processor.py +40 -40
  21. crawlo/core/scheduler.py +137 -166
  22. crawlo/crawler.py +1027 -1027
  23. crawlo/downloader/__init__.py +266 -242
  24. crawlo/downloader/aiohttp_downloader.py +220 -212
  25. crawlo/downloader/cffi_downloader.py +256 -251
  26. crawlo/downloader/httpx_downloader.py +259 -259
  27. crawlo/downloader/hybrid_downloader.py +214 -0
  28. crawlo/downloader/playwright_downloader.py +403 -0
  29. crawlo/downloader/selenium_downloader.py +473 -0
  30. crawlo/event.py +11 -11
  31. crawlo/exceptions.py +81 -81
  32. crawlo/extension/__init__.py +37 -37
  33. crawlo/extension/health_check.py +141 -141
  34. crawlo/extension/log_interval.py +57 -57
  35. crawlo/extension/log_stats.py +81 -81
  36. crawlo/extension/logging_extension.py +43 -43
  37. crawlo/extension/memory_monitor.py +104 -88
  38. crawlo/extension/performance_profiler.py +133 -117
  39. crawlo/extension/request_recorder.py +107 -107
  40. crawlo/filters/__init__.py +154 -154
  41. crawlo/filters/aioredis_filter.py +280 -242
  42. crawlo/filters/memory_filter.py +269 -269
  43. crawlo/items/__init__.py +23 -23
  44. crawlo/items/base.py +21 -21
  45. crawlo/items/fields.py +53 -53
  46. crawlo/items/items.py +104 -104
  47. crawlo/middleware/__init__.py +21 -21
  48. crawlo/middleware/default_header.py +32 -32
  49. crawlo/middleware/download_delay.py +28 -28
  50. crawlo/middleware/middleware_manager.py +135 -135
  51. crawlo/middleware/proxy.py +272 -248
  52. crawlo/middleware/request_ignore.py +30 -30
  53. crawlo/middleware/response_code.py +18 -18
  54. crawlo/middleware/response_filter.py +26 -26
  55. crawlo/middleware/retry.py +124 -124
  56. crawlo/mode_manager.py +206 -201
  57. crawlo/network/__init__.py +21 -21
  58. crawlo/network/request.py +338 -311
  59. crawlo/network/response.py +360 -271
  60. crawlo/pipelines/__init__.py +21 -21
  61. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  62. crawlo/pipelines/console_pipeline.py +39 -39
  63. crawlo/pipelines/csv_pipeline.py +316 -316
  64. crawlo/pipelines/database_dedup_pipeline.py +224 -224
  65. crawlo/pipelines/json_pipeline.py +218 -218
  66. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  67. crawlo/pipelines/mongo_pipeline.py +131 -131
  68. crawlo/pipelines/mysql_pipeline.py +316 -316
  69. crawlo/pipelines/pipeline_manager.py +56 -56
  70. crawlo/pipelines/redis_dedup_pipeline.py +166 -162
  71. crawlo/project.py +153 -153
  72. crawlo/queue/pqueue.py +37 -37
  73. crawlo/queue/queue_manager.py +320 -307
  74. crawlo/queue/redis_priority_queue.py +277 -209
  75. crawlo/settings/__init__.py +7 -7
  76. crawlo/settings/default_settings.py +216 -278
  77. crawlo/settings/setting_manager.py +99 -99
  78. crawlo/spider/__init__.py +639 -639
  79. crawlo/stats_collector.py +59 -59
  80. crawlo/subscriber.py +130 -130
  81. crawlo/task_manager.py +30 -30
  82. crawlo/templates/crawlo.cfg.tmpl +10 -10
  83. crawlo/templates/project/__init__.py.tmpl +3 -3
  84. crawlo/templates/project/items.py.tmpl +17 -17
  85. crawlo/templates/project/middlewares.py.tmpl +110 -110
  86. crawlo/templates/project/pipelines.py.tmpl +97 -97
  87. crawlo/templates/project/run.py.tmpl +251 -251
  88. crawlo/templates/project/settings.py.tmpl +326 -279
  89. crawlo/templates/project/settings_distributed.py.tmpl +120 -0
  90. crawlo/templates/project/settings_gentle.py.tmpl +95 -0
  91. crawlo/templates/project/settings_high_performance.py.tmpl +152 -0
  92. crawlo/templates/project/settings_simple.py.tmpl +69 -0
  93. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  94. crawlo/templates/spider/spider.py.tmpl +141 -141
  95. crawlo/tools/__init__.py +183 -0
  96. crawlo/tools/anti_crawler.py +269 -0
  97. crawlo/tools/authenticated_proxy.py +241 -0
  98. crawlo/tools/data_validator.py +181 -0
  99. crawlo/tools/date_tools.py +36 -0
  100. crawlo/tools/distributed_coordinator.py +387 -0
  101. crawlo/tools/retry_mechanism.py +221 -0
  102. crawlo/tools/scenario_adapter.py +263 -0
  103. crawlo/utils/__init__.py +35 -7
  104. crawlo/utils/batch_processor.py +261 -0
  105. crawlo/utils/controlled_spider_mixin.py +439 -439
  106. crawlo/utils/date_tools.py +290 -233
  107. crawlo/utils/db_helper.py +343 -343
  108. crawlo/utils/enhanced_error_handler.py +360 -0
  109. crawlo/utils/env_config.py +106 -0
  110. crawlo/utils/error_handler.py +126 -0
  111. crawlo/utils/func_tools.py +82 -82
  112. crawlo/utils/large_scale_config.py +286 -286
  113. crawlo/utils/large_scale_helper.py +343 -343
  114. crawlo/utils/log.py +128 -128
  115. crawlo/utils/performance_monitor.py +285 -0
  116. crawlo/utils/queue_helper.py +175 -175
  117. crawlo/utils/redis_connection_pool.py +335 -0
  118. crawlo/utils/redis_key_validator.py +200 -0
  119. crawlo/utils/request.py +267 -267
  120. crawlo/utils/request_serializer.py +219 -219
  121. crawlo/utils/spider_loader.py +62 -62
  122. crawlo/utils/system.py +11 -11
  123. crawlo/utils/tools.py +4 -4
  124. crawlo/utils/url.py +39 -39
  125. {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/METADATA +401 -403
  126. crawlo-1.1.5.dist-info/RECORD +185 -0
  127. examples/__init__.py +7 -7
  128. tests/__init__.py +7 -7
  129. tests/advanced_tools_example.py +276 -0
  130. tests/authenticated_proxy_example.py +237 -0
  131. tests/cleaners_example.py +161 -0
  132. tests/config_validation_demo.py +103 -0
  133. {examples → tests}/controlled_spider_example.py +205 -205
  134. tests/date_tools_example.py +181 -0
  135. tests/dynamic_loading_example.py +524 -0
  136. tests/dynamic_loading_test.py +105 -0
  137. tests/env_config_example.py +134 -0
  138. tests/error_handling_example.py +172 -0
  139. tests/redis_key_validation_demo.py +131 -0
  140. tests/response_improvements_example.py +145 -0
  141. tests/test_advanced_tools.py +149 -0
  142. tests/test_all_redis_key_configs.py +146 -0
  143. tests/test_authenticated_proxy.py +142 -0
  144. tests/test_cleaners.py +55 -0
  145. tests/test_comprehensive.py +147 -0
  146. tests/test_config_validator.py +194 -0
  147. tests/test_date_tools.py +124 -0
  148. tests/test_dynamic_downloaders_proxy.py +125 -0
  149. tests/test_dynamic_proxy.py +93 -0
  150. tests/test_dynamic_proxy_config.py +147 -0
  151. tests/test_dynamic_proxy_real.py +110 -0
  152. tests/test_edge_cases.py +304 -0
  153. tests/test_enhanced_error_handler.py +271 -0
  154. tests/test_env_config.py +122 -0
  155. tests/test_error_handler_compatibility.py +113 -0
  156. tests/test_final_validation.py +153 -153
  157. tests/test_framework_env_usage.py +104 -0
  158. tests/test_integration.py +357 -0
  159. tests/test_item_dedup_redis_key.py +123 -0
  160. tests/test_parsel.py +30 -0
  161. tests/test_performance.py +328 -0
  162. tests/test_proxy_health_check.py +32 -32
  163. tests/test_proxy_middleware_integration.py +136 -136
  164. tests/test_proxy_providers.py +56 -56
  165. tests/test_proxy_stats.py +19 -19
  166. tests/test_proxy_strategies.py +59 -59
  167. tests/test_queue_manager_redis_key.py +177 -0
  168. tests/test_redis_config.py +28 -28
  169. tests/test_redis_connection_pool.py +295 -0
  170. tests/test_redis_key_naming.py +182 -0
  171. tests/test_redis_key_validator.py +124 -0
  172. tests/test_redis_queue.py +224 -224
  173. tests/test_request_serialization.py +70 -70
  174. tests/test_response_improvements.py +153 -0
  175. tests/test_scheduler.py +241 -241
  176. tests/test_simple_response.py +62 -0
  177. tests/test_telecom_spider_redis_key.py +206 -0
  178. tests/test_template_content.py +88 -0
  179. tests/test_template_redis_key.py +135 -0
  180. tests/test_tools.py +154 -0
  181. tests/tools_example.py +258 -0
  182. crawlo/core/enhanced_engine.py +0 -190
  183. crawlo-1.1.4.dist-info/RECORD +0 -117
  184. {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/WHEEL +0 -0
  185. {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/entry_points.txt +0 -0
  186. {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/top_level.txt +0 -0
tests/test_response_improvements.py ADDED
@@ -0,0 +1,153 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ Response 改进功能测试
+ """
+ import unittest
+ from crawlo.network.response import Response
+
+
+ class TestResponseImprovements(unittest.TestCase):
+     """Response 改进功能测试类"""
+
+     def setUp(self):
+         """测试前准备"""
+         # 创建一个模拟的HTML响应
+         html_content = """
+         <html>
+         <head>
+             <title>测试页面</title>
+         </head>
+         <body>
+             <div class="content">
+                 <h1>主标题</h1>
+                 <p class="intro">这是介绍段落</p>
+                 <ul class="list">
+                     <li>项目1</li>
+                     <li>项目2</li>
+                     <li>项目3</li>
+                 </ul>
+                 <a href="https://example.com" class="link">链接文本</a>
+                 <img src="image.jpg" alt="图片描述" class="image">
+             </div>
+         </body>
+         </html>
+         """
+
+         self.response = Response(
+             url="https://example.com/test",
+             body=html_content.encode('utf-8'),
+             headers={"content-type": "text/html; charset=utf-8"}
+         )
+
+     def test_extract_text_with_css(self):
+         """测试使用CSS选择器提取文本"""
+         # 测试提取单个元素文本
+         title = self.response.extract_text('title')
+         self.assertEqual(title, "测试页面")
+
+         # 测试提取class元素文本
+         h1_text = self.response.extract_text('.content h1')
+         self.assertEqual(h1_text, "主标题")
+
+         # 测试提取带有默认值的情况
+         non_exist = self.response.extract_text('.non-exist', default='默认值')
+         self.assertEqual(non_exist, '默认值')
+
+     def test_extract_text_with_xpath(self):
+         """测试使用XPath选择器提取文本"""
+         # 测试提取单个元素文本
+         title = self.response.extract_text('//title')
+         self.assertEqual(title, "测试页面")
+
+         # 测试提取class元素文本
+         h1_text = self.response.extract_text('//div[@class="content"]/h1')
+         self.assertEqual(h1_text, "主标题")
+
+     def test_extract_texts_with_css(self):
+         """测试使用CSS选择器提取多个文本"""
+         # 测试提取多个li元素的文本
+         list_items = self.response.extract_texts('.list li')
+         expected = ["项目1", "项目2", "项目3"]
+         self.assertEqual(list_items, expected)
+
+         # 测试提取不存在元素的默认值
+         non_exist = self.response.extract_texts('.non-exist', default=['默认值'])
+         self.assertEqual(non_exist, ['默认值'])
+
+     def test_extract_texts_with_xpath(self):
+         """测试使用XPath选择器提取多个文本"""
+         # 测试提取多个li元素的文本
+         list_items = self.response.extract_texts('//ul[@class="list"]/li')
+         expected = ["项目1", "项目2", "项目3"]
+         self.assertEqual(list_items, expected)
+
+     def test_extract_attr(self):
+         """测试提取元素属性"""
+         # 测试提取链接的href属性
+         link_href = self.response.extract_attr('.link', 'href')
+         self.assertEqual(link_href, "https://example.com")
+
+         # 测试提取图片的alt属性
+         img_alt = self.response.extract_attr('.image', 'alt')
+         self.assertEqual(img_alt, "图片描述")
+
+         # 测试提取不存在属性的默认值
+         non_exist = self.response.extract_attr('.link', 'non-exist', default='默认值')
+         self.assertEqual(non_exist, '默认值')
+
+     def test_extract_attrs(self):
+         """测试提取多个元素的属性"""
+         # 测试提取所有li元素的属性(这里我们测试class属性)
+         list_classes = self.response.extract_attrs('.list li', 'class')
+         # 注意:在当前HTML中li元素没有class属性,所以应该返回空列表
+         self.assertEqual(list_classes, [])
+
+         # 测试提取所有图片元素的alt属性
+         img_alts = self.response.extract_attrs('.image', 'alt')
+         self.assertEqual(img_alts, ['图片描述'])
+
+         # 测试提取不存在元素时的默认值
+         non_exist = self.response.extract_attrs('.non-exist-elements', 'alt', default=['默认值'])
+         self.assertEqual(non_exist, ['默认值'])
+
+     def test_extract_text_from_elements(self):
+         """测试从复杂元素中提取文本"""
+         # 创建包含嵌套标签的HTML
+         complex_html = """
+         <div class="complex">
+             <p>段落文本 <strong>粗体文本</strong> 普通文本</p>
+             <p>第二段落 <em>斜体文本</em></p>
+         </div>
+         """
+
+         complex_response = Response(
+             url="https://example.com/complex",
+             body=complex_html.encode('utf-8')
+         )
+
+         # 测试提取复杂元素的文本
+         complex_text = complex_response.extract_text('.complex p', join_str=' ')
+         self.assertIn("段落文本", complex_text)
+         self.assertIn("粗体文本", complex_text)
+         self.assertIn("普通文本", complex_text)
+
+     def test_edge_cases(self):
+         """测试边界情况"""
+         # 测试空响应
+         empty_response = Response(url="https://example.com/empty", body=b"")
+         empty_text = empty_response.extract_text('title', default='默认标题')
+         self.assertEqual(empty_text, '默认标题')
+
+         # 测试只包含空白字符的元素
+         whitespace_html = "<div class='whitespace'> </div>"
+         whitespace_response = Response(
+             url="https://example.com/whitespace",
+             body=whitespace_html.encode('utf-8')
+         )
+         whitespace_text = whitespace_response.extract_text('.whitespace')
+         self.assertEqual(whitespace_text, '')
+
+
+ if __name__ == '__main__':
+     unittest.main()
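
The new test above exercises the Response extraction helpers covered by this release: extract_text, extract_texts, extract_attr and extract_attrs, each accepting CSS or XPath selectors plus a default value, and extract_text additionally accepting join_str. A minimal standalone sketch of the same calls, using only the constructor arguments and helper signatures shown in the test; the URL and HTML content below are illustrative, not taken from the package:

from crawlo.network.response import Response

# Build a Response directly, as the test does (url, raw body bytes, optional headers).
html = (b'<html><head><title>demo</title></head><body>'
        b'<ul class="list"><li>a</li><li>b</li></ul>'
        b'<a class="link" href="https://example.com">x</a></body></html>')
response = Response(url="https://example.com/demo", body=html)

# Single text with a fallback when nothing matches the selector.
title = response.extract_text('title', default='')
# All matching texts; CSS and XPath selectors are both accepted.
all_items = response.extract_texts('//ul[@class="list"]/li')
# Attribute extraction, single and multiple, each with a default.
href = response.extract_attr('.link', 'href', default=None)
hrefs = response.extract_attrs('.link', 'href', default=[])

print(title, all_items, href, hrefs)
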
tests/test_scheduler.py CHANGED
@@ -1,242 +1,242 @@
- #!/usr/bin/env python3
- # -*- coding: utf-8 -*-
- """
- 测试修复后的 Scheduler 分布式队列功能
- """
- import asyncio
- import sys
- from unittest.mock import Mock
- from crawlo.core.scheduler import Scheduler
- from crawlo.network.request import Request
- from crawlo.utils.log import get_logger
-
-
- class MockCrawler:
-     """模拟 Crawler 对象"""
-     def __init__(self, use_redis=True):
-         self.settings = MockSettings(use_redis)
-         self.stats = Mock()
-
-
- class MockSettings:
-     """模拟 Settings 对象"""
-     def __init__(self, use_redis=True):
-         self.use_redis = use_redis
-
-     def get(self, key, default=None):
-         config = {
-             'FILTER_CLASS': 'crawlo.filters.memory_filter.MemoryFilter',
-             'LOG_LEVEL': 'INFO',
-             'DEPTH_PRIORITY': 1,
-             'SCHEDULER_MAX_QUEUE_SIZE': 100,
-             'SCHEDULER_QUEUE_NAME': 'test:crawlo:requests',
-             'FILTER_DEBUG': False,
-             'PROJECT_NAME': 'test',
-         }
-         if self.use_redis:
-             config['REDIS_URL'] = 'redis://localhost:6379/0'
-
-         return config.get(key, default)
-
-     def get_int(self, key, default=0):
-         value = self.get(key, default)
-         return int(value) if value is not None else default
-
-     def get_bool(self, key, default=False):
-         value = self.get(key, default)
-         if isinstance(value, bool):
-             return value
-         if isinstance(value, str):
-             return value.lower() in ('true', '1', 'yes')
-         return bool(value) if value is not None else default
-
-
- class MockFilter:
-     """模拟去重过滤器"""
-     def __init__(self):
-         self.seen = set()
-
-     @classmethod
-     def create_instance(cls, crawler):
-         return cls()
-
-     async def requested(self, request):
-         if request.url in self.seen:
-             return True
-         self.seen.add(request.url)
-         return False
-
-     def log_stats(self, request):
-         pass
-
-
- async def test_memory_scheduler():
-     """测试内存调度器"""
-     print("🔍 测试内存调度器...")
-
-     crawler = MockCrawler(use_redis=False)
-     scheduler = Scheduler.create_instance(crawler)
-
-     # 模拟去重过滤器
-     scheduler.dupe_filter = MockFilter()
-
-     scheduler.open()
-
-     # 测试入队
-     request1 = Request(url="https://example1.com")
-     request2 = Request(url="https://example2.com")
-
-     success1 = await scheduler.enqueue_request(request1)
-     success2 = await scheduler.enqueue_request(request2)
-
-     print(f" 📤 入队结果: {success1}, {success2}")
-     print(f" 📊 队列大小: {len(scheduler)}")
-
-     # 测试出队
-     req1 = await scheduler.next_request()
-     req2 = await scheduler.next_request()
-
-     print(f" 📥 出队结果: {req1.url if req1 else None}, {req2.url if req2 else None}")
-     print(f" 📊 剩余大小: {len(scheduler)}")
-
-     await scheduler.close()
-     print(" ✅ 内存调度器测试完成")
-
-
- async def test_redis_scheduler():
-     """测试 Redis 调度器"""
-     print("🔍 测试 Redis 调度器...")
-
-     try:
-         crawler = MockCrawler(use_redis=True)
-         scheduler = Scheduler.create_instance(crawler)
-
-         # 模拟去重过滤器
-         scheduler.dupe_filter = MockFilter()
-
-         scheduler.open()
-
-         # 测试入队
-         request1 = Request(url="https://redis-test1.com", priority=5)
-         request2 = Request(url="https://redis-test2.com", priority=3)
-         request3 = Request(url="https://redis-test3.com", priority=8)
-
-         success1 = await scheduler.enqueue_request(request1)
-         success2 = await scheduler.enqueue_request(request2)
-         success3 = await scheduler.enqueue_request(request3)
-
-         print(f" 📤 入队结果: {success1}, {success2}, {success3}")
-         print(f" 📊 队列大小: {len(scheduler)}")
-
-         # 等待一小段时间让 Redis 操作完成
-         await asyncio.sleep(0.5)
-
-         # 测试出队(应该按优先级排序)
-         req1 = await scheduler.next_request()
-         req2 = await scheduler.next_request()
-         req3 = await scheduler.next_request()
-
-         print(" 📥 出队结果(按优先级):")
-         if req1:
-             print(f" {req1.url} (优先级: {getattr(req1, 'priority', 0)})")
-         if req2:
-             print(f" {req2.url} (优先级: {getattr(req2, 'priority', 0)})")
-         if req3:
-             print(f" {req3.url} (优先级: {getattr(req3, 'priority', 0)})")
-
-         print(f" 📊 剩余大小: {len(scheduler)}")
-
-         await scheduler.close()
-         print(" ✅ Redis 调度器测试完成")
-
-     except Exception as e:
-         print(f" ❌ Redis 调度器测试失败: {e}")
-         import traceback
-         traceback.print_exc()
-
-
- async def test_concurrent_redis():
-     """测试并发 Redis 操作"""
-     print("🔍 测试并发 Redis 操作...")
-
-     async def producer(scheduler, name, count):
-         """生产者"""
-         for i in range(count):
-             request = Request(url=f"https://{name}-{i}.com", priority=i % 10)
-             await scheduler.enqueue_request(request)
-             await asyncio.sleep(0.01)
-         print(f" ✅ 生产者 {name} 完成 ({count} 个请求)")
-
-     async def consumer(scheduler, name, count):
-         """消费者"""
-         consumed = 0
-         for _ in range(count):
-             request = await scheduler.next_request()
-             if request:
-                 consumed += 1
-                 await asyncio.sleep(0.005)
-             else:
-                 break
-         print(f" ✅ 消费者 {name} 处理了 {consumed} 个请求")
-
-     try:
-         crawler = MockCrawler(use_redis=True)
-         scheduler = Scheduler.create_instance(crawler)
-         scheduler.dupe_filter = MockFilter()
-         scheduler.open()
-
-         # 并发运行生产者和消费者
-         tasks = [
-             producer(scheduler, "producer-1", 5),
-             producer(scheduler, "producer-2", 5),
-             consumer(scheduler, "consumer-1", 3),
-             consumer(scheduler, "consumer-2", 3),
-             consumer(scheduler, "consumer-3", 4),
-         ]
-
-         await asyncio.gather(*tasks, return_exceptions=True)
-
-         print(f" 📊 最终队列大小: {len(scheduler)}")
-
-         await scheduler.close()
-         print(" ✅ 并发测试完成")
-
-     except Exception as e:
-         print(f" ❌ 并发测试失败: {e}")
-         import traceback
-         traceback.print_exc()
-
-
- async def main():
-     """主测试函数"""
-     print("🚀 开始测试修复后的 Scheduler...")
-     print("=" * 50)
-
-     try:
-         # 1. 测试内存调度器
-         await test_memory_scheduler()
-         print()
-
-         # 2. 测试 Redis 调度器
-         await test_redis_scheduler()
-         print()
-
-         # 3. 测试并发操作
-         await test_concurrent_redis()
-
-         print("=" * 50)
-         print("🎉 所有 Scheduler 测试完成!")
-
-     except Exception as e:
-         print("=" * 50)
-         print(f"❌ 测试失败: {e}")
-         import traceback
-         traceback.print_exc()
-
-
- if __name__ == "__main__":
-     # 设置日志级别避免过多输出
-     import logging
-     logging.getLogger('crawlo').setLevel(logging.WARNING)
-
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ 测试修复后的 Scheduler 分布式队列功能
+ """
+ import asyncio
+ import sys
+ from unittest.mock import Mock
+ from crawlo.core.scheduler import Scheduler
+ from crawlo.network.request import Request
+ from crawlo.utils.log import get_logger
+
+
+ class MockCrawler:
+     """模拟 Crawler 对象"""
+     def __init__(self, use_redis=True):
+         self.settings = MockSettings(use_redis)
+         self.stats = Mock()
+
+
+ class MockSettings:
+     """模拟 Settings 对象"""
+     def __init__(self, use_redis=True):
+         self.use_redis = use_redis
+
+     def get(self, key, default=None):
+         config = {
+             'FILTER_CLASS': 'crawlo.filters.memory_filter.MemoryFilter',
+             'LOG_LEVEL': 'INFO',
+             'DEPTH_PRIORITY': 1,
+             'SCHEDULER_MAX_QUEUE_SIZE': 100,
+             'SCHEDULER_QUEUE_NAME': 'test:crawlo:requests',
+             'FILTER_DEBUG': False,
+             'PROJECT_NAME': 'test',
+         }
+         if self.use_redis:
+             config['REDIS_URL'] = 'redis://localhost:6379/0'
+
+         return config.get(key, default)
+
+     def get_int(self, key, default=0):
+         value = self.get(key, default)
+         return int(value) if value is not None else default
+
+     def get_bool(self, key, default=False):
+         value = self.get(key, default)
+         if isinstance(value, bool):
+             return value
+         if isinstance(value, str):
+             return value.lower() in ('true', '1', 'yes')
+         return bool(value) if value is not None else default
+
+
+ class MockFilter:
+     """模拟去重过滤器"""
+     def __init__(self):
+         self.seen = set()
+
+     @classmethod
+     def create_instance(cls, crawler):
+         return cls()
+
+     async def requested(self, request):
+         if request.url in self.seen:
+             return True
+         self.seen.add(request.url)
+         return False
+
+     def log_stats(self, request):
+         pass
+
+
+ async def test_memory_scheduler():
+     """测试内存调度器"""
+     print("🔍 测试内存调度器...")
+
+     crawler = MockCrawler(use_redis=False)
+     scheduler = Scheduler.create_instance(crawler)
+
+     # 模拟去重过滤器
+     scheduler.dupe_filter = MockFilter()
+
+     scheduler.open()
+
+     # 测试入队
+     request1 = Request(url="https://example1.com")
+     request2 = Request(url="https://example2.com")
+
+     success1 = await scheduler.enqueue_request(request1)
+     success2 = await scheduler.enqueue_request(request2)
+
+     print(f" 📤 入队结果: {success1}, {success2}")
+     print(f" 📊 队列大小: {len(scheduler)}")
+
+     # 测试出队
+     req1 = await scheduler.next_request()
+     req2 = await scheduler.next_request()
+
+     print(f" 📥 出队结果: {req1.url if req1 else None}, {req2.url if req2 else None}")
+     print(f" 📊 剩余大小: {len(scheduler)}")
+
+     await scheduler.close()
+     print(" ✅ 内存调度器测试完成")
+
+
+ async def test_redis_scheduler():
+     """测试 Redis 调度器"""
+     print("🔍 测试 Redis 调度器...")
+
+     try:
+         crawler = MockCrawler(use_redis=True)
+         scheduler = Scheduler.create_instance(crawler)
+
+         # 模拟去重过滤器
+         scheduler.dupe_filter = MockFilter()
+
+         scheduler.open()
+
+         # 测试入队
+         request1 = Request(url="https://redis-test1.com", priority=5)
+         request2 = Request(url="https://redis-test2.com", priority=3)
+         request3 = Request(url="https://redis-test3.com", priority=8)
+
+         success1 = await scheduler.enqueue_request(request1)
+         success2 = await scheduler.enqueue_request(request2)
+         success3 = await scheduler.enqueue_request(request3)
+
+         print(f" 📤 入队结果: {success1}, {success2}, {success3}")
+         print(f" 📊 队列大小: {len(scheduler)}")
+
+         # 等待一小段时间让 Redis 操作完成
+         await asyncio.sleep(0.5)
+
+         # 测试出队(应该按优先级排序)
+         req1 = await scheduler.next_request()
+         req2 = await scheduler.next_request()
+         req3 = await scheduler.next_request()
+
+         print(" 📥 出队结果(按优先级):")
+         if req1:
+             print(f" {req1.url} (优先级: {getattr(req1, 'priority', 0)})")
+         if req2:
+             print(f" {req2.url} (优先级: {getattr(req2, 'priority', 0)})")
+         if req3:
+             print(f" {req3.url} (优先级: {getattr(req3, 'priority', 0)})")
+
+         print(f" 📊 剩余大小: {len(scheduler)}")
+
+         await scheduler.close()
+         print(" ✅ Redis 调度器测试完成")
+
+     except Exception as e:
+         print(f" ❌ Redis 调度器测试失败: {e}")
+         import traceback
+         traceback.print_exc()
+
+
+ async def test_concurrent_redis():
+     """测试并发 Redis 操作"""
+     print("🔍 测试并发 Redis 操作...")
+
+     async def producer(scheduler, name, count):
+         """生产者"""
+         for i in range(count):
+             request = Request(url=f"https://{name}-{i}.com", priority=i % 10)
+             await scheduler.enqueue_request(request)
+             await asyncio.sleep(0.01)
+         print(f" ✅ 生产者 {name} 完成 ({count} 个请求)")
+
+     async def consumer(scheduler, name, count):
+         """消费者"""
+         consumed = 0
+         for _ in range(count):
+             request = await scheduler.next_request()
+             if request:
+                 consumed += 1
+                 await asyncio.sleep(0.005)
+             else:
+                 break
+         print(f" ✅ 消费者 {name} 处理了 {consumed} 个请求")
+
+     try:
+         crawler = MockCrawler(use_redis=True)
+         scheduler = Scheduler.create_instance(crawler)
+         scheduler.dupe_filter = MockFilter()
+         scheduler.open()
+
+         # 并发运行生产者和消费者
+         tasks = [
+             producer(scheduler, "producer-1", 5),
+             producer(scheduler, "producer-2", 5),
+             consumer(scheduler, "consumer-1", 3),
+             consumer(scheduler, "consumer-2", 3),
+             consumer(scheduler, "consumer-3", 4),
+         ]
+
+         await asyncio.gather(*tasks, return_exceptions=True)
+
+         print(f" 📊 最终队列大小: {len(scheduler)}")
+
+         await scheduler.close()
+         print(" ✅ 并发测试完成")
+
+     except Exception as e:
+         print(f" ❌ 并发测试失败: {e}")
+         import traceback
+         traceback.print_exc()
+
+
+ async def main():
+     """主测试函数"""
+     print("🚀 开始测试修复后的 Scheduler...")
+     print("=" * 50)
+
+     try:
+         # 1. 测试内存调度器
+         await test_memory_scheduler()
+         print()
+
+         # 2. 测试 Redis 调度器
+         await test_redis_scheduler()
+         print()
+
+         # 3. 测试并发操作
+         await test_concurrent_redis()
+
+         print("=" * 50)
+         print("🎉 所有 Scheduler 测试完成!")
+
+     except Exception as e:
+         print("=" * 50)
+         print(f"❌ 测试失败: {e}")
+         import traceback
+         traceback.print_exc()
+
+
+ if __name__ == "__main__":
+     # 设置日志级别避免过多输出
+     import logging
+     logging.getLogger('crawlo').setLevel(logging.WARNING)
+
  asyncio.run(main())