crawlo-1.2.0-py3-none-any.whl → crawlo-1.2.1-py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of crawlo might be problematic.
- crawlo/__init__.py +61 -61
- crawlo/__version__.py +1 -1
- crawlo/cleaners/__init__.py +60 -60
- crawlo/cleaners/data_formatter.py +225 -225
- crawlo/cleaners/encoding_converter.py +125 -125
- crawlo/cleaners/text_cleaner.py +232 -232
- crawlo/cli.py +65 -65
- crawlo/commands/__init__.py +14 -14
- crawlo/commands/check.py +594 -594
- crawlo/commands/genspider.py +151 -151
- crawlo/commands/help.py +142 -132
- crawlo/commands/list.py +155 -155
- crawlo/commands/run.py +292 -292
- crawlo/commands/startproject.py +418 -418
- crawlo/commands/stats.py +188 -188
- crawlo/commands/utils.py +186 -186
- crawlo/config.py +312 -312
- crawlo/config_validator.py +252 -252
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +354 -354
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +143 -143
- crawlo/crawler.py +1027 -1027
- crawlo/downloader/__init__.py +266 -266
- crawlo/downloader/aiohttp_downloader.py +220 -220
- crawlo/downloader/cffi_downloader.py +256 -256
- crawlo/downloader/httpx_downloader.py +259 -259
- crawlo/downloader/hybrid_downloader.py +213 -213
- crawlo/downloader/playwright_downloader.py +402 -402
- crawlo/downloader/selenium_downloader.py +472 -472
- crawlo/event.py +11 -11
- crawlo/exceptions.py +81 -81
- crawlo/extension/__init__.py +37 -37
- crawlo/extension/health_check.py +141 -141
- crawlo/extension/log_interval.py +57 -57
- crawlo/extension/log_stats.py +81 -81
- crawlo/extension/logging_extension.py +43 -43
- crawlo/extension/memory_monitor.py +104 -104
- crawlo/extension/performance_profiler.py +133 -133
- crawlo/extension/request_recorder.py +107 -107
- crawlo/filters/__init__.py +154 -154
- crawlo/filters/aioredis_filter.py +280 -280
- crawlo/filters/memory_filter.py +269 -269
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +21 -21
- crawlo/items/fields.py +53 -53
- crawlo/items/items.py +104 -104
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +132 -32
- crawlo/middleware/download_delay.py +105 -28
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/offsite.py +116 -0
- crawlo/middleware/proxy.py +366 -272
- crawlo/middleware/request_ignore.py +88 -30
- crawlo/middleware/response_code.py +164 -18
- crawlo/middleware/response_filter.py +138 -26
- crawlo/middleware/retry.py +124 -124
- crawlo/mode_manager.py +211 -211
- crawlo/network/__init__.py +21 -21
- crawlo/network/request.py +338 -338
- crawlo/network/response.py +359 -359
- crawlo/pipelines/__init__.py +21 -21
- crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +316 -316
- crawlo/pipelines/database_dedup_pipeline.py +224 -224
- crawlo/pipelines/json_pipeline.py +218 -218
- crawlo/pipelines/memory_dedup_pipeline.py +115 -115
- crawlo/pipelines/mongo_pipeline.py +131 -131
- crawlo/pipelines/mysql_pipeline.py +316 -316
- crawlo/pipelines/pipeline_manager.py +61 -61
- crawlo/pipelines/redis_dedup_pipeline.py +167 -167
- crawlo/project.py +187 -187
- crawlo/queue/pqueue.py +37 -37
- crawlo/queue/queue_manager.py +337 -337
- crawlo/queue/redis_priority_queue.py +298 -298
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +226 -219
- crawlo/settings/setting_manager.py +122 -122
- crawlo/spider/__init__.py +639 -639
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +130 -130
- crawlo/task_manager.py +30 -30
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +118 -109
- crawlo/templates/project/pipelines.py.tmpl +96 -96
- crawlo/templates/project/run.py.tmpl +45 -45
- crawlo/templates/project/settings.py.tmpl +327 -326
- crawlo/templates/project/settings_distributed.py.tmpl +119 -119
- crawlo/templates/project/settings_gentle.py.tmpl +94 -94
- crawlo/templates/project/settings_high_performance.py.tmpl +151 -151
- crawlo/templates/project/settings_simple.py.tmpl +68 -68
- crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
- crawlo/templates/spider/spider.py.tmpl +143 -141
- crawlo/tools/__init__.py +182 -182
- crawlo/tools/anti_crawler.py +268 -268
- crawlo/tools/authenticated_proxy.py +240 -240
- crawlo/tools/data_validator.py +180 -180
- crawlo/tools/date_tools.py +35 -35
- crawlo/tools/distributed_coordinator.py +386 -386
- crawlo/tools/retry_mechanism.py +220 -220
- crawlo/tools/scenario_adapter.py +262 -262
- crawlo/utils/__init__.py +35 -35
- crawlo/utils/batch_processor.py +260 -260
- crawlo/utils/controlled_spider_mixin.py +439 -439
- crawlo/utils/date_tools.py +290 -290
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/enhanced_error_handler.py +359 -359
- crawlo/utils/env_config.py +105 -105
- crawlo/utils/error_handler.py +125 -125
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +286 -286
- crawlo/utils/large_scale_helper.py +343 -343
- crawlo/utils/log.py +128 -128
- crawlo/utils/performance_monitor.py +284 -284
- crawlo/utils/queue_helper.py +175 -175
- crawlo/utils/redis_connection_pool.py +334 -334
- crawlo/utils/redis_key_validator.py +199 -199
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +219 -219
- crawlo/utils/spider_loader.py +62 -62
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- {crawlo-1.2.0.dist-info → crawlo-1.2.1.dist-info}/METADATA +692 -697
- crawlo-1.2.1.dist-info/RECORD +220 -0
- examples/__init__.py +7 -7
- examples/aiohttp_settings.py +42 -0
- examples/curl_cffi_settings.py +41 -0
- examples/default_header_middleware_example.py +107 -0
- examples/default_header_spider_example.py +129 -0
- examples/download_delay_middleware_example.py +160 -0
- examples/httpx_settings.py +42 -0
- examples/multi_downloader_proxy_example.py +81 -0
- examples/offsite_middleware_example.py +55 -0
- examples/offsite_spider_example.py +107 -0
- examples/proxy_spider_example.py +166 -0
- examples/request_ignore_middleware_example.py +51 -0
- examples/request_ignore_spider_example.py +99 -0
- examples/response_code_middleware_example.py +52 -0
- examples/response_filter_middleware_example.py +67 -0
- examples/tong_hua_shun_settings.py +62 -0
- examples/tong_hua_shun_spider.py +170 -0
- tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +81 -81
- tests/__init__.py +7 -7
- tests/advanced_tools_example.py +275 -275
- tests/authenticated_proxy_example.py +236 -236
- tests/cleaners_example.py +160 -160
- tests/config_validation_demo.py +102 -102
- tests/controlled_spider_example.py +205 -205
- tests/date_tools_example.py +180 -180
- tests/dynamic_loading_example.py +523 -523
- tests/dynamic_loading_test.py +104 -104
- tests/env_config_example.py +133 -133
- tests/error_handling_example.py +171 -171
- tests/redis_key_validation_demo.py +130 -130
- tests/response_improvements_example.py +144 -144
- tests/test_advanced_tools.py +148 -148
- tests/test_all_redis_key_configs.py +145 -145
- tests/test_authenticated_proxy.py +141 -141
- tests/test_cleaners.py +54 -54
- tests/test_comprehensive.py +146 -146
- tests/test_config_validator.py +193 -193
- tests/test_crawlo_proxy_integration.py +173 -0
- tests/test_date_tools.py +123 -123
- tests/test_default_header_middleware.py +159 -0
- tests/test_double_crawlo_fix.py +207 -207
- tests/test_double_crawlo_fix_simple.py +124 -124
- tests/test_download_delay_middleware.py +222 -0
- tests/test_downloader_proxy_compatibility.py +269 -0
- tests/test_dynamic_downloaders_proxy.py +124 -124
- tests/test_dynamic_proxy.py +92 -92
- tests/test_dynamic_proxy_config.py +146 -146
- tests/test_dynamic_proxy_real.py +109 -109
- tests/test_edge_cases.py +303 -303
- tests/test_enhanced_error_handler.py +270 -270
- tests/test_env_config.py +121 -121
- tests/test_error_handler_compatibility.py +112 -112
- tests/test_final_validation.py +153 -153
- tests/test_framework_env_usage.py +103 -103
- tests/test_integration.py +356 -356
- tests/test_item_dedup_redis_key.py +122 -122
- tests/test_offsite_middleware.py +222 -0
- tests/test_parsel.py +29 -29
- tests/test_performance.py +327 -327
- tests/test_proxy_api.py +265 -0
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware.py +122 -0
- tests/test_proxy_middleware_enhanced.py +217 -0
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_queue_manager_double_crawlo.py +173 -173
- tests/test_queue_manager_redis_key.py +176 -176
- tests/test_real_scenario_proxy.py +196 -0
- tests/test_redis_config.py +28 -28
- tests/test_redis_connection_pool.py +294 -294
- tests/test_redis_key_naming.py +181 -181
- tests/test_redis_key_validator.py +123 -123
- tests/test_redis_queue.py +224 -224
- tests/test_request_ignore_middleware.py +183 -0
- tests/test_request_serialization.py +70 -70
- tests/test_response_code_middleware.py +350 -0
- tests/test_response_filter_middleware.py +428 -0
- tests/test_response_improvements.py +152 -152
- tests/test_retry_middleware.py +242 -0
- tests/test_scheduler.py +241 -241
- tests/test_simple_response.py +61 -61
- tests/test_telecom_spider_redis_key.py +205 -205
- tests/test_template_content.py +87 -87
- tests/test_template_redis_key.py +134 -134
- tests/test_tools.py +153 -153
- tests/tools_example.py +257 -257
- crawlo-1.2.0.dist-info/RECORD +0 -190
- {crawlo-1.2.0.dist-info → crawlo-1.2.1.dist-info}/WHEEL +0 -0
- {crawlo-1.2.0.dist-info → crawlo-1.2.1.dist-info}/entry_points.txt +0 -0
- {crawlo-1.2.0.dist-info → crawlo-1.2.1.dist-info}/top_level.txt +0 -0
tests/test_response_improvements.py
@@ -1,153 +1,153 @@
(Every line of this file is removed and re-added with identical content, so the body is shown once.)

#!/usr/bin/python
# -*- coding:UTF-8 -*-
"""
Tests for the Response extraction improvements
"""
import unittest
from crawlo.network.response import Response


class TestResponseImprovements(unittest.TestCase):
    """Test suite for the Response extraction improvements"""

    def setUp(self):
        """Prepare test fixtures"""
        # Build a mock HTML response
        html_content = """
        <html>
        <head>
            <title>测试页面</title>
        </head>
        <body>
            <div class="content">
                <h1>主标题</h1>
                <p class="intro">这是介绍段落</p>
                <ul class="list">
                    <li>项目1</li>
                    <li>项目2</li>
                    <li>项目3</li>
                </ul>
                <a href="https://example.com" class="link">链接文本</a>
                <img src="image.jpg" alt="图片描述" class="image">
            </div>
        </body>
        </html>
        """

        self.response = Response(
            url="https://example.com/test",
            body=html_content.encode('utf-8'),
            headers={"content-type": "text/html; charset=utf-8"}
        )

    def test_extract_text_with_css(self):
        """Extract text with CSS selectors"""
        # Extract the text of a single element
        title = self.response.extract_text('title')
        self.assertEqual(title, "测试页面")

        # Extract the text of an element selected by class
        h1_text = self.response.extract_text('.content h1')
        self.assertEqual(h1_text, "主标题")

        # Fall back to the default value when nothing matches
        non_exist = self.response.extract_text('.non-exist', default='默认值')
        self.assertEqual(non_exist, '默认值')

    def test_extract_text_with_xpath(self):
        """Extract text with XPath selectors"""
        # Extract the text of a single element
        title = self.response.extract_text('//title')
        self.assertEqual(title, "测试页面")

        # Extract the text of an element selected by class
        h1_text = self.response.extract_text('//div[@class="content"]/h1')
        self.assertEqual(h1_text, "主标题")

    def test_extract_texts_with_css(self):
        """Extract multiple texts with CSS selectors"""
        # Extract the text of several li elements
        list_items = self.response.extract_texts('.list li')
        expected = ["项目1", "项目2", "项目3"]
        self.assertEqual(list_items, expected)

        # Fall back to the default value when no element matches
        non_exist = self.response.extract_texts('.non-exist', default=['默认值'])
        self.assertEqual(non_exist, ['默认值'])

    def test_extract_texts_with_xpath(self):
        """Extract multiple texts with XPath selectors"""
        # Extract the text of several li elements
        list_items = self.response.extract_texts('//ul[@class="list"]/li')
        expected = ["项目1", "项目2", "项目3"]
        self.assertEqual(list_items, expected)

    def test_extract_attr(self):
        """Extract element attributes"""
        # Extract the href attribute of the link
        link_href = self.response.extract_attr('.link', 'href')
        self.assertEqual(link_href, "https://example.com")

        # Extract the alt attribute of the image
        img_alt = self.response.extract_attr('.image', 'alt')
        self.assertEqual(img_alt, "图片描述")

        # Fall back to the default value for a missing attribute
        non_exist = self.response.extract_attr('.link', 'non-exist', default='默认值')
        self.assertEqual(non_exist, '默认值')

    def test_extract_attrs(self):
        """Extract attributes from multiple elements"""
        # Extract an attribute from all li elements (here, the class attribute)
        list_classes = self.response.extract_attrs('.list li', 'class')
        # Note: the li elements in this HTML have no class attribute, so an empty list is expected
        self.assertEqual(list_classes, [])

        # Extract the alt attribute of all image elements
        img_alts = self.response.extract_attrs('.image', 'alt')
        self.assertEqual(img_alts, ['图片描述'])

        # Fall back to the default value when no element matches
        non_exist = self.response.extract_attrs('.non-exist-elements', 'alt', default=['默认值'])
        self.assertEqual(non_exist, ['默认值'])

    def test_extract_text_from_elements(self):
        """Extract text from elements with nested markup"""
        # Build HTML containing nested tags
        complex_html = """
        <div class="complex">
            <p>段落文本 <strong>粗体文本</strong> 普通文本</p>
            <p>第二段落 <em>斜体文本</em></p>
        </div>
        """

        complex_response = Response(
            url="https://example.com/complex",
            body=complex_html.encode('utf-8')
        )

        # Extract the joined text of the nested elements
        complex_text = complex_response.extract_text('.complex p', join_str=' ')
        self.assertIn("段落文本", complex_text)
        self.assertIn("粗体文本", complex_text)
        self.assertIn("普通文本", complex_text)

    def test_edge_cases(self):
        """Edge cases"""
        # An empty response body
        empty_response = Response(url="https://example.com/empty", body=b"")
        empty_text = empty_response.extract_text('title', default='默认标题')
        self.assertEqual(empty_text, '默认标题')

        # An element containing only whitespace
        whitespace_html = "<div class='whitespace'> </div>"
        whitespace_response = Response(
            url="https://example.com/whitespace",
            body=whitespace_html.encode('utf-8')
        )
        whitespace_text = whitespace_response.extract_text('.whitespace')
        self.assertEqual(whitespace_text, '')


if __name__ == '__main__':
    unittest.main()
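For orientation, here is a minimal usage sketch of the extraction helpers exercised above. The HTML, URL, and selectors are invented for illustration; only the Response constructor and the extract_text/extract_texts/extract_attr/extract_attrs signatures (including the default and join_str parameters) are taken from the tests, and the expected outputs in the comments follow from what the tests assert.

# Usage sketch; page content and selectors are hypothetical.
from crawlo.network.response import Response

html = b'<html><body><h1>Hello</h1><a class="next" href="/page/2">next</a></body></html>'
resp = Response(
    url="https://example.com",
    body=html,
    headers={"content-type": "text/html; charset=utf-8"},
)

print(resp.extract_text('h1'))                       # expected: "Hello"
print(resp.extract_attr('.next', 'href'))            # expected: "/page/2"
print(resp.extract_texts('p', default=[]))           # expected: [] (no <p> elements match)
print(resp.extract_text('.missing', default='n/a'))  # expected: "n/a"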
tests/test_retry_middleware.py (new file)
@@ -0,0 +1,242 @@

#!/usr/bin/python
# -*- coding:UTF-8 -*-
"""
RetryMiddleware test file
Tests the behaviour of the retry middleware
"""

import unittest
from unittest.mock import Mock, patch

from crawlo.middleware.retry import RetryMiddleware
from crawlo.settings.setting_manager import SettingManager


class MockStats:
    """Mock stats class used to verify counter updates"""
    def __init__(self):
        self.stats = {}

    def inc_value(self, key, value=1):
        if key in self.stats:
            self.stats[key] += value
        else:
            self.stats[key] = value


class TestRetryMiddleware(unittest.TestCase):
    """RetryMiddleware test suite"""

    def setUp(self):
        """Prepare test fixtures"""
        # Create the settings manager
        self.settings = SettingManager()

        # Create a mock crawler object
        self.crawler = Mock()
        self.crawler.settings = self.settings
        self.crawler.stats = MockStats()

    def test_middleware_initialization(self):
        """Middleware initialization"""
        # Configure the retry settings
        self.settings.set('RETRY_HTTP_CODES', [500, 502, 503, 504, 408])
        self.settings.set('IGNORE_HTTP_CODES', [404])
        self.settings.set('MAX_RETRY_TIMES', 3)
        self.settings.set('RETRY_EXCEPTIONS', [])
        self.settings.set('RETRY_PRIORITY', -10)

        # The instance should be created normally
        middleware = RetryMiddleware.create_instance(self.crawler)
        self.assertIsInstance(middleware, RetryMiddleware)
        self.assertEqual(middleware.max_retry_times, 3)
        self.assertEqual(middleware.retry_priority, -10)

    def test_process_response_with_retry_code(self):
        """A response whose status code triggers a retry"""
        # Create the middleware instance
        middleware = RetryMiddleware(
            retry_http_codes=[500, 502, 503, 504, 408],
            ignore_http_codes=[404],
            max_retry_times=3,
            retry_exceptions=[],
            stats=MockStats(),
            retry_priority=-10
        )

        # Create request and response objects
        request = Mock()
        request.meta = {}
        request.priority = 0  # add the priority attribute
        response = Mock()
        response.status_code = 500
        spider = Mock()

        # Process the response
        result = middleware.process_response(request, response, spider)

        # The retried request should be returned
        self.assertEqual(result, request)
        self.assertEqual(request.meta['retry_times'], 1)
        self.assertTrue(request.meta['dont_retry'])
        self.assertEqual(request.priority, -10)

    def test_process_response_with_ignore_code(self):
        """A response whose status code should be ignored"""
        # Create the middleware instance
        middleware = RetryMiddleware(
            retry_http_codes=[500, 502, 503, 504, 408],
            ignore_http_codes=[404],
            max_retry_times=3,
            retry_exceptions=[],
            stats=MockStats(),
            retry_priority=-10
        )

        # Create request and response objects
        request = Mock()
        request.meta = {}
        request.priority = 0  # add the priority attribute
        response = Mock()
        response.status_code = 404
        spider = Mock()

        # Process the response
        result = middleware.process_response(request, response, spider)

        # The original response should be returned
        self.assertEqual(result, response)

    def test_process_response_with_dont_retry(self):
        """A response whose request carries the dont_retry flag"""
        # Create the middleware instance
        middleware = RetryMiddleware(
            retry_http_codes=[500, 502, 503, 504, 408],
            ignore_http_codes=[404],
            max_retry_times=3,
            retry_exceptions=[],
            stats=MockStats(),
            retry_priority=-10
        )

        # Create request and response objects
        request = Mock()
        request.meta = {'dont_retry': True}
        request.priority = 0  # add the priority attribute
        response = Mock()
        response.status_code = 500
        spider = Mock()

        # Process the response
        result = middleware.process_response(request, response, spider)

        # The original response should be returned
        self.assertEqual(result, response)

    def test_process_response_with_max_retries_exceeded(self):
        """A response that has exhausted the maximum number of retries"""
        # Create the middleware instance
        middleware = RetryMiddleware(
            retry_http_codes=[500, 502, 503, 504, 408],
            ignore_http_codes=[404],
            max_retry_times=3,
            retry_exceptions=[],
            stats=MockStats(),
            retry_priority=-10
        )

        # Create request and response objects
        request = Mock()
        request.meta = {'retry_times': 3}  # the maximum retry count has already been reached
        request.priority = 0  # add the priority attribute
        response = Mock()
        response.status_code = 500
        spider = Mock()

        # Process the response
        result = middleware.process_response(request, response, spider)

        # The original response should be returned
        self.assertEqual(result, response)

    def test_process_exception_with_retry_exception(self):
        """An exception that triggers a retry"""
        # Create the middleware instance
        middleware = RetryMiddleware(
            retry_http_codes=[500, 502, 503, 504, 408],
            ignore_http_codes=[404],
            max_retry_times=3,
            retry_exceptions=[ValueError],
            stats=MockStats(),
            retry_priority=-10
        )

        # Create request and exception objects
        request = Mock()
        request.meta = {}
        request.priority = 0  # add the priority attribute
        exc = ValueError("test error")
        spider = Mock()

        # Process the exception
        result = middleware.process_exception(request, exc, spider)

        # The retried request should be returned
        self.assertEqual(result, request)
        self.assertEqual(request.meta['retry_times'], 1)
        self.assertTrue(request.meta['dont_retry'])
        self.assertEqual(request.priority, -10)

    def test_process_exception_with_dont_retry(self):
        """An exception whose request carries the dont_retry flag"""
        # Create the middleware instance
        middleware = RetryMiddleware(
            retry_http_codes=[500, 502, 503, 504, 408],
            ignore_http_codes=[404],
            max_retry_times=3,
            retry_exceptions=[ValueError],
            stats=MockStats(),
            retry_priority=-10
        )

        # Create request and exception objects
        request = Mock()
        request.meta = {'dont_retry': True}
        request.priority = 0  # add the priority attribute
        exc = ValueError("test error")
        spider = Mock()

        # Process the exception
        result = middleware.process_exception(request, exc, spider)

        # None should be returned
        self.assertIsNone(result)

    def test_process_exception_with_non_retry_exception(self):
        """An exception that does not trigger a retry"""
        # Create the middleware instance
        middleware = RetryMiddleware(
            retry_http_codes=[500, 502, 503, 504, 408],
            ignore_http_codes=[404],
            max_retry_times=3,
            retry_exceptions=[ValueError],
            stats=MockStats(),
            retry_priority=-10
        )

        # Create request and exception objects
        request = Mock()
        request.meta = {}
        request.priority = 0  # add the priority attribute
        exc = TypeError("test error")  # not in the retry exception list
        spider = Mock()

        # Process the exception
        result = middleware.process_exception(request, exc, spider)

        # None should be returned
        self.assertIsNone(result)


if __name__ == '__main__':
    unittest.main()
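As a companion sketch, here is the settings-driven construction path that test_middleware_initialization exercises. Only SettingManager(), settings.set(), RetryMiddleware.create_instance(crawler), and the five setting keys are taken from the test above; the Mock crawler stands in for a real crawler object, and the exception classes placed in RETRY_EXCEPTIONS are an illustrative choice.

# Sketch of building RetryMiddleware from settings, mirroring the test above.
from unittest.mock import Mock

from crawlo.middleware.retry import RetryMiddleware
from crawlo.settings.setting_manager import SettingManager

settings = SettingManager()
settings.set('RETRY_HTTP_CODES', [500, 502, 503, 504, 408])  # status codes that trigger a retry
settings.set('IGNORE_HTTP_CODES', [404])                     # status codes passed through untouched
settings.set('MAX_RETRY_TIMES', 3)                           # give up after three retries
settings.set('RETRY_EXCEPTIONS', [ConnectionError, TimeoutError])  # illustrative choice of exceptions
settings.set('RETRY_PRIORITY', -10)                          # priority applied to retried requests

crawler = Mock()              # stand-in for a real crawler object
crawler.settings = settings
crawler.stats = Mock()        # a real crawler supplies a stats collector here

middleware = RetryMiddleware.create_instance(crawler)
assert middleware.max_retry_times == 3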