crawlo 1.2.8__py3-none-any.whl → 1.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crawlo has been flagged for review.
- crawlo/__init__.py +63 -61
- crawlo/__version__.py +1 -1
- crawlo/cli.py +75 -75
- crawlo/commands/__init__.py +14 -14
- crawlo/commands/check.py +594 -594
- crawlo/commands/genspider.py +151 -151
- crawlo/commands/help.py +138 -138
- crawlo/commands/list.py +155 -155
- crawlo/commands/run.py +314 -323
- crawlo/commands/startproject.py +436 -436
- crawlo/commands/stats.py +187 -187
- crawlo/commands/utils.py +186 -186
- crawlo/config.py +312 -312
- crawlo/config_validator.py +277 -251
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +365 -365
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +256 -251
- crawlo/crawler.py +1097 -1099
- crawlo/data/__init__.py +5 -5
- crawlo/data/user_agents.py +194 -107
- crawlo/downloader/__init__.py +273 -266
- crawlo/downloader/aiohttp_downloader.py +226 -228
- crawlo/downloader/cffi_downloader.py +245 -256
- crawlo/downloader/httpx_downloader.py +259 -259
- crawlo/downloader/hybrid_downloader.py +212 -212
- crawlo/downloader/playwright_downloader.py +402 -402
- crawlo/downloader/selenium_downloader.py +472 -472
- crawlo/event.py +11 -11
- crawlo/exceptions.py +81 -81
- crawlo/extension/__init__.py +39 -39
- crawlo/extension/health_check.py +141 -141
- crawlo/extension/log_interval.py +57 -57
- crawlo/extension/log_stats.py +81 -81
- crawlo/extension/logging_extension.py +45 -43
- crawlo/extension/memory_monitor.py +104 -104
- crawlo/extension/performance_profiler.py +133 -133
- crawlo/extension/request_recorder.py +107 -107
- crawlo/filters/__init__.py +154 -154
- crawlo/filters/aioredis_filter.py +234 -234
- crawlo/filters/memory_filter.py +269 -269
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +21 -21
- crawlo/items/fields.py +52 -52
- crawlo/items/items.py +104 -104
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +132 -132
- crawlo/middleware/download_delay.py +104 -104
- crawlo/middleware/middleware_manager.py +136 -136
- crawlo/middleware/offsite.py +114 -114
- crawlo/middleware/proxy.py +386 -368
- crawlo/middleware/request_ignore.py +86 -86
- crawlo/middleware/response_code.py +163 -163
- crawlo/middleware/response_filter.py +136 -136
- crawlo/middleware/retry.py +124 -124
- crawlo/middleware/simple_proxy.py +65 -0
- crawlo/mode_manager.py +212 -211
- crawlo/network/__init__.py +21 -21
- crawlo/network/request.py +379 -338
- crawlo/network/response.py +359 -359
- crawlo/pipelines/__init__.py +21 -21
- crawlo/pipelines/bloom_dedup_pipeline.py +157 -157
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +316 -316
- crawlo/pipelines/database_dedup_pipeline.py +223 -223
- crawlo/pipelines/json_pipeline.py +218 -218
- crawlo/pipelines/memory_dedup_pipeline.py +115 -115
- crawlo/pipelines/mongo_pipeline.py +131 -131
- crawlo/pipelines/mysql_pipeline.py +317 -317
- crawlo/pipelines/pipeline_manager.py +74 -62
- crawlo/pipelines/redis_dedup_pipeline.py +167 -167
- crawlo/project.py +284 -315
- crawlo/queue/pqueue.py +37 -37
- crawlo/queue/queue_manager.py +379 -378
- crawlo/queue/redis_priority_queue.py +306 -306
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +216 -220
- crawlo/settings/setting_manager.py +175 -122
- crawlo/spider/__init__.py +639 -639
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +129 -129
- crawlo/task_manager.py +30 -30
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +118 -118
- crawlo/templates/project/pipelines.py.tmpl +96 -96
- crawlo/templates/project/settings.py.tmpl +261 -288
- crawlo/templates/project/settings_distributed.py.tmpl +174 -157
- crawlo/templates/project/settings_gentle.py.tmpl +95 -100
- crawlo/templates/project/settings_high_performance.py.tmpl +125 -134
- crawlo/templates/project/settings_minimal.py.tmpl +30 -0
- crawlo/templates/project/settings_simple.py.tmpl +96 -98
- crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
- crawlo/templates/run.py.tmpl +47 -47
- crawlo/templates/spider/spider.py.tmpl +143 -143
- crawlo/tools/__init__.py +200 -182
- crawlo/tools/anti_crawler.py +268 -268
- crawlo/tools/authenticated_proxy.py +240 -240
- crawlo/{cleaners → tools}/data_formatter.py +225 -225
- crawlo/tools/data_validator.py +180 -180
- crawlo/tools/date_tools.py +290 -36
- crawlo/tools/distributed_coordinator.py +388 -387
- crawlo/{cleaners → tools}/encoding_converter.py +127 -126
- crawlo/tools/request_tools.py +83 -0
- crawlo/tools/retry_mechanism.py +224 -221
- crawlo/tools/scenario_adapter.py +262 -262
- crawlo/{cleaners → tools}/text_cleaner.py +232 -232
- crawlo/utils/__init__.py +35 -35
- crawlo/utils/batch_processor.py +259 -259
- crawlo/utils/controlled_spider_mixin.py +439 -439
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/enhanced_error_handler.py +356 -356
- crawlo/utils/env_config.py +142 -142
- crawlo/utils/error_handler.py +123 -123
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +286 -286
- crawlo/utils/large_scale_helper.py +344 -344
- crawlo/utils/log.py +146 -128
- crawlo/utils/performance_monitor.py +285 -285
- crawlo/utils/queue_helper.py +175 -175
- crawlo/utils/redis_connection_pool.py +351 -351
- crawlo/utils/redis_key_validator.py +198 -198
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +218 -218
- crawlo/utils/spider_loader.py +61 -61
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- {crawlo-1.2.8.dist-info → crawlo-1.3.0.dist-info}/METADATA +1011 -764
- crawlo-1.3.0.dist-info/RECORD +219 -0
- examples/__init__.py +7 -7
- tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +81 -81
- tests/__init__.py +7 -7
- tests/advanced_tools_example.py +275 -275
- tests/authenticated_proxy_example.py +107 -237
- tests/cleaners_example.py +160 -160
- tests/config_validation_demo.py +143 -103
- tests/controlled_spider_example.py +205 -205
- tests/date_tools_example.py +180 -180
- tests/debug_pipelines.py +67 -0
- tests/dynamic_loading_example.py +523 -523
- tests/dynamic_loading_test.py +104 -104
- tests/env_config_example.py +133 -133
- tests/error_handling_example.py +171 -171
- tests/redis_key_validation_demo.py +130 -130
- tests/request_params_example.py +151 -0
- tests/response_improvements_example.py +144 -144
- tests/test_advanced_tools.py +148 -148
- tests/test_all_redis_key_configs.py +145 -145
- tests/test_authenticated_proxy.py +141 -141
- tests/test_cleaners.py +54 -54
- tests/test_comprehensive.py +146 -146
- tests/test_config_consistency.py +80 -80
- tests/test_config_merge.py +153 -0
- tests/test_config_validator.py +182 -193
- tests/test_crawlo_proxy_integration.py +109 -173
- tests/test_date_tools.py +123 -123
- tests/test_default_header_middleware.py +158 -158
- tests/test_distributed.py +65 -0
- tests/test_double_crawlo_fix.py +207 -207
- tests/test_double_crawlo_fix_simple.py +124 -124
- tests/test_download_delay_middleware.py +221 -221
- tests/test_downloader_proxy_compatibility.py +268 -268
- tests/test_dynamic_downloaders_proxy.py +124 -124
- tests/test_dynamic_proxy.py +92 -92
- tests/test_dynamic_proxy_config.py +146 -146
- tests/test_dynamic_proxy_real.py +109 -109
- tests/test_edge_cases.py +303 -303
- tests/test_enhanced_error_handler.py +270 -270
- tests/test_env_config.py +121 -121
- tests/test_error_handler_compatibility.py +112 -112
- tests/test_final_validation.py +153 -153
- tests/test_framework_env_usage.py +103 -103
- tests/test_integration.py +169 -357
- tests/test_item_dedup_redis_key.py +122 -122
- tests/test_mode_consistency.py +51 -51
- tests/test_offsite_middleware.py +221 -221
- tests/test_parsel.py +29 -29
- tests/test_performance.py +327 -327
- tests/test_proxy_api.py +264 -264
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware.py +121 -121
- tests/test_proxy_middleware_enhanced.py +216 -216
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_middleware_refactored.py +185 -0
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_queue_manager_double_crawlo.py +173 -173
- tests/test_queue_manager_redis_key.py +176 -176
- tests/test_random_user_agent.py +73 -0
- tests/test_real_scenario_proxy.py +195 -195
- tests/test_redis_config.py +28 -28
- tests/test_redis_connection_pool.py +294 -294
- tests/test_redis_key_naming.py +181 -181
- tests/test_redis_key_validator.py +123 -123
- tests/test_redis_queue.py +224 -224
- tests/test_request_ignore_middleware.py +182 -182
- tests/test_request_params.py +112 -0
- tests/test_request_serialization.py +70 -70
- tests/test_response_code_middleware.py +349 -349
- tests/test_response_filter_middleware.py +427 -427
- tests/test_response_improvements.py +152 -152
- tests/test_retry_middleware.py +241 -241
- tests/test_scheduler.py +252 -252
- tests/test_scheduler_config_update.py +133 -133
- tests/test_simple_response.py +61 -61
- tests/test_telecom_spider_redis_key.py +205 -205
- tests/test_template_content.py +87 -87
- tests/test_template_redis_key.py +134 -134
- tests/test_tools.py +159 -153
- tests/test_user_agents.py +97 -0
- tests/tools_example.py +260 -257
- tests/verify_distributed.py +117 -0
- crawlo/cleaners/__init__.py +0 -61
- crawlo/utils/date_tools.py +0 -290
- crawlo-1.2.8.dist-info/RECORD +0 -209
- {crawlo-1.2.8.dist-info → crawlo-1.3.0.dist-info}/WHEEL +0 -0
- {crawlo-1.2.8.dist-info → crawlo-1.3.0.dist-info}/entry_points.txt +0 -0
- {crawlo-1.2.8.dist-info → crawlo-1.3.0.dist-info}/top_level.txt +0 -0
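Several helpers changed import paths in this release: the crawlo/cleaners modules (text_cleaner, data_formatter, encoding_converter) moved into crawlo/tools, crawlo/utils/date_tools.py was removed in favour of an expanded crawlo/tools/date_tools.py, and crawlo/cleaners/__init__.py was dropped. A minimal migration sketch follows; the clean_text, format_currency and extract_numbers names are confirmed by the tests/cleaners_example.py diff further down, while the 1.2.8 import forms and the date-tools path are assumptions inferred only from the file moves listed above.

# Imports under 1.2.8 (assumed forms, based on the removed crawlo/cleaners package):
# from crawlo.cleaners import clean_text, format_currency, extract_numbers
# from crawlo.utils.date_tools import ...

# Equivalent imports under 1.3.0:
from crawlo.tools import clean_text, format_currency, extract_numbers  # confirmed by the cleaners_example.py diff below
import crawlo.tools.date_tools  # moved here from crawlo/utils/date_tools.py

print(clean_text("<p>hello world</p>"))  # the helpers are used the same way from the new location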
tests/authenticated_proxy_example.py
CHANGED
@@ -1,237 +1,107 @@
-#!/usr/bin/
-# -*- coding:
-"""
-from crawlo.
-"
[old lines 4-107 are mostly truncated in the source diff; only the fragments above survive]
-            "http://user1:pass1@proxy1.example.com:8080",
-            "http://user2:pass2@proxy2.example.com:8080",
-            "http://proxy3.example.com:8080",  # 不带认证
-            "https://secureuser:securepass@secure.proxy.com:443"
-        ]
-        self.current_proxy_index = 0
-
-    def get_next_proxy(self):
-        """获取下一个代理"""
-        proxy_url = self.proxies[self.current_proxy_index]
-        self.current_proxy_index = (self.current_proxy_index + 1) % len(self.proxies)
-        return proxy_url
-
-    def start_requests(self):
-        urls = [
-            'https://httpbin.org/ip',       # 查看IP地址
-            'https://httpbin.org/headers',  # 查看请求头
-            'https://example.com',          # 普通网站
-        ]
-
-        for url in urls:
-            # 获取代理
-            proxy_url = self.get_next_proxy()
-            proxy = AuthenticatedProxy(proxy_url)
-
-            # 创建请求
-            request = Request(url=url, callback=self.parse)
-
-            # 根据不同下载器设置代理
-            # 这里以AioHttp为例
-            if self.crawler.settings.get("DOWNLOADER_TYPE") == "aiohttp":
-                request.proxy = proxy.clean_url
-                auth = proxy.get_auth_credentials()
-                if auth:
-                    # AioHttp需要在下载器中处理认证
-                    request.meta["proxy_auth"] = auth
-            else:
-                # 其他下载器
-                request.proxy = proxy.proxy_dict
-
-            yield request
-
-    def parse(self, response):
-        """解析响应"""
-        print(f"成功访问: {response.url}")
-        print(f"状态码: {response.status_code}")
-        # 显示前200个字符
-        print(f"响应内容: {response.text[:200]}...\n")
-        yield {"url": response.url, "status": response.status_code}
-
-
-def demo_in_spider():
-    """演示在爬虫中使用代理"""
-    print("=== 在爬虫中使用代理 ===\n")
-    print("在爬虫项目中,您可以这样使用带认证的代理:")
-    print("""
-from crawlo import Spider, Request
-from crawlo.tools import AuthenticatedProxy
-
-class MySpider(Spider):
-    name = 'my_spider'
-
-    def __init__(self):
-        super().__init__()
-        self.proxy_urls = [
-            "http://username:password@proxy1.example.com:8080",
-            "http://user:pass@proxy2.example.com:8080",
-            "http://proxy3.example.com:8080"  # 不带认证
-        ]
-
-    def start_requests(self):
-        urls = ['https://httpbin.org/ip', 'https://example.com']
-
-        for i, url in enumerate(urls):
-            # 选择代理
-            proxy_url = self.proxy_urls[i % len(self.proxy_urls)]
-            proxy = AuthenticatedProxy(proxy_url)
-
-            # 创建请求
-            request = Request(url=url, callback=self.parse)
-
-            # 设置代理(根据不同下载器)
-            downloader_type = self.crawler.settings.get("DOWNLOADER_TYPE", "aiohttp")
-
-            if downloader_type == "aiohttp":
-                # AioHttp下载器
-                request.proxy = proxy.clean_url
-                auth = proxy.get_auth_credentials()
-                if auth:
-                    request.meta["proxy_auth"] = auth
-            elif downloader_type == "httpx":
-                # HttpX下载器
-                request.proxy = proxy.clean_url
-            elif downloader_type == "curl_cffi":
-                # CurlCffi下载器
-                request.proxy = proxy.proxy_dict
-                # 认证信息在URL中或通过headers传递
-                auth_header = proxy.get_auth_header()
-                if auth_header:
-                    request.headers["Proxy-Authorization"] = auth_header
-
-            yield request
-
-    def parse(self, response):
-        # 处理响应
-        yield {"url": response.url, "title": response.css('title::text').get()}
-    """)
-
-
-if __name__ == '__main__':
-    # 运行演示
-    demo_proxy_parsing()
-    demo_authenticated_proxy_class()
-    demo_in_spider()
-
-    print("\n=== 配置说明 ===")
-    print("在settings.py中配置代理:")
-    print("""
-# 启用代理中间件
-MIDDLEWARES = [
-    # ... 其他中间件 ...
-    'crawlo.middleware.proxy.ProxyMiddleware',
-]
-
-# 代理配置
-PROXY_ENABLED = True
-PROXY_API_URL = "https://api.proxyprovider.com/get"  # 代理API地址
-PROXY_EXTRACTOR = "proxy"  # 从API响应中提取代理的字段路径
-PROXY_REFRESH_INTERVAL = 60  # 代理刷新间隔(秒)
-    """)
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+认证代理使用示例
+演示如何在 Crawlo 框架中使用认证代理
+"""
+
+import asyncio
+import sys
+import os
+
+# 添加项目根目录到路径
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+from crawlo.config import CrawloConfig
+from crawlo.crawler import CrawlerProcess
+from crawlo import Spider, Request, Item
+
+
+class ProxyItem(Item):
+    """代理测试结果项"""
+    url = ''
+    status = 0
+    proxy = ''
+    response_time = 0.0
+
+
+class AuthProxySpider(Spider):
+    """认证代理测试爬虫"""
+    name = 'auth_proxy_spider'
+
+    async def start_requests(self):
+        """发起测试请求"""
+        urls = [
+            'https://httpbin.org/ip',       # 查看IP地址
+            'https://httpbin.org/headers',  # 查看请求头
+        ]
+
+        for url in urls:
+            yield Request(url, callback=self.parse_response)
+
+    async def parse_response(self, response):
+        """解析响应"""
+        import time
+        import json
+
+        # 获取代理信息
+        proxy_info = response.meta.get('proxy', 'No proxy')
+
+        # 解析响应内容
+        try:
+            data = json.loads(response.text)
+            ip_info = data.get('origin', 'Unknown')
+        except:
+            ip_info = response.text[:100] + '...' if len(response.text) > 100 else response.text
+
+        # 创建结果项
+        item = ProxyItem(
+            url=response.url,
+            status=response.status,
+            proxy=str(proxy_info),
+            response_time=response.meta.get('download_latency', 0)
+        )
+
+        self.logger.info(f"Proxy test result: {item}")
+        yield item
+
+
+async def main():
+    """主函数"""
+    print("🚀 开始认证代理测试...")
+
+    # 创建配置(使用认证代理)
+    config = CrawloConfig.standalone(
+        concurrency=2,
+        download_delay=1.0,
+        PROXY_ENABLED=True,
+        # 配置认证代理(请替换为实际的代理信息)
+        PROXY_API_URL="http://your-proxy-provider.com/api/get",  # 代理API地址
+        # 如果使用固定代理,可以直接设置:
+        # PROXY_LIST=[
+        #     "http://username:password@proxy1.example.com:8080",
+        #     "http://username:password@proxy2.example.com:8080",
+        # ],
+        LOG_LEVEL='INFO'
+    )
+
+    # 添加自定义中间件
+    config.set('CUSTOM_MIDDLEWARES', [
+        'crawlo.middleware.proxy.ProxyMiddleware',
+    ])
+
+    # 创建爬虫进程
+    process = CrawlerProcess(settings=config.to_dict())
+
+    # 添加爬虫
+    process.crawl(AuthProxySpider)
+
+    # 启动爬虫
+    print("🔄 正在运行代理测试...")
+    await process.start()
+
+    print("✅ 认证代理测试完成!")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
tests/cleaners_example.py
CHANGED
Only the two import lines changed (crawlo.cleaners was consolidated into crawlo.tools); the rest of the file is unchanged context.
@@ -1,161 +1,161 @@
 #!/usr/bin/python
 # -*- coding: UTF-8 -*-
 """
 Crawlo框架数据清洗工具使用示例
 """
-from crawlo.
+from crawlo.tools import (
     TextCleaner,
     DataFormatter,
     remove_html_tags,
     decode_html_entities,
     clean_text,
     extract_numbers,
     extract_emails,
     extract_urls,
     format_number,
     format_currency,
     format_phone_number,
     format_chinese_id_card
 )


 def demo_text_cleaner():
     """演示文本清洗工具的使用"""
     print("=== 文本清洗工具演示 ===\n")

     # 1. 移除HTML标签
     print("1. 移除HTML标签:")
     html_text = "<p>这是一个<b>测试</b>文本</p>"
     clean_text_result = remove_html_tags(html_text)
     print(f" 原始文本: {html_text}")
     print(f" 清洗后: {clean_text_result}")

     print()

     # 2. 解码HTML实体
     print("2. 解码HTML实体:")
     entity_text = "这是一个 <b>测试</b>&文本"
     decoded_text = decode_html_entities(entity_text)
     print(f" 原始文本: {entity_text}")
     print(f" 解码后: {decoded_text}")

     print()

     # 3. 移除多余空白字符
     print("3. 移除多余空白字符:")
     whitespace_text = "这是 一个\t\t测试\n\n文本"
     clean_whitespace = TextCleaner.remove_extra_whitespace(whitespace_text)
     print(f" 原始文本: {repr(whitespace_text)}")
     print(f" 清洗后: {repr(clean_whitespace)}")

     print()

     # 4. 综合清洗
     print("4. 综合清洗:")
     complex_text = "<p>这是 一个<b>测试</b>&文本 </p>"
     cleaned = clean_text(complex_text)
     print(f" 原始文本: {complex_text}")
     print(f" 清洗后: {cleaned}")

     print()

     # 5. 提取信息
     print("5. 提取信息:")
     info_text = "联系邮箱: test@example.com, 电话: 13812345678, 价格: ¥123.45"
     numbers = extract_numbers(info_text)
     emails = extract_emails(info_text)
     urls = extract_urls(info_text)
     print(f" 原始文本: {info_text}")
     print(f" 提取的数字: {numbers}")
     print(f" 提取的邮箱: {emails}")
     print(f" 提取的URL: {urls}")


 def demo_data_formatter():
     """演示数据格式化工具的使用"""
     print("\n=== 数据格式化工具演示 ===\n")

     # 1. 数字格式化
     print("1. 数字格式化:")
     number = 1234567.891
     formatted_num1 = format_number(number, precision=2, thousand_separator=False)
     formatted_num2 = format_number(number, precision=2, thousand_separator=True)
     print(f" 原始数字: {number}")
     print(f" 格式化(无千位分隔符): {formatted_num1}")
     print(f" 格式化(有千位分隔符): {formatted_num2}")

     print()

     # 2. 货币格式化
     print("2. 货币格式化:")
     price = 1234.567
     formatted_currency1 = format_currency(price, "¥", 2)
     formatted_currency2 = format_currency(price, "$", 2)
     print(f" 原始价格: {price}")
     print(f" 人民币格式: {formatted_currency1}")
     print(f" 美元格式: {formatted_currency2}")

     print()

     # 3. 电话号码格式化
     print("3. 电话号码格式化:")
     phone = "13812345678"
     formatted_phone1 = format_phone_number(phone, "+86", "international")
     formatted_phone2 = format_phone_number(phone, "", "domestic")
     formatted_phone3 = format_phone_number(phone, "", "plain")
     print(f" 原始号码: {phone}")
     print(f" 国际格式: {formatted_phone1}")
     print(f" 国内格式: {formatted_phone2}")
     print(f" 纯数字格式: {formatted_phone3}")

     print()

     # 4. 身份证号码格式化
     print("4. 身份证号码格式化:")
     id_card = "110101199001011234"
     formatted_id = format_chinese_id_card(id_card)
     print(f" 原始号码: {id_card}")
     print(f" 格式化后: {formatted_id}")


 def demo_in_spider():
     """演示在爬虫中使用数据清洗工具"""
     print("\n=== 在爬虫中使用数据清洗工具 ===\n")
     print("在爬虫项目中,您可以这样使用数据清洗工具:")
     print("""
 from crawlo import Spider, Request, Item, Field
-from crawlo.
+from crawlo.tools import clean_text, format_currency, extract_numbers

 class ProductItem(Item):
     name = Field()
     price = Field()
     description = Field()

 class ProductSpider(Spider):
     def parse(self, response):
         # 从网页中提取数据
         name = response.css('.product-name::text').get()
         price_text = response.css('.price::text').get()
         description = response.css('.description::text').get()

         # 清洗和格式化数据
         clean_name = clean_text(name) if name else None
         price_numbers = extract_numbers(price_text) if price_text else []
         clean_price = format_currency(price_numbers[0]) if price_numbers else None
         clean_description = clean_text(description) if description else None

         # 创建数据项
         item = ProductItem()
         item['name'] = clean_name
         item['price'] = clean_price
         item['description'] = clean_description

         yield item
     """)


 if __name__ == '__main__':
     # 运行演示
     demo_text_cleaner()
     demo_data_formatter()
     demo_in_spider()