crawlo-1.1.4-py3-none-any.whl → crawlo-1.1.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crawlo might be problematic.
- crawlo/__init__.py +61 -34
- crawlo/__version__.py +1 -1
- crawlo/cleaners/__init__.py +61 -0
- crawlo/cleaners/data_formatter.py +226 -0
- crawlo/cleaners/encoding_converter.py +126 -0
- crawlo/cleaners/text_cleaner.py +233 -0
- crawlo/cli.py +40 -40
- crawlo/commands/__init__.py +13 -13
- crawlo/commands/check.py +594 -594
- crawlo/commands/genspider.py +151 -151
- crawlo/commands/list.py +155 -155
- crawlo/commands/run.py +285 -285
- crawlo/commands/startproject.py +300 -196
- crawlo/commands/stats.py +188 -188
- crawlo/commands/utils.py +186 -186
- crawlo/config.py +309 -279
- crawlo/config_validator.py +253 -0
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +346 -172
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +137 -166
- crawlo/crawler.py +1027 -1027
- crawlo/downloader/__init__.py +266 -242
- crawlo/downloader/aiohttp_downloader.py +220 -212
- crawlo/downloader/cffi_downloader.py +256 -251
- crawlo/downloader/httpx_downloader.py +259 -259
- crawlo/downloader/hybrid_downloader.py +214 -0
- crawlo/downloader/playwright_downloader.py +403 -0
- crawlo/downloader/selenium_downloader.py +473 -0
- crawlo/event.py +11 -11
- crawlo/exceptions.py +81 -81
- crawlo/extension/__init__.py +37 -37
- crawlo/extension/health_check.py +141 -141
- crawlo/extension/log_interval.py +57 -57
- crawlo/extension/log_stats.py +81 -81
- crawlo/extension/logging_extension.py +43 -43
- crawlo/extension/memory_monitor.py +104 -88
- crawlo/extension/performance_profiler.py +133 -117
- crawlo/extension/request_recorder.py +107 -107
- crawlo/filters/__init__.py +154 -154
- crawlo/filters/aioredis_filter.py +280 -242
- crawlo/filters/memory_filter.py +269 -269
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +21 -21
- crawlo/items/fields.py +53 -53
- crawlo/items/items.py +104 -104
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +32 -32
- crawlo/middleware/download_delay.py +28 -28
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/proxy.py +272 -248
- crawlo/middleware/request_ignore.py +30 -30
- crawlo/middleware/response_code.py +18 -18
- crawlo/middleware/response_filter.py +26 -26
- crawlo/middleware/retry.py +124 -124
- crawlo/mode_manager.py +206 -201
- crawlo/network/__init__.py +21 -21
- crawlo/network/request.py +338 -311
- crawlo/network/response.py +360 -271
- crawlo/pipelines/__init__.py +21 -21
- crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +316 -316
- crawlo/pipelines/database_dedup_pipeline.py +224 -224
- crawlo/pipelines/json_pipeline.py +218 -218
- crawlo/pipelines/memory_dedup_pipeline.py +115 -115
- crawlo/pipelines/mongo_pipeline.py +131 -131
- crawlo/pipelines/mysql_pipeline.py +316 -316
- crawlo/pipelines/pipeline_manager.py +56 -56
- crawlo/pipelines/redis_dedup_pipeline.py +166 -162
- crawlo/project.py +153 -153
- crawlo/queue/pqueue.py +37 -37
- crawlo/queue/queue_manager.py +320 -307
- crawlo/queue/redis_priority_queue.py +277 -209
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +216 -278
- crawlo/settings/setting_manager.py +99 -99
- crawlo/spider/__init__.py +639 -639
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +130 -130
- crawlo/task_manager.py +30 -30
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +110 -110
- crawlo/templates/project/pipelines.py.tmpl +97 -97
- crawlo/templates/project/run.py.tmpl +251 -251
- crawlo/templates/project/settings.py.tmpl +326 -279
- crawlo/templates/project/settings_distributed.py.tmpl +120 -0
- crawlo/templates/project/settings_gentle.py.tmpl +95 -0
- crawlo/templates/project/settings_high_performance.py.tmpl +152 -0
- crawlo/templates/project/settings_simple.py.tmpl +69 -0
- crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
- crawlo/templates/spider/spider.py.tmpl +141 -141
- crawlo/tools/__init__.py +183 -0
- crawlo/tools/anti_crawler.py +269 -0
- crawlo/tools/authenticated_proxy.py +241 -0
- crawlo/tools/data_validator.py +181 -0
- crawlo/tools/date_tools.py +36 -0
- crawlo/tools/distributed_coordinator.py +387 -0
- crawlo/tools/retry_mechanism.py +221 -0
- crawlo/tools/scenario_adapter.py +263 -0
- crawlo/utils/__init__.py +35 -7
- crawlo/utils/batch_processor.py +261 -0
- crawlo/utils/controlled_spider_mixin.py +439 -439
- crawlo/utils/date_tools.py +290 -233
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/enhanced_error_handler.py +360 -0
- crawlo/utils/env_config.py +106 -0
- crawlo/utils/error_handler.py +126 -0
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +286 -286
- crawlo/utils/large_scale_helper.py +343 -343
- crawlo/utils/log.py +128 -128
- crawlo/utils/performance_monitor.py +285 -0
- crawlo/utils/queue_helper.py +175 -175
- crawlo/utils/redis_connection_pool.py +335 -0
- crawlo/utils/redis_key_validator.py +200 -0
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +219 -219
- crawlo/utils/spider_loader.py +62 -62
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/METADATA +401 -403
- crawlo-1.1.5.dist-info/RECORD +185 -0
- examples/__init__.py +7 -7
- tests/__init__.py +7 -7
- tests/advanced_tools_example.py +276 -0
- tests/authenticated_proxy_example.py +237 -0
- tests/cleaners_example.py +161 -0
- tests/config_validation_demo.py +103 -0
- {examples → tests}/controlled_spider_example.py +205 -205
- tests/date_tools_example.py +181 -0
- tests/dynamic_loading_example.py +524 -0
- tests/dynamic_loading_test.py +105 -0
- tests/env_config_example.py +134 -0
- tests/error_handling_example.py +172 -0
- tests/redis_key_validation_demo.py +131 -0
- tests/response_improvements_example.py +145 -0
- tests/test_advanced_tools.py +149 -0
- tests/test_all_redis_key_configs.py +146 -0
- tests/test_authenticated_proxy.py +142 -0
- tests/test_cleaners.py +55 -0
- tests/test_comprehensive.py +147 -0
- tests/test_config_validator.py +194 -0
- tests/test_date_tools.py +124 -0
- tests/test_dynamic_downloaders_proxy.py +125 -0
- tests/test_dynamic_proxy.py +93 -0
- tests/test_dynamic_proxy_config.py +147 -0
- tests/test_dynamic_proxy_real.py +110 -0
- tests/test_edge_cases.py +304 -0
- tests/test_enhanced_error_handler.py +271 -0
- tests/test_env_config.py +122 -0
- tests/test_error_handler_compatibility.py +113 -0
- tests/test_final_validation.py +153 -153
- tests/test_framework_env_usage.py +104 -0
- tests/test_integration.py +357 -0
- tests/test_item_dedup_redis_key.py +123 -0
- tests/test_parsel.py +30 -0
- tests/test_performance.py +328 -0
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_queue_manager_redis_key.py +177 -0
- tests/test_redis_config.py +28 -28
- tests/test_redis_connection_pool.py +295 -0
- tests/test_redis_key_naming.py +182 -0
- tests/test_redis_key_validator.py +124 -0
- tests/test_redis_queue.py +224 -224
- tests/test_request_serialization.py +70 -70
- tests/test_response_improvements.py +153 -0
- tests/test_scheduler.py +241 -241
- tests/test_simple_response.py +62 -0
- tests/test_telecom_spider_redis_key.py +206 -0
- tests/test_template_content.py +88 -0
- tests/test_template_redis_key.py +135 -0
- tests/test_tools.py +154 -0
- tests/tools_example.py +258 -0
- crawlo/core/enhanced_engine.py +0 -190
- crawlo-1.1.4.dist-info/RECORD +0 -117
- {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/WHEEL +0 -0
- {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.4.dist-info → crawlo-1.1.5.dist-info}/top_level.txt +0 -0
--- examples/controlled_spider_example.py
+++ tests/controlled_spider_example.py
@@ -1,205 +1,205 @@
(The file was moved unchanged: all 205 lines were removed from examples/ and re-added under tests/. The content is shown once below, with the original Chinese comments translated.)

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Usage example for the controlled-spider mixin classes.
Demonstrates how to use ControlledRequestMixin and AsyncControlledRequestMixin to control large-scale request generation.
"""

import asyncio
from crawlo.spider import Spider
from crawlo.network import Request
from crawlo.utils.controlled_spider_mixin import ControlledRequestMixin, AsyncControlledRequestMixin


class LargeScaleSpider(Spider, ControlledRequestMixin):
    """
    Synchronous controlled-spider example.
    Suited to scenarios that generate a large number of requests while keeping concurrency under control.
    """
    name = 'large_scale_spider'

    def __init__(self):
        Spider.__init__(self)
        ControlledRequestMixin.__init__(self)

        # Controlled-generation parameters
        self.max_pending_requests = 150    # maximum number of pending requests
        self.batch_size = 75               # requests generated per batch
        self.generation_interval = 0.02    # generation interval (seconds)
        self.backpressure_threshold = 300  # backpressure threshold

    def _original_start_requests(self):
        """
        Provide the original bulk of requests.
        Here we simulate crawling the product pages of an e-commerce site.
        """
        # Simulate crawling 10,000 product pages
        base_urls = [
            "https://example-shop.com/products",
            "https://example-shop.com/electronics",
            "https://example-shop.com/clothing",
            "https://example-shop.com/books",
            "https://example-shop.com/home"
        ]

        for category in base_urls:
            # Crawl 2000 pages per category
            for page in range(1, 2001):
                yield Request(
                    url=f"{category}?page={page}",
                    callback=self.parse_product_list,
                    meta={'category': category.split('/')[-1], 'page': page}
                )

    def _process_request_before_yield(self, request):
        """
        Process a request before it is yielded.
        Deduplication, priority, and header logic can be added here.
        """
        # Set request headers
        request.headers.setdefault('User-Agent',
                                   'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36')

        # Set priority by category
        category = request.meta.get('category', '')
        if category == 'electronics':
            request.priority = 10  # electronics get the highest priority
        elif category == 'clothing':
            request.priority = 8
        else:
            request.priority = 5

        # Deduplication logic could be added here
        # if self.is_duplicate_request(request):
        #     return None  # skip duplicate requests

        return request

    async def parse_product_list(self, response):
        """Parse a product list page."""
        # Extract product links
        product_links = response.css('a.product-link::attr(href)').getall()

        for link in product_links:
            # Generate product detail requests
            yield Request(
                url=response.urljoin(link),
                callback=self.parse_product_detail,
                meta={'category': response.meta['category']}
            )

        # Extract pagination
        next_page = response.css('a.next-page::attr(href)').get()
        if next_page:
            yield Request(
                url=response.urljoin(next_page),
                callback=self.parse_product_list,
                meta=response.meta
            )

    async def parse_product_detail(self, response):
        """Parse a product detail page."""
        yield {
            'title': response.css('h1.product-title::text').get(),
            'price': response.css('.price::text').re_first(r'\d+\.?\d*'),
            'description': response.css('.product-description::text').get(),
            'category': response.meta['category'],
            'url': response.url,
            'in_stock': 'in-stock' in response.css('.availability::attr(class)').get(''),
            'rating': response.css('.rating::attr(data-rating)').get(),
            'reviews_count': response.css('.reviews-count::text').re_first(r'\d+')
        }


class AsyncLargeScaleSpider(Spider, AsyncControlledRequestMixin):
    """
    Asynchronous controlled-spider example.
    Uses async control for more precise concurrency management.
    """
    name = 'async_large_scale_spider'

    def __init__(self):
        Spider.__init__(self)
        AsyncControlledRequestMixin.__init__(self)

        # Async control parameters
        self.max_concurrent_generations = 15  # maximum simultaneous generations
        self.queue_monitor_interval = 0.5     # queue monitoring interval (seconds)

    def _original_start_requests(self):
        """
        Provide the original bulk of requests.
        Here we simulate crawling articles from news sites.
        """
        # Simulate crawling 50,000 news articles
        news_sites = [
            "https://news-site-1.com",
            "https://news-site-2.com",
            "https://news-site-3.com",
            "https://tech-news.com",
            "https://finance-news.com"
        ]

        categories = ['tech', 'finance', 'sports', 'politics', 'entertainment']

        for site in news_sites:
            for category in categories:
                # Crawl 2000 pages per category
                for page in range(1, 2001):
                    yield Request(
                        url=f"{site}/{category}?page={page}",
                        callback=self.parse_article_list,
                        meta={'site': site, 'category': category, 'page': page}
                    )

    def _process_request_before_yield(self, request):
        """Request pre-processing for the async version."""
        # Set request headers
        request.headers.setdefault('User-Agent',
                                   'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36')

        # Set priority by news category
        category = request.meta.get('category', '')
        if category in ['tech', 'finance']:
            request.priority = 10  # tech and finance news get the highest priority
        elif category in ['sports', 'politics']:
            request.priority = 8
        else:
            request.priority = 5

        # Set a delay (avoid putting too much pressure on the server)
        site = request.meta.get('site', '')
        if 'tech-news.com' in site:
            request.meta['download_delay'] = 0.5  # this site is sensitive, add a delay

        return request

    async def parse_article_list(self, response):
        """Parse an article list page."""
        # Extract article links
        article_links = response.css('a.article-link::attr(href)').getall()

        for link in article_links:
            yield Request(
                url=response.urljoin(link),
                callback=self.parse_article_detail,
                meta={
                    'site': response.meta['site'],
                    'category': response.meta['category']
                }
            )

    async def parse_article_detail(self, response):
        """Parse an article detail page."""
        yield {
            'title': response.css('h1.article-title::text').get(),
            'content': ' '.join(response.css('.article-content p::text').getall()),
            'author': response.css('.author::text').get(),
            'publish_date': response.css('.publish-date::attr(datetime)').get(),
            'category': response.meta['category'],
            'site': response.meta['site'],
            'url': response.url,
            'tags': response.css('.tags a::text').getall(),
            'views': response.css('.views-count::text').re_first(r'\d+'),
            'comments': response.css('.comments-count::text').re_first(r'\d+')
        }
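The mixin configuration above (max_pending_requests, batch_size, generation_interval, backpressure_threshold) suggests a back-pressure loop: requests are drawn from a large generator in small batches, and generation pauses while too many requests are still pending. The snippet below is a minimal, framework-independent sketch of that pattern using only asyncio; ThrottledFeeder and every name in it are illustrative assumptions, not crawlo APIs.

import asyncio


class ThrottledFeeder:
    """Hypothetical helper: feed items from a large generator in batches,
    pausing whenever too many items are still waiting to be consumed."""

    def __init__(self, max_pending=150, batch_size=75, generation_interval=0.02):
        self.max_pending = max_pending                   # cap on queued-but-unconsumed items
        self.batch_size = batch_size                     # items pushed per batch
        self.generation_interval = generation_interval   # pause between batches (seconds)
        self.queue = asyncio.Queue()

    async def feed(self, source):
        batch = []
        for item in source:
            batch.append(item)
            if len(batch) >= self.batch_size:
                await self._flush(batch)
                batch = []
        if batch:
            await self._flush(batch)

    async def _flush(self, batch):
        # Back-pressure: wait until the pending count drops below the limit.
        while self.queue.qsize() >= self.max_pending:
            await asyncio.sleep(self.generation_interval)
        for item in batch:
            self.queue.put_nowait(item)
        await asyncio.sleep(self.generation_interval)


async def consume(queue):
    while True:
        url = await queue.get()
        # A real consumer would download and parse `url` here.
        queue.task_done()


async def main():
    feeder = ThrottledFeeder()
    urls = (f"https://example-shop.com/products?page={p}" for p in range(1, 2001))
    consumer = asyncio.create_task(consume(feeder.queue))
    await feeder.feed(urls)     # generator is drained under back-pressure
    await feeder.queue.join()   # wait for the consumer to finish everything
    consumer.cancel()


if __name__ == "__main__":
    asyncio.run(main())

The unbounded asyncio.Queue plus an explicit qsize() check is used only to keep the sketch readable; a bounded queue with await queue.put(...) achieves the same throttling more idiomatically.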
--- /dev/null
+++ tests/date_tools_example.py
@@ -0,0 +1,181 @@
(New file; all 181 lines added. Shown below with the original Chinese comments and demo labels translated; the intentionally Chinese-format date strings used as parsing/formatting inputs are kept as-is.)

#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Usage example for the Crawlo framework's date tools.
"""
from crawlo.tools import (
    TimeUtils,
    parse_time,
    format_time,
    time_diff,
    to_timestamp,
    to_datetime,
    now,
    to_timezone,
    to_utc,
    to_local
)


def demo_date_parsing():
    """Demonstrate date parsing."""
    print("=== Date parsing demo ===\n")

    # Parse date strings in various formats
    date_strings = [
        "2025-09-10 14:30:00",
        "September 10, 2025 2:30 PM",
        "2025/09/10 14:30:00",
        "10-09-2025 14:30:00",
        "2025年9月10日 14时30分00秒"  # Chinese-format date string
    ]

    for date_str in date_strings:
        parsed = parse_time(date_str)
        print(f"Parsed '{date_str}' -> {parsed}")

    print()


def demo_date_formatting():
    """Demonstrate date formatting."""
    print("=== Date formatting demo ===\n")

    # Get the current time
    current_time = now()
    print(f"Current time: {current_time}")

    # Format using different patterns
    formats = [
        "%Y-%m-%d",
        "%Y年%m月%d日",  # Chinese date format
        "%Y-%m-%d %H:%M:%S",
        "%B %d, %Y at %I:%M %p",
        "%A, %B %d, %Y"
    ]

    for fmt in formats:
        formatted = format_time(current_time, fmt)
        print(f"Formatted with '{fmt}' -> {formatted}")

    print()


def demo_time_calculations():
    """Demonstrate time calculations."""
    print("=== Time calculation demo ===\n")

    # Compute time differences
    start_time = "2025-09-10 10:00:00"
    end_time = "2025-09-10 14:30:45"

    diff_seconds = time_diff(start_time, end_time, "seconds")
    diff_minutes = time_diff(start_time, end_time, "minutes")
    diff_hours = time_diff(start_time, end_time, "hours")

    print(f"Start time: {start_time}")
    print(f"End time: {end_time}")
    print(f"Difference in seconds: {diff_seconds}")
    print(f"Difference in minutes: {diff_minutes}")
    print(f"Difference in hours: {diff_hours}")

    print()

    # Date arithmetic
    base_date = "2025-09-10"
    plus_30_days = TimeUtils.add_days(base_date, 30)
    minus_15_days = TimeUtils.add_days(base_date, -15)
    plus_3_months = TimeUtils.add_months(base_date, 3)

    print(f"Base date: {base_date}")
    print(f"Plus 30 days: {plus_30_days}")
    print(f"Minus 15 days: {minus_15_days}")
    print(f"Plus 3 months: {plus_3_months}")

    print()


def demo_timezone_conversion():
    """Demonstrate timezone conversion."""
    print("=== Timezone conversion demo ===\n")

    # Create a datetime
    dt = parse_time("2025-09-10 14:30:00")
    print(f"Original time: {dt}")

    # Convert to UTC
    utc_time = to_utc(dt)
    print(f"UTC time: {utc_time}")

    # Convert to the local timezone
    local_time = to_local(dt)
    print(f"Local time: {local_time}")

    # Convert to the New York timezone
    ny_time = to_timezone(dt, "America/New_York")
    print(f"New York time: {ny_time}")

    # Convert to the London timezone
    london_time = to_timezone(dt, "Europe/London")
    print(f"London time: {london_time}")

    print()


def demo_timestamp_conversion():
    """Demonstrate timestamp conversion."""
    print("=== Timestamp conversion demo ===\n")

    # Get the current timestamp
    current_timestamp = to_timestamp(now())
    print(f"Current timestamp: {current_timestamp}")

    # Convert the timestamp back to a datetime
    dt_from_timestamp = to_datetime(current_timestamp)
    print(f"From timestamp: {dt_from_timestamp}")

    # Create a timezone-aware datetime from the timestamp
    dt_with_tz = to_timezone(to_datetime(current_timestamp), "Asia/Shanghai")
    print(f"Timezone-aware time: {dt_with_tz}")

    print()


def demo_in_spider():
    """Demonstrate using the date tools inside a spider."""
    print("=== Using the date tools in a spider ===\n")
    print("In a spider project you can use the date tools like this:")
    print("""
    from crawlo import Spider
    from crawlo.tools import parse_time, format_time

    class NewsSpider(Spider):
        def parse(self, response):
            # Extract the article's publish time
            publish_time_str = response.css('.publish-time::text').get()

            # Parse the publish time
            publish_time = parse_time(publish_time_str)

            # Format it into a standard representation
            formatted_time = format_time(publish_time, "%Y-%m-%d %H:%M:%S")

            # Work out how long ago the article was published
            current_time = self.tools.now()
            hours_since_publish = self.tools.time_diff(publish_time, current_time, "hours")

            # Decide whether to keep processing based on the publish time
            if hours_since_publish < 24:  # only handle articles from the last 24 hours
                # process the article ...
                pass
    """)


if __name__ == '__main__':
    # Run the demos
    demo_date_parsing()
    demo_date_formatting()
    demo_time_calculations()
    demo_timezone_conversion()
    demo_timestamp_conversion()
    demo_in_spider()
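crawlo.tools is new in 1.1.5, and the demo above only exercises its public names (parse_time, format_time, time_diff, TimeUtils, and the timezone helpers). As a rough mental model of the time_diff call used in the demo, a stand-in can be written with the standard library; approx_time_diff below is an illustrative assumption, not crawlo's implementation.

from datetime import datetime


def approx_time_diff(start, end, unit="seconds", fmt="%Y-%m-%d %H:%M:%S"):
    """Illustrative stand-in for a time_diff-style helper: parse two
    timestamp strings and return their difference in the requested unit."""
    start_dt = datetime.strptime(start, fmt)
    end_dt = datetime.strptime(end, fmt)
    seconds = (end_dt - start_dt).total_seconds()
    scale = {"seconds": 1, "minutes": 60, "hours": 3600, "days": 86400}[unit]
    return seconds / scale


if __name__ == "__main__":
    # Mirrors the demo values above: 4 hours, 30 minutes, 45 seconds apart -> 4.5125
    print(approx_time_diff("2025-09-10 10:00:00", "2025-09-10 14:30:45", "hours"))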