crawlo 1.3.4-py3-none-any.whl → 1.3.6-py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their public registries. It is provided for informational purposes only and reflects the changes between those published versions.
Potentially problematic release.
This version of crawlo might be problematic.
- crawlo/__init__.py +87 -87
- crawlo/__version__.py +1 -1
- crawlo/cli.py +75 -75
- crawlo/commands/__init__.py +14 -14
- crawlo/commands/check.py +594 -594
- crawlo/commands/genspider.py +151 -151
- crawlo/commands/help.py +138 -138
- crawlo/commands/list.py +155 -155
- crawlo/commands/run.py +341 -341
- crawlo/commands/startproject.py +436 -436
- crawlo/commands/stats.py +187 -187
- crawlo/commands/utils.py +196 -196
- crawlo/config.py +312 -312
- crawlo/config_validator.py +277 -277
- crawlo/core/__init__.py +45 -45
- crawlo/core/engine.py +439 -439
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +257 -257
- crawlo/crawler.py +638 -638
- crawlo/data/__init__.py +5 -5
- crawlo/data/user_agents.py +194 -194
- crawlo/downloader/__init__.py +273 -273
- crawlo/downloader/aiohttp_downloader.py +228 -228
- crawlo/downloader/cffi_downloader.py +245 -245
- crawlo/downloader/httpx_downloader.py +259 -259
- crawlo/downloader/hybrid_downloader.py +212 -212
- crawlo/downloader/playwright_downloader.py +402 -402
- crawlo/downloader/selenium_downloader.py +472 -472
- crawlo/event.py +11 -11
- crawlo/exceptions.py +81 -81
- crawlo/extension/__init__.py +39 -39
- crawlo/extension/health_check.py +141 -141
- crawlo/extension/log_interval.py +57 -57
- crawlo/extension/log_stats.py +81 -81
- crawlo/extension/logging_extension.py +61 -61
- crawlo/extension/memory_monitor.py +104 -104
- crawlo/extension/performance_profiler.py +133 -133
- crawlo/extension/request_recorder.py +107 -107
- crawlo/factories/__init__.py +27 -27
- crawlo/factories/base.py +68 -68
- crawlo/factories/crawler.py +103 -103
- crawlo/factories/registry.py +84 -84
- crawlo/filters/__init__.py +154 -154
- crawlo/filters/aioredis_filter.py +257 -257
- crawlo/filters/memory_filter.py +269 -269
- crawlo/framework.py +292 -291
- crawlo/initialization/__init__.py +39 -39
- crawlo/initialization/built_in.py +425 -425
- crawlo/initialization/context.py +141 -141
- crawlo/initialization/core.py +193 -193
- crawlo/initialization/phases.py +148 -148
- crawlo/initialization/registry.py +145 -145
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +23 -23
- crawlo/items/fields.py +52 -52
- crawlo/items/items.py +104 -104
- crawlo/logging/__init__.py +37 -37
- crawlo/logging/config.py +96 -96
- crawlo/logging/factory.py +128 -128
- crawlo/logging/manager.py +111 -111
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +132 -132
- crawlo/middleware/download_delay.py +104 -104
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/offsite.py +123 -123
- crawlo/middleware/proxy.py +386 -386
- crawlo/middleware/request_ignore.py +86 -86
- crawlo/middleware/response_code.py +163 -163
- crawlo/middleware/response_filter.py +136 -136
- crawlo/middleware/retry.py +124 -124
- crawlo/middleware/simple_proxy.py +65 -65
- crawlo/mode_manager.py +212 -212
- crawlo/network/__init__.py +21 -21
- crawlo/network/request.py +379 -379
- crawlo/network/response.py +359 -359
- crawlo/pipelines/__init__.py +21 -21
- crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +316 -316
- crawlo/pipelines/database_dedup_pipeline.py +222 -222
- crawlo/pipelines/json_pipeline.py +218 -218
- crawlo/pipelines/memory_dedup_pipeline.py +115 -115
- crawlo/pipelines/mongo_pipeline.py +131 -131
- crawlo/pipelines/mysql_pipeline.py +325 -318
- crawlo/pipelines/pipeline_manager.py +76 -76
- crawlo/pipelines/redis_dedup_pipeline.py +166 -166
- crawlo/project.py +327 -327
- crawlo/queue/pqueue.py +42 -42
- crawlo/queue/queue_manager.py +503 -503
- crawlo/queue/redis_priority_queue.py +326 -326
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +321 -321
- crawlo/settings/setting_manager.py +214 -214
- crawlo/spider/__init__.py +657 -657
- crawlo/stats_collector.py +73 -73
- crawlo/subscriber.py +129 -129
- crawlo/task_manager.py +138 -138
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +118 -118
- crawlo/templates/project/pipelines.py.tmpl +96 -96
- crawlo/templates/project/settings.py.tmpl +167 -167
- crawlo/templates/project/settings_distributed.py.tmpl +166 -166
- crawlo/templates/project/settings_gentle.py.tmpl +166 -166
- crawlo/templates/project/settings_high_performance.py.tmpl +167 -167
- crawlo/templates/project/settings_minimal.py.tmpl +65 -65
- crawlo/templates/project/settings_simple.py.tmpl +164 -164
- crawlo/templates/project/spiders/__init__.py.tmpl +9 -9
- crawlo/templates/run.py.tmpl +34 -34
- crawlo/templates/spider/spider.py.tmpl +143 -143
- crawlo/templates/spiders_init.py.tmpl +9 -9
- crawlo/tools/__init__.py +200 -200
- crawlo/tools/anti_crawler.py +268 -268
- crawlo/tools/authenticated_proxy.py +240 -240
- crawlo/tools/data_formatter.py +225 -225
- crawlo/tools/data_validator.py +180 -180
- crawlo/tools/date_tools.py +289 -289
- crawlo/tools/distributed_coordinator.py +388 -388
- crawlo/tools/encoding_converter.py +127 -127
- crawlo/tools/network_diagnostic.py +364 -364
- crawlo/tools/request_tools.py +82 -82
- crawlo/tools/retry_mechanism.py +224 -224
- crawlo/tools/scenario_adapter.py +262 -262
- crawlo/tools/text_cleaner.py +232 -232
- crawlo/utils/__init__.py +34 -34
- crawlo/utils/batch_processor.py +259 -259
- crawlo/utils/class_loader.py +25 -25
- crawlo/utils/controlled_spider_mixin.py +439 -439
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/enhanced_error_handler.py +356 -356
- crawlo/utils/env_config.py +142 -142
- crawlo/utils/error_handler.py +165 -165
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +286 -286
- crawlo/utils/large_scale_helper.py +344 -344
- crawlo/utils/log.py +80 -44
- crawlo/utils/performance_monitor.py +285 -285
- crawlo/utils/queue_helper.py +175 -175
- crawlo/utils/redis_connection_pool.py +388 -388
- crawlo/utils/redis_key_validator.py +198 -198
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +225 -225
- crawlo/utils/spider_loader.py +61 -61
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- {crawlo-1.3.4.dist-info → crawlo-1.3.6.dist-info}/METADATA +1126 -1126
- crawlo-1.3.6.dist-info/RECORD +290 -0
- examples/__init__.py +7 -7
- tests/__init__.py +7 -7
- tests/advanced_tools_example.py +275 -275
- tests/authenticated_proxy_example.py +106 -106
- tests/baidu_performance_test.py +108 -108
- tests/baidu_test.py +59 -59
- tests/cleaners_example.py +160 -160
- tests/comprehensive_framework_test.py +212 -212
- tests/comprehensive_test.py +81 -81
- tests/comprehensive_testing_summary.md +186 -186
- tests/config_validation_demo.py +142 -142
- tests/controlled_spider_example.py +205 -205
- tests/date_tools_example.py +180 -180
- tests/debug_configure.py +69 -69
- tests/debug_framework_logger.py +84 -84
- tests/debug_log_config.py +127 -0
- tests/debug_log_levels.py +63 -63
- tests/debug_pipelines.py +66 -66
- tests/detailed_log_test.py +234 -0
- tests/distributed_test.py +66 -66
- tests/distributed_test_debug.py +76 -76
- tests/dynamic_loading_example.py +523 -523
- tests/dynamic_loading_test.py +104 -104
- tests/env_config_example.py +133 -133
- tests/error_handling_example.py +171 -171
- tests/final_comprehensive_test.py +151 -151
- tests/final_log_test.py +261 -0
- tests/final_validation_test.py +182 -182
- tests/fix_log_test.py +143 -0
- tests/framework_performance_test.py +202 -202
- tests/log_buffering_test.py +112 -0
- tests/log_generation_timing_test.py +154 -0
- tests/optimized_performance_test.py +211 -211
- tests/performance_comparison.py +245 -245
- tests/queue_blocking_test.py +113 -113
- tests/queue_test.py +89 -89
- tests/redis_key_validation_demo.py +130 -130
- tests/request_params_example.py +150 -150
- tests/response_improvements_example.py +144 -144
- tests/scrapy_comparison/ofweek_scrapy.py +138 -138
- tests/scrapy_comparison/scrapy_test.py +133 -133
- tests/simple_command_test.py +119 -119
- tests/simple_crawlo_test.py +127 -127
- tests/simple_log_test.py +57 -57
- tests/simple_log_test2.py +138 -0
- tests/simple_optimization_test.py +128 -128
- tests/simple_queue_type_test.py +42 -0
- tests/simple_spider_test.py +49 -49
- tests/simple_test.py +47 -47
- tests/spider_log_timing_test.py +178 -0
- tests/test_advanced_tools.py +148 -148
- tests/test_all_commands.py +230 -230
- tests/test_all_redis_key_configs.py +145 -145
- tests/test_authenticated_proxy.py +141 -141
- tests/test_batch_processor.py +178 -178
- tests/test_cleaners.py +54 -54
- tests/test_component_factory.py +174 -174
- tests/test_comprehensive.py +146 -146
- tests/test_config_consistency.py +80 -80
- tests/test_config_merge.py +152 -152
- tests/test_config_validator.py +182 -182
- tests/test_controlled_spider_mixin.py +79 -79
- tests/test_crawlo_proxy_integration.py +108 -108
- tests/test_date_tools.py +123 -123
- tests/test_default_header_middleware.py +158 -158
- tests/test_distributed.py +65 -65
- tests/test_double_crawlo_fix.py +207 -207
- tests/test_double_crawlo_fix_simple.py +124 -124
- tests/test_download_delay_middleware.py +221 -221
- tests/test_downloader_proxy_compatibility.py +268 -268
- tests/test_dynamic_downloaders_proxy.py +124 -124
- tests/test_dynamic_proxy.py +92 -92
- tests/test_dynamic_proxy_config.py +146 -146
- tests/test_dynamic_proxy_real.py +109 -109
- tests/test_edge_cases.py +303 -303
- tests/test_enhanced_error_handler.py +270 -270
- tests/test_enhanced_error_handler_comprehensive.py +245 -245
- tests/test_env_config.py +121 -121
- tests/test_error_handler_compatibility.py +112 -112
- tests/test_factories.py +252 -252
- tests/test_final_validation.py +153 -153
- tests/test_framework_env_usage.py +103 -103
- tests/test_framework_logger.py +66 -66
- tests/test_framework_startup.py +64 -64
- tests/test_get_component_logger.py +84 -0
- tests/test_integration.py +169 -169
- tests/test_item_dedup_redis_key.py +122 -122
- tests/test_large_scale_config.py +112 -112
- tests/test_large_scale_helper.py +235 -235
- tests/test_logging_system.py +283 -0
- tests/test_mode_change.py +72 -72
- tests/test_mode_consistency.py +51 -51
- tests/test_offsite_middleware.py +221 -221
- tests/test_parsel.py +29 -29
- tests/test_performance.py +327 -327
- tests/test_performance_monitor.py +115 -115
- tests/test_proxy_api.py +264 -264
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware.py +121 -121
- tests/test_proxy_middleware_enhanced.py +216 -216
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_middleware_refactored.py +184 -184
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_queue_empty_check.py +41 -41
- tests/test_queue_manager_double_crawlo.py +173 -173
- tests/test_queue_manager_redis_key.py +176 -176
- tests/test_queue_type.py +107 -0
- tests/test_random_user_agent.py +72 -72
- tests/test_real_scenario_proxy.py +195 -195
- tests/test_redis_config.py +28 -28
- tests/test_redis_connection_pool.py +294 -294
- tests/test_redis_key_naming.py +181 -181
- tests/test_redis_key_validator.py +123 -123
- tests/test_redis_queue.py +224 -224
- tests/test_request_ignore_middleware.py +182 -182
- tests/test_request_params.py +111 -111
- tests/test_request_serialization.py +70 -70
- tests/test_response_code_middleware.py +349 -349
- tests/test_response_filter_middleware.py +427 -427
- tests/test_response_improvements.py +152 -152
- tests/test_retry_middleware.py +241 -241
- tests/test_scheduler.py +252 -252
- tests/test_scheduler_config_update.py +133 -133
- tests/test_simple_response.py +61 -61
- tests/test_telecom_spider_redis_key.py +205 -205
- tests/test_template_content.py +87 -87
- tests/test_template_redis_key.py +134 -134
- tests/test_tools.py +159 -159
- tests/test_user_agents.py +96 -96
- tests/tools_example.py +260 -260
- tests/untested_features_report.md +138 -138
- tests/verify_debug.py +51 -51
- tests/verify_distributed.py +117 -117
- tests/verify_log_fix.py +111 -111
- crawlo-1.3.4.dist-info/RECORD +0 -278
- {crawlo-1.3.4.dist-info → crawlo-1.3.6.dist-info}/WHEEL +0 -0
- {crawlo-1.3.4.dist-info → crawlo-1.3.6.dist-info}/entry_points.txt +0 -0
- {crawlo-1.3.4.dist-info → crawlo-1.3.6.dist-info}/top_level.txt +0 -0
tests/performance_comparison.py
CHANGED
@@ -1,246 +1,246 @@

All 245 changed lines were removed and re-added with identical content (only line 246, main(), is unchanged context), so the file is shown once below.

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Crawlo vs Scrapy performance comparison test
"""
import asyncio
import time
import subprocess
import sys
import os
import re

# Add the project root directory to the Python path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

def run_crawlo_test():
    """Run the Crawlo performance test"""
    print("Starting Crawlo performance test...")
    start_time = time.time()

    # Run the Crawlo test
    try:
        # Use a small page count to save time
        result = subprocess.run([
            'python', '-c', '''
import asyncio
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

from crawlo import Spider, Request
from crawlo.crawler import CrawlerProcess
from crawlo.items import Item, Field

class NewsItem(Item):
    title = Field()
    publish_time = Field()
    url = Field()
    source = Field()
    content = Field()

class OfweekPerformanceSpider(Spider):
    name = "ofweek_performance"

    def start_requests(self):
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Pragma": "no-cache",
            "Referer": "https://ee.ofweek.com/CATList-2800-8100-ee-2.html",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
        }
        cookies = {
            "__utmz": "57425525.1730117117.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)",
            "Hm_lvt_abe9900db162c6d089cdbfd107db0f03": "1739244841",
            "Hm_lvt_af50e2fc51af73da7720fb324b88a975": "1740100727",
            "JSESSIONID": "FEA96D3B5FC31350B2285E711BF2A541",
            "Hm_lvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477622",
            "HMACCOUNT": "08DF0D235A291EAA",
            "__utma": "57425525.2080994505.1730117117.1747970718.1757477622.50",
            "__utmc": "57425525",
            "__utmt": "1",
            "__utmb": "57425525.2.10.1757477622",
            "Hm_lpvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477628",
            "index_burying_point": "c64d6c31e69d560efe319cc9f8be279f"
        }

        # Use fewer pages for the test
        max_page = 10
        for page in range(1, max_page + 1):
            url = f"https://ee.ofweek.com/CATList-2800-8100-ee-{page}.html"
            yield Request(url=url, callback=self.parse, headers=headers, cookies=cookies)

    def parse(self, response):
        from urllib.parse import urljoin
        rows = response.xpath("//div[@class=\"main_left\"]/div[@class=\"list_model\"]/div[@class=\"model_right model_right2\"]")

        for row in rows:
            try:
                url = row.xpath("./h3/a/@href").extract_first()
                title = row.xpath("./h3/a/text()").extract_first()

                if not url or not title:
                    continue

                absolute_url = urljoin(response.url, url)
                if not absolute_url.startswith(("http://", "https://")):
                    continue

                yield Request(
                    url=absolute_url,
                    meta={"title": title.strip() if title else "", "parent_url": response.url},
                    callback=self.parse_detail
                )
            except Exception:
                continue

    def parse_detail(self, response):
        title = response.meta.get("title", "")
        content_elements = response.xpath("//div[@class=\"TRS_Editor\"]|//*[@id=\"articleC\"]")
        if content_elements:
            content = content_elements.xpath(".//text()").extract()
            content = "\\n".join([text.strip() for text in content if text.strip()])
        else:
            content = ""

        publish_time = response.xpath("//div[@class=\"time fl\"]/text()").extract_first()
        if publish_time:
            publish_time = publish_time.strip()

        source = response.xpath("//div[@class=\"source-name\"]/text()").extract_first()

        item = NewsItem()
        item["title"] = title.strip() if title else ""
        item["publish_time"] = publish_time if publish_time else ""
        item["url"] = response.url
        item["source"] = source if source else ""
        item["content"] = content

        yield item

async def main():
    process = CrawlerProcess(settings={
        "CONCURRENCY": 8,
        "DOWNLOAD_DELAY": 0.1,
        "LOG_LEVEL": "ERROR",  # reduce log output to improve performance
    })
    await process.crawl(OfweekPerformanceSpider)

if __name__ == "__main__":
    asyncio.run(main())
'''
        ], capture_output=True, text=True, timeout=300)  # 5-minute timeout

        end_time = time.time()
        execution_time = end_time - start_time

        # Parse the results
        output = result.stdout
        error = result.stderr

        # Count the pages processed
        pages_crawled = output.count("正在解析页面:") + output.count("正在解析详情页:")

        print(f"Crawlo execution time: {execution_time:.2f}s")
        print(f"Crawlo pages processed: {pages_crawled}")
        print(f"Crawlo average speed: {pages_crawled/execution_time:.2f} pages/s")

        return execution_time, pages_crawled

    except subprocess.TimeoutExpired:
        print("Crawlo test timed out")
        return None, 0
    except Exception as e:
        print(f"Crawlo test error: {e}")
        return None, 0

def run_scrapy_test():
    """Run the Scrapy performance test"""
    print("\nStarting Scrapy performance test...")
    start_time = time.time()

    try:
        # Run the Scrapy test
        result = subprocess.run([
            'scrapy', 'runspider',
            '/Users/oscar/projects/Crawlo/tests/scrapy_comparison/ofweek_scrapy.py',
            '-s', 'CONCURRENT_REQUESTS=8',
            '-s', 'DOWNLOAD_DELAY=0.1',
            '-s', 'LOG_LEVEL=ERROR'
        ], capture_output=True, text=True, timeout=300, cwd='/Users/oscar/projects/Crawlo')

        end_time = time.time()
        execution_time = end_time - start_time

        # Parse the results
        output = result.stdout
        error = result.stderr

        # Count the pages processed
        pages_crawled = output.count("正在解析页面:") + output.count("正在解析详情页:")

        print(f"Scrapy execution time: {execution_time:.2f}s")
        print(f"Scrapy pages processed: {pages_crawled}")
        print(f"Scrapy average speed: {pages_crawled/execution_time:.2f} pages/s")

        return execution_time, pages_crawled

    except subprocess.TimeoutExpired:
        print("Scrapy test timed out")
        return None, 0
    except Exception as e:
        print(f"Scrapy test error: {e}")
        return None, 0

def main():
    """Main function"""
    print("=== Crawlo vs Scrapy performance comparison ===")

    # Create the test directory
    os.makedirs('/Users/oscar/projects/Crawlo/tests/scrapy_comparison', exist_ok=True)

    # Run the tests
    crawlo_time, crawlo_pages = run_crawlo_test()
    scrapy_time, scrapy_pages = run_scrapy_test()

    # Print the comparison results
    print("\n=== Performance comparison results ===")
    if crawlo_time and scrapy_time:
        print(f"Crawlo execution time: {crawlo_time:.2f}s")
        print(f"Scrapy execution time: {scrapy_time:.2f}s")
        print(f"Time difference: {abs(crawlo_time - scrapy_time):.2f}s")

        if crawlo_time < scrapy_time:
            improvement = (scrapy_time - crawlo_time) / scrapy_time * 100
            print(f"Crawlo is {improvement:.2f}% faster than Scrapy")
        else:
            improvement = (crawlo_time - scrapy_time) / crawlo_time * 100
            print(f"Scrapy is {improvement:.2f}% faster than Crawlo")

    if crawlo_pages and scrapy_pages:
        print(f"\nCrawlo pages processed: {crawlo_pages}")
        print(f"Scrapy pages processed: {scrapy_pages}")

        crawlo_speed = crawlo_pages / crawlo_time if crawlo_time else 0
        scrapy_speed = scrapy_pages / scrapy_time if scrapy_time else 0

        print(f"Crawlo average speed: {crawlo_speed:.2f} pages/s")
        print(f"Scrapy average speed: {scrapy_speed:.2f} pages/s")

        if crawlo_speed > scrapy_speed:
            speed_improvement = (crawlo_speed - scrapy_speed) / scrapy_speed * 100
            print(f"Crawlo throughput is {speed_improvement:.2f}% higher than Scrapy")
        elif scrapy_speed > crawlo_speed:
            speed_improvement = (scrapy_speed - crawlo_speed) / crawlo_speed * 100
            print(f"Scrapy throughput is {speed_improvement:.2f}% higher than Crawlo")

if __name__ == '__main__':
    main()
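
The script imports re but counts processed pages with plain substring matching on the spiders' Chinese log prefixes. As a minimal sketch under that same assumption (that the spiders emit log lines containing "正在解析页面:" or "正在解析详情页:"), a hypothetical regex-based helper could replace the two output.count(...) calls:

import re

# Assumed log prefixes, copied from the comparison script above
# ("parsing page:" / "parsing detail page:"); adjust the pattern if the
# spiders log in a different format.
PAGE_LOG_PATTERN = re.compile(r"正在解析(?:页面|详情页):")

def count_pages(output: str) -> int:
    """Count page and detail-page log lines in the captured stdout."""
    return len(PAGE_LOG_PATTERN.findall(output))

# Usage sketch: pages_crawled = count_pages(result.stdout)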